From 0521deb1ddbb33fc8beaec7e1d28d57fe7423af0 Mon Sep 17 00:00:00 2001
From: yomori
Date: Tue, 19 Dec 2023 20:14:09 -0800
Subject: [PATCH 01/17] re-add desy3 before I forget

---
 examples/desy3/config.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/desy3/config.yml b/examples/desy3/config.yml
index 937905ab0..c188a1194 100644
--- a/examples/desy3/config.yml
+++ b/examples/desy3/config.yml
@@ -60,7 +60,7 @@ TXSourceSelectorMetacal:
     delta_gamma: 0.02
     source_zbin_edges: [0.0, 0.2, 0.4, 0.6, 0.8]
     shear_prefix: mcal_
-    resp_mean_diag : True
+    use_diagonal_response : True
 
 TXSourceSelectorLensfit:
     bands: ri #used for selection

From b9164363b66c9d086c99a702d8db3aac69d29788 Mon Sep 17 00:00:00 2001
From: yomori
Date: Tue, 13 Feb 2024 08:36:38 -0800
Subject: [PATCH 02/17] initial push

---
 txpipe/twopoint_fourier.py | 87 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 87 insertions(+)

diff --git a/txpipe/twopoint_fourier.py b/txpipe/twopoint_fourier.py
index e04993d32..3249efdfb 100755
--- a/txpipe/twopoint_fourier.py
+++ b/txpipe/twopoint_fourier.py
@@ -37,6 +37,93 @@
 )
 
 
+class TXpureB(TXTwoPointFourier):
    '''
    Make shear B-mode measurements
    '''
    name = "TXpureB"

    config_options = {
        'method'    : 'becker', # 'namaster' or 'becker'
        'Nell'      : 20,       # Number of ell bins
        'lmin'      : 200,      # ell min
        'lmax'      : 2000,     # ell max
        'lspacing'  : 'log',    # ell spacing for binning
        'bin_file'  : None,     # specify bins
        'theta_min' : 2.5,      # (only for hybrideb) theta min in arcmin
        'theta_max' : 250,      # (only for hybrideb) theta max in arcmin
        'Ntheta'    : 1000,     # (only for hybrideb) Number of theta bins
        'precomputed_hybrideb_weights' : None
    }

    def run(self):
        import pymaster as nmt
        import healpy as hp
        import sacc
        import pyccl
        import hybrideb

        if self.config['method'] == 'namaster':
            # Create bins (TODO: fix)
            b = nmt.NmtBin.from_nside_linear(hp.npix2nside(mask.shape[0]), self.config["Nell"])

            # Initialize the fields
            f2yp0 = nmt.NmtField(mask, [dmap[1], dmap[2]], purify_e=False, purify_b=True)

            # Initialize workspace and compute coupling matrix
            w_yp = nmt.NmtWorkspace()
            w_yp.compute_coupling_matrix(f2yp0, f2yp0, b)

            # Compute spectra
            cl_coupled = nmt.compute_coupled_cell(f2yp0, f2yp0)
            cl_decoupled = w_yp.decouple_cell(cl_coupled)

            # Return ell bins and the deconvolved clbb
            return b.get_effective_ells(), cl_decoupled


        elif self.config['method'] == 'becker':
            '''
            B-mode method of Becker and Rozo 2015 http://arxiv.org/abs/1412.3851
            '''
            if self.config["precomputed_hybrideb_weights"] is None:
                # Recomputing all weight functions -- slow
                heb = hybrideb.HybridEB(self.config['theta_min'], self.config['theta_max'], self.config['Ntheta'])
                beb = hybrideb.BinEB(self.config['theta_min'], self.config['theta_max'], self.config['Ntheta'])
                geb = hybrideb.GaussEB(beb, heb)

            else:
                # Loading precomputed weight functions
                geb_nosep = np.load('/lcrc/project/SPT3G/users/ac.yomori/repo/nulltests_txpipe/geb_nosep_dict.npz',allow_pickle=True)
                geb_sep = np.load('/lcrc/project/SPT3G/users/ac.yomori/repo/nulltests_txpipe/geb_sep_dict.npz',allow_pickle=True)

            En = np.zeros(Nl); En_nosep = np.zeros(Nl)
            Bn = np.zeros(Nl); Bn_nosep = np.zeros(Nl)
            ell = np.zeros(Nl)

            for i in range(int(Nl)):

                # with-separation calculation
                res = geb_sep['%d'%(i+1)].tolist()

                Fp = res['2']
                Fm = res['3']
                En[i] = np.sum(Fp*xip + Fm*xim)/2
                Bn[i] = np.sum(Fp*xip - Fm*xim)/2
                ell[i] = res['4'][np.argmax(res['5'])]

                # no-separation calculation
                res = geb_nosep['%d'%(i+1)].tolist()

                Fp_nosep = res['2']
                Fm_nosep = res['3']

                En_nosep[i] = np.sum(Fp_nosep*xip + Fm_nosep*xim)/2
                Bn_nosep[i] = np.sum(Fp_nosep*xip - Fm_nosep*xim)/2




class TXTwoPointFourier(PipelineStage):
    """
    Make Fourier space 3x2pt measurements using NaMaster
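For reference, the 'becker' branch above implements the Becker and Rozo (2015) estimator, in which each E-mode and B-mode statistic is half the sum or difference of the weighted real-space correlation functions. A minimal standalone sketch of just that step, using hypothetical xip/xim arrays and random stand-ins for the hybrideb weight functions (in the stage itself these come from the measured correlations and from hybrideb):

import numpy as np

# Hypothetical stand-ins: xip/xim would be the measured xi_plus/xi_minus
# in Ntheta angular bins, and (Fp, Fm) one pair of hybrideb weight
# functions per E/B mode.
Ntheta = 1000
rng = np.random.default_rng(42)
xip = 1e-5 * rng.standard_normal(Ntheta)
xim = 1e-5 * rng.standard_normal(Ntheta)
Fp = rng.standard_normal(Ntheta)
Fm = rng.standard_normal(Ntheta)

# Becker & Rozo 2015 (arXiv:1412.3851): E/B statistics are half the
# sum/difference of the weighted correlation functions.
En = np.sum(Fp * xip + Fm * xim) / 2
Bn = np.sum(Fp * xip - Fm * xim) / 2
print(En, Bn)

A clean shear catalogue should leave Bn consistent with zero within the measurement covariance.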
From ae7fa4d2017d1250451ce5d5b64312aa3f932b98 Mon Sep 17 00:00:00 2001
From: yomori
Date: Tue, 13 Feb 2024 16:52:02 -0800
Subject: [PATCH 03/17] update

---
 txpipe/twopoint_fourier.py | 21 +++++++--------------
 1 file changed, 7 insertions(+), 14 deletions(-)

diff --git a/txpipe/twopoint_fourier.py b/txpipe/twopoint_fourier.py
index 3249efdfb..e031c5573 100755
--- a/txpipe/twopoint_fourier.py
+++ b/txpipe/twopoint_fourier.py
@@ -64,6 +64,9 @@ def run(self):
         import hybrideb
 
         if self.config['method'] == 'namaster':
+            '''
+            B-mode method that is already implemented in NaMaster
+            '''
             # Create bins (TODO: fix)
             b = nmt.NmtBin.from_nside_linear(hp.npix2nside(mask.shape[0]), self.config["Nell"])
 
@@ -94,11 +97,10 @@ def run(self):
 
             else:
                 # Loading precomputed weight functions
-                geb_nosep = np.load('/lcrc/project/SPT3G/users/ac.yomori/repo/nulltests_txpipe/geb_nosep_dict.npz',allow_pickle=True)
-                geb_sep = np.load('/lcrc/project/SPT3G/users/ac.yomori/repo/nulltests_txpipe/geb_sep_dict.npz',allow_pickle=True)
+                geb_sep = np.load(self.config['precomputed_hybrideb_weights'],allow_pickle=True)
 
-            En = np.zeros(Nl); En_nosep = np.zeros(Nl)
-            Bn = np.zeros(Nl); Bn_nosep = np.zeros(Nl)
+            En = np.zeros(Nl)
+            Bn = np.zeros(Nl)
             ell = np.zeros(Nl)
 
             for i in range(int(Nl)):
@@ -112,16 +114,7 @@ def run(self):
                 Bn[i] = np.sum(Fp*xip - Fm*xim)/2
                 ell[i] = res['4'][np.argmax(res['5'])]
 
-                # no-separation calculation
-                res = geb_nosep['%d'%(i+1)].tolist()
-
-                Fp_nosep = res['2']
-                Fm_nosep = res['3']
-
-                En_nosep[i] = np.sum(Fp_nosep*xip + Fm_nosep*xim)/2
-                Bn_nosep[i] = np.sum(Fp_nosep*xip - Fm_nosep*xim)/2
-
-
+            return ell, Bn
 
 class TXTwoPointFourier(PipelineStage):

From 712e6620b23071b5249b49de48dcd12552a05904 Mon Sep 17 00:00:00 2001
From: yomori
Date: Tue, 13 Feb 2024 16:57:05 -0800
Subject: [PATCH 04/17] update

---
 txpipe/twopoint_fourier.py | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/txpipe/twopoint_fourier.py b/txpipe/twopoint_fourier.py
index e031c5573..1b7ae9fac 100755
--- a/txpipe/twopoint_fourier.py
+++ b/txpipe/twopoint_fourier.py
@@ -95,27 +95,36 @@ def run(self):
                 beb = hybrideb.BinEB(self.config['theta_min'], self.config['theta_max'], self.config['Ntheta'])
                 geb = hybrideb.GaussEB(beb, heb)
 
+                geb_dict = {}
+                for i in range(0,self.config['Nell']):
+                    geb_dict['%d'%(i+1)] = {}
+                    for j in range(0,6):
+                        geb_dict['%d'%(i+1)]['%d'%(j+1)]=geb(i)[j]
+
+                np.savez('/lcrc/project/SPT3G/users/ac.yomori/repo/nulltests_txpipe/geb_nosep_dict.npz', **geb_dict)
+
             else:
                 # Loading precomputed weight functions
-                geb_sep = np.load(self.config['precomputed_hybrideb_weights'],allow_pickle=True)
+                geb = np.load(self.config['precomputed_hybrideb_weights'],allow_pickle=True)
 
             En = np.zeros(Nl)
             Bn = np.zeros(Nl)
             ell = np.zeros(Nl)
 
             for i in range(int(Nl)):
-
+
                 # with-separation calculation
-                res = geb_sep['%d'%(i+1)].tolist()
-
-                Fp = res['2']
-                Fm = res['3']
+                res = geb['%d'%(i+1)].tolist()
+                Fp = res['2']
+                Fm = res['3']
                 En[i] = np.sum(Fp*xip + Fm*xim)/2
                 Bn[i] = np.sum(Fp*xip - Fm*xim)/2
                 ell[i] = res['4'][np.argmax(res['5'])]
 
             return ell, Bn
 
+        else:
+            sys.exit("The only methods implemented right now are 'becker' and 'namaster'")
 
 class TXTwoPointFourier(PipelineStage):
     """

From 3023080e89d27bbb3e50817d7c0ebfc4c8c58936 Mon Sep 17 00:00:00 2001
From: yomori
Date: Tue, 13 Feb 2024 21:02:50 -0800
Subject: [PATCH 05/17] added yaml

---
 examples/desy1/pipeline_Bmodes.yml | 116 +++++++++++++++++++++++++++++
 1 file changed, 116 insertions(+)
 create mode 100644 examples/desy1/pipeline_Bmodes.yml

diff --git a/examples/desy1/pipeline_Bmodes.yml b/examples/desy1/pipeline_Bmodes.yml
new file mode 100644
index 000000000..0f6890305
--- /dev/null
+++ b/examples/desy1/pipeline_Bmodes.yml
@@ -0,0 +1,116 @@
+
# Stages to run
stages:

  - name: TXSourceSelectorMetacal # select and split objects into source bins
  - name: TXShearCalibration # Calibrate and split the source sample tomographically
  #- name: TXTruthLensCatalogSplitter
  #- name: TXStarCatalogSplitter # Split the star catalog into separate bins (psf/non-psf)
  #- name: TXPhotozPlot
  - name: TXSourceMaps
    #nprocess: 6
    #threads_per_process: 1
    #nodes: 1
  #- name: TXLensMaps
  #- name: TXAuxiliarySourceMaps # make PSF and flag maps
  #- name: TXAuxiliaryLensMaps # make depth and bright object maps
  #- name: TXSourceNoiseMaps # noise maps for sources using random rotations
  # stage below is not working because TXAuxiliaryLensMaps is broken
  #- name: TXLensNoiseMaps # noise maps for lenses using density splits
  #- name: TXDensityMaps # turn mask and ngal maps into overdensity maps
  #- name: TXMapPlots # make pictures of all the maps
  #- name: TXTracerMetadata # collate metadata
  #- name: TXJackknifeCenters # Split the area into jackknife regions
  #- name: TXRandomCat
  #- name: TXTwoPoint # Compute real-space 2-point correlations
  # threads_per_process: 128
  #- name: TXBlinding # Blind the data following Muir et al
  #threads_per_process: 2
  #- name: TXTwoPointTheoryReal # compute theory using CCL to save in sacc file and plot later
  # - name: TXTwoPointPlots # Make plots of 2pt correlations
  #- name: TXSourceHistogramPlots
  #- name: TXSourceDiagnosticPlots # Make a suite of diagnostic plots
  #nprocess: 8
  #nodes: 2
  #- name: TXLensDiagnosticPlots # Make a suite of diagnostic plots
  #- name: TXGammaTFieldCenters # Compute and plot gamma_t around center points # ERROR: EXPOSURE FILE
  ##threads_per_process: 2
  #- name: TXGammaTStars # Compute and plot gamma_t around bright stars
  #threads_per_process: 2
  #- name: TXGammaTRandoms # Compute and plot gamma_t around randoms
  #threads_per_process: 2
  #- name: TXRoweStatistics # Compute and plot Rowe statistics
  # threads_per_process: 128
  #- name: TXFocalPlanePlot # mean PSF ellipticity and mean residual on the focal plane
  #- name: TXGalaxyStarDensity # Compute and plot the star-galaxy density cross-correlation
  #- name: TXGalaxyStarShear # Compute and plot the star-galaxy shear cross-correlation
  #- name: TXPSFDiagnostics # Compute and plot other PSF diagnostics
  #- name: TXBrighterFatterPlot # Make plots tracking the brighter-fatter effect
  #- name: TXConvergenceMaps # Make convergence kappa maps from g1, g2 maps #ERROR: WLMASSMAP
  #- name: TXConvergenceMapPlots # Plot the convergence map
  #- name: TXMapCorrelations # plot the correlations between systematics and data # ERROR: NEEDS CONVERGENCE MAP
  #- name: TXApertureMass # Compute aperture-mass statistics
  ##threads_per_process: 2
  # Disabled since not yet synchronised with new Treecorr MPI
  # - name: TXSelfCalibrationIA # Self-calibration intrinsic alignments of galaxies
  #
  # Disabling these as they take too long for a quick test.
  # The latter three need NaMaster

  ##- name: TXRealGaussianCovariance # Compute covariance of real-space correlations
  - name: TXTwoPointFourier # Compute power spectra C_ell
  ##- name: TXFourierNamasterCovariance # Compute the C_ell covariance
  ##- name: TXFourierTJPCovariance # Compute the C_ell covariance with TJPCov

#===========

launcher:
    name: mini
    interval: 1.0

site:
    name: local
    max_threads: 128

modules: txpipe

# where to find any modules that are not in this repo,
# and any other code we need.
python_paths:
  - submodules/WLMassMap/python/desc/
  - submodules/TJPCov

# where to put outputs
output_dir: data/desy1/outputs

# configuration settings
config: examples/desy1/config.yml

# These are overall inputs to the whole pipeline, not generated within it
inputs:
    shear_catalog: /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y1/shear_catalog_desy1_masked.h5
    # subsampled catalog for debugging:
    #shear_catalog: /global/cscratch1/sd/jjeffers/TXPipe/data/desy1/inputs/small_DESY1_shear_catalog.h5
    photometry_catalog: /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y1/photometry_catalog_desy1_RM.h5
    lens_tomography_catalog: /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y1/lens_tomography_catalog_desy1_RM_091423.h5
    shear_photoz_stack: /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y1/shear_photoz_stack.hdf5
    lens_photoz_stack: /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y1/lens_photoz_stack.hdf5
    calibration_table: data/example/inputs/sample_cosmodc2_w10year_errors.dat
    exposures: data/example/inputs/exposures.hdf5
    star_catalog: /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y1/DES_psf_catalog.hdf5
    fiducial_cosmology: data/fiducial_cosmology.yml
    # For the self-calibration extension we are not using Random_cat_source for now
    # So it is set to Null, so the yaml interpreter returns a None value to python.
    random_cats_source: Null
    mask: /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y1/mask_desy1.h5
    #random_cats: /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y1/randoms_desy1_RM.hdf5
    #binned_random_catalog: /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y1/binned_randoms_desy1_RM.hdf5

# if supported by the launcher, restart the pipeline where it left off
# if interrupted
resume: True

# where to put output logs for individual stages
log_dir: data/desy1/logs

# where to put an overall parsl pipeline log
pipeline_log: data/desy1/log.txt

From 54bd9ac4321ddced58e921c605663e6ed433bc62 Mon Sep 17 00:00:00 2001
From: yomori
Date: Wed, 14 Feb 2024 22:49:23 -0800
Subject: [PATCH 06/17] update

---
 examples/desy3/config_Bmodes.yml   | 395 +++++++++++++++++++++++++++++
 examples/desy3/pipeline_Bmodes.yml | 152 +++++++++++
 2 files changed, 547 insertions(+)
 create mode 100644 examples/desy3/config_Bmodes.yml
 create mode 100644 examples/desy3/pipeline_Bmodes.yml

diff --git a/examples/desy3/config_Bmodes.yml b/examples/desy3/config_Bmodes.yml
new file mode 100644
index 000000000..9fbc51c8a
--- /dev/null
+++ b/examples/desy3/config_Bmodes.yml
@@ -0,0 +1,395 @@
+# Values in this section are accessible to all the different stages.
# They can be overridden by individual stages though.
+global: + # This is read by many stages that read complete + # catalog data, and tells them how many rows to read + # at once + chunk_rows: 100000 + # These mapping options are also read by a range of stages + pixelization: healpix + nside: 64 + sparse: True # Generate sparse maps - faster if using small areas + +TXGCRTwoCatalogInput: + metacal_dir: /global/cscratch1/sd/desc/DC2/data/Run2.2i/dpdd/Run2.2i-t3828/metacal_table_summary + photo_dir: /global/cscratch1/sd/desc/DC2/data/Run2.2i/dpdd/Run2.2i-t3828/object_table_summary + +TXMetacalGCRInput: + cat_name: dc2_object_run2.1i_dr1b_with_metacal_griz + +TXExposureInfo: + dc2_name: '1.2p' + + +TXCosmoDC2Mock: + cat_name: cosmoDC2_v1.1.4_image + visits_per_band: 16 + extra_cols: redshift_true size_true shear_1 shear_2 + flip_g2: True # to match metacal + +TXIngestRedmagic: + lens_zbin_edges: [0.1, 0.3, 0.5] + +PZPDFMLZ: + nz: 301 + zmax: 3.0 + +TXLensTrueNumberDensity: + zmax: 3.0 + nz: 301 + +PZPrepareEstimatorLens: + name: PZPrepareEstimatorLens + classname: Inform_BPZ_lite + aliases: + input: spectroscopic_catalog + model: lens_photoz_model + zmin: 0.0 + zmax: 3.0 + nzbins: 301 + columns_file: ./data/bpz_ugrizy.columns + data_path: ./data/example/rail-bpz-inputs + spectra_file: CWWSB4.list + prior_band: i + ref_band: i + # Not sure about this + prior_file: hdfn_gen + p_min: 0.005 + gauss_kernel: 0.0 + mag_err_min: 0.005 + inform_options: {'save_train': False, 'load_model': False, 'modelfile': 'BPZpriormodel.out'} + madau_reddening: no + bands: riz + zp_errors: [0.01, 0.01, 0.01, 0.01, 0.01, 0.01] + hdf5_groupname: photometry + + +PZPrepareEstimatorSource: + name: PZPrepareEstimatorSource + classname: Inform_BPZ_lite + aliases: + input: spectroscopic_catalog + model: source_photoz_model + zmin: 0.0 + zmax: 3.0 + nzbins: 301 + columns_file: ./data/bpz_ugrizy.columns + data_path: ./data/example/rail-bpz-inputs + spectra_file: CWWSB4.list + prior_band: i + ref_band: i + # Not sure about this + prior_file: hdfn_gen + p_min: 0.005 + gauss_kernel: 0.0 + mag_err_min: 0.005 + inform_options: {'save_train': False, 'load_model': False, 'modelfile': 'BPZpriormodel.out'} + madau_reddening: no + bands: riz + zp_errors: [0.01, 0.01, 0.01, 0.01, 0.01, 0.01] + hdf5_groupname: photometry + + +PZEstimatorLens: + name: PZEstimatorLens + classname: BPZ_lite + aliases: + model: lens_photoz_model + input: photometry_catalog + output: lens_photoz_pdfs + zmin: 0.0 + zmax: 3.0 + dz: 0.01 + nzbins: 301 + band_names: [mag_u, mag_g, mag_r, mag_i, mag_z, mag_y] + bands: [mag_u, mag_g, mag_r, mag_i, mag_z, mag_y] + err_bands: [mag_err_u, mag_err_g, mag_err_r, mag_err_i, mag_err_z, mag_err_y] + hdf5_groupname: photometry + nondetect_val: .inf + columns_file: ./data/bpz_ugrizy.columns + data_path: ./data/example/rail-bpz-inputs + spectra_file: CWWSB4.list + prior_band: mag_i + ref_band: mag_i + prior_file: hdfn_gen + p_min: 0.005 + gauss_kernel: 0.0 + zp_errors: [0.01, 0.01, 0.01, 0.01, 0.01, 0.01] + mag_err_min: 0.005 + madau_reddening: false + mag_limits: + mag_u: 27.79 + mag_g: 29.04 + mag_r: 29.06 + mag_i: 28.62 + mag_z: 27.98 + mag_y: 27.05 + + + +# Mock version of stacking: +TXSourceTrueNumberDensity: + nz: 301 + zmax: 3.0 + + +TXSourceSelectorMetacal: + input_pz: True + true_z: False + bands: riz #used for selection + T_cut: 0.5 + s2n_cut: 10.0 + max_rows: 10000000000 + delta_gamma: 0.02 + source_zbin_edges: [0.0, 0.2, 0.4, 0.6, 0.8] + shear_prefix: mcal_ + use_diagonal_response : True + +TXTruthLensSelector: + # Mag cuts + input_pz: False + true_z: True + 
lens_zbin_edges: [0.1, 0.3, 0.5] + cperp_cut: 0.2 + r_cpar_cut: 13.5 + r_lo_cut: 16.0 + r_hi_cut: 21.6 + i_lo_cut: 17.5 + i_hi_cut: 21.9 + r_i_cut: 2.0 + # may also need one for r_cpar_cut + +TXMeanLensSelector: + # Mag cuts + lens_zbin_edges: [0.0,0.2,0.4] + cperp_cut: 0.2 + r_cpar_cut: 13.5 + r_lo_cut: 16.0 + r_hi_cut: 21.6 + i_lo_cut: 17.5 + i_hi_cut: 21.9 + r_i_cut: 2.0 + +TXModeLensSelector: + # Mag cuts + lens_zbin_edges: [0.0,0.2,0.4] + cperp_cut: 0.2 + r_cpar_cut: 13.5 + r_lo_cut: 16.0 + r_hi_cut: 21.6 + i_lo_cut: 17.5 + i_hi_cut: 21.9 + r_i_cut: 2.0 + +TXRandomCat: + density: 10 # gals per sq arcmin + +TXTwoPoint: + bin_slop: 0.1 + delta_gamma: 0.02 + do_pos_pos: True + do_shear_shear: True + do_shear_pos: True + flip_g2: True # use true when using metacal shears + min_sep: 2.5 + max_sep: 60.0 + nbins: 10 + verbose: 0 + subtract_mean_shear: True + +TXGammaTBrightStars: {} + +TXGammaTDimStars: {} + +TXGammaTRandoms: {} + +TXGammaTFieldCenters: {} + +TXBlinding: + seed: 1972 ## seed uniquely specifies the shift in parameters + Omega_b: [0.0485, 0.001] ## fiducial_model_value, shift_sigma + Omega_c: [0.2545, 0.01] + w0: [-1.0, 0.1] + h: [0.682, 0.02] + sigma8: [0.801, 0.01] + n_s: [0.971, 0.03] + b0: 0.95 ## we use bias of the form b0/g + delete_unblinded: True + +TXSourceDiagnosticPlots: + psf_prefix: mcal_psf_ + shear_prefix: mcal_ + nbins: 20 + g_min: -0.01 + g_max: 0.01 + psfT_min: 0.2 + psfT_max: 0.28 + T_min: 0.01 + T_max: 2.1 + s2n_min: 1.25 + s2n_max: 300 + +TXLensDiagnosticPlots: {} + +TXDiagnosticMaps: + sparse: True # Generate sparse maps - faster if using small areas + snr_threshold: 10.0 + snr_delta: 1.0 + # pixelization: gnomonic + pixel_size: 0.2 + ra_cent: 62. + dec_cent: -35. + npix_x: 60 + npix_y: 60 + depth_cut: 23.0 + psf_prefix: mcal_psf_ + +TXSourceMaps: + sparse: True # Generate sparse maps - faster if using small areas + +TXLensMaps: + sparse: True # Generate sparse maps - faster if using small areas + +# For redmagic mapping +TXExternalLensMaps: + chunk_rows: 100000 + sparse: True + pixelization: healpix + nside: 512 + + +TXSourceMaps: {} +TXLensMaps: {} + +TXAuxiliarySourceMaps: + flag_exponent_max: 8 + psf_prefix: psf_ + +TXAuxiliaryLensMaps: + flag_exponent_max: 8 + bright_obj_threshold: 22.0 # The magnitude threshold for a object to be counted as bright + depth_band : i + snr_threshold: 10.0 # The S/N value to generate maps for (e.g. 5 for 5-sigma depth) + snr_delta: 1.0 # The range threshold +/- delta is used for finding objects at the boundary + +TXRealGaussianCovariance: + min_sep: 2.5 + max_sep: 60. 
+ nbins: 10 + pickled_wigner_transform: data/example/inputs/wigner.pkl + + +TXJackknifeCenters: + npatch: 5 + + +TXTwoPointFourier: + flip_g2: True + ell_min: 30 + ell_max: 150 + bandwidth: 20 + cache_dir: ./cache/workspaces + +TXSimpleMask: + depth_cut : 23.5 + bright_object_max: 10.0 + +TXMapCorrelations: + supreme_path_root: data/example/inputs/supreme + outlier_fraction: 0.05 + nbin: 20 + + +PZRailSummarizeLens: + leafsize: 20 + zmin: 0.0 + zmax: 3.0 + nzbins: 50 + name: PZRailSummarizeLens + catalog_group: "photometry" + tomography_name: "lens" + aliases: + tomography_catalog: lens_tomography_catalog + photometry_catalog: photometry_catalog + model: lens_direct_calibration_model + photoz_stack: lens_photoz_stack + model: None + + +PZRailSummarizeSource: + leafsize: 20 + zmin: 0.0 + zmax: 3.0 + nzbins: 50 + mag_prefix: "/shear/mcal_mag_" + tomography_name: "source" + name: PZRailSummarizeSource + aliases: + tomography_catalog: shear_tomography_catalog + photometry_catalog: shear_catalog + model: source_direct_calibration_model + photoz_stack: shear_photoz_stack + + +TXPhotozPlotLens: + name: TXPhotozPlotLens + aliases: + photoz_stack: lens_photoz_stack + nz_plot: lens_nz + +TXPhotozPlotSource: + name: TXPhotozPlotSource + aliases: + photoz_stack: shear_photoz_stack + nz_plot: source_nz + +FlowCreator: + n_samples: 1000000 + seed: 5763248 + aliases: + # This input was generated using get_example_flow in pzflow, + # not something specific. + output: ideal_specz_catalog + model: flow + +InvRedshiftIncompleteness: + pivot_redshift: 0.8 + aliases: + input: ideal_specz_catalog + output: specz_catalog_pq + +GridSelection: + aliases: + input: ideal_specz_catalog + output: specz_catalog_pq + redshift_cut: 5.1 + ratio_file: data/example/inputs/hsc_ratios_and_specz.hdf5 + settings_file: data/example/inputs/HSC_grid_settings.pkl + random_seed: 66 + pessimistic_redshift_cut: 1.0 + + + + +TXParqetToHDF: + hdf_group: photometry + aliases: + input: specz_catalog_pq + output: spectroscopic_catalog + + +Inform_NZDirSource: + name: Inform_NZDirSource + usecols: [r, i, z] + hdf5_groupname: photometry + aliases: + input: spectroscopic_catalog + model: source_direct_calibration_model + +Inform_NZDirLens: + name: Inform_NZDirLens + usecols: [u, g, r, i, z, "y"] + hdf5_groupname: photometry + aliases: + input: spectroscopic_catalog + model: lens_direct_calibration_model diff --git a/examples/desy3/pipeline_Bmodes.yml b/examples/desy3/pipeline_Bmodes.yml new file mode 100644 index 000000000..e3c9b04bb --- /dev/null +++ b/examples/desy3/pipeline_Bmodes.yml @@ -0,0 +1,152 @@ + +# Stages to run +stages: + #- name: TXBrighterFatterPlot # Make plots tracking the brighter-fatter effect + - name: TXPSFDiagnostics # Compute and plots other PSF diagnostics + #- name: TXRoweStatistics # Compute and plot Rowe statistics + # threads_per_process: 2 + - name: TXAuxiliaryLensMaps # make depth and bright object maps + - name: TXSimpleMask # combine maps to make a simple mask + - name: TXSourceSelectorMetacal # select and split objects into source bins + - name: TXSourceNoiseMaps # Compute shear noise using rotations + - name: TXShearCalibration # Calibrate and split the source sample tomographically + - name: TXSourceMaps # make source g1 and g2 maps + - name: TXConvergenceMaps # Make convergence kappa maps from g1, g2 maps + - name: TXAuxiliarySourceMaps # make PSF and flag maps + - name: FlowCreator # Simulate a spectroscopic population + - name: GridSelection # Simulate a spectroscopic sample + - name: TXParqetToHDF # 
Convert the spec sample format
  - name: PZPrepareEstimatorLens # Prepare the p(z) estimator
    classname: Inform_BPZ_lite
  - name: PZEstimatorLens # Measure lens galaxy PDFs
    classname: BPZ_lite
  - name: TXMeanLensSelector # select objects for lens bins from the PDFs
  - name: Inform_NZDirLens # Prepare the DIR method inputs for the lens sample
    classname: Inform_NZDir
  - name: Inform_NZDirSource # Prepare the DIR method inputs for the source sample
    classname: Inform_NZDir
  - name: PZRailSummarizeLens # Run the DIR method on the lens sample to find n(z)
    classname: PZRailSummarize
  - name: PZRailSummarizeSource # Run the DIR method on the source sample to find n(z)
    classname: PZRailSummarize
  - name: TXLensCatalogSplitter # Split the lens sample tomographically
  - name: TXLSSWeightsUnit # add systematic weights to the lens sample (weight=1 for this example)
  #- name: TXStarCatalogSplitter # Split the star catalog into separate bins (psf/non-psf)
  #- name: TXSourceMaps # make source g1 and g2 maps
  #- name: TXLensMaps # make source lens and n_gal maps
  #- name: TXAuxiliarySourceMaps # make PSF and flag maps
  #- name: TXAuxiliaryLensMaps # make depth and bright object maps
  #- name: TXSimpleMask # combine maps to make a simple mask
  #- name: TXLSSWeightsUnit # add systematic weights to the lens sample (weight=1 for this example)
  #- name: TXSourceNoiseMaps # Compute shear noise using rotations
  #- name: TXLensNoiseMaps # Compute lens noise using half-splits
  #- name: TXDensityMaps # turn mask and ngal maps into overdensity maps
  #- name: TXMapPlots # make pictures of all the maps
  #- name: TXTracerMetadata # collate metadata
  #- name: TXRandomCat # generate lens bin random catalogs
  #- name: TXJackknifeCenters # Split the area into jackknife regions
  #- name: TXTwoPoint # Compute real-space 2-point correlations
  # threads_per_process: 2
  #- name: TXBlinding # Blind the data following Muir et al
  # threads_per_process: 2
  #- name: TXTwoPointTheoryReal # compute theory using CCL to save in sacc file and plot later
  #- name: TXTwoPointPlotsTheory # Make plots of 2pt correlations
  #- name: TXSourceDiagnosticPlots # Make a suite of diagnostic plots
  #- name: TXLensDiagnosticPlots # Make a suite of diagnostic plots
  #- name: TXGammaTFieldCenters # Compute and plot gamma_t around center points
  # threads_per_process: 2
  #- name: TXGammaTStars # Compute and plot gamma_t around bright stars
  # threads_per_process: 2
  #- name: TXGammaTRandoms # Compute and plot gamma_t around randoms
  # threads_per_process: 2
  #- name: TXGalaxyStarDensity # Compute and plot the star-galaxy density cross-correlation
  #- name: TXGalaxyStarShear # Compute and plot the star-galaxy shear cross-correlation
  #- name: TXPhotozPlotSource # Plot the bin n(z)
  # classname: TXPhotozPlot
  #- name: TXPhotozPlotLens # Plot the bin n(z)
  # classname: TXPhotozPlot
  #- name: TXConvergenceMaps # Make convergence kappa maps from g1, g2 maps
  #- name: TXConvergenceMapPlots # Plot the convergence map
  #- name: TXMapCorrelations # plot the correlations between systematics and data
  #- name: TXApertureMass # Compute aperture-mass statistics
  # threads_per_process: 2
  #- name: TXTwoPointFourier # Compute power spectra C_ell
  # Disabling this since not yet synchronised with new Treecorr MPI
  # - name: TXSelfCalibrationIA # Self-calibration intrinsic alignments of galaxies
  # Disabling these as they take too long for a quick test.
+ # - name: TXRealGaussianCovariance # Compute covariance of real-space correlations + # - name: TXTwoPointFourier # Compute power spectra C_ell + # - name: TXFourierNamasterCovariance # Compute the C_ell covariance + # - name: TXFourierTJPCovariance # Compute the C_ell covariance with TJPCov + + +# modules and packages to import that have pipeline +# stages defined in them +modules: > + txpipe + rail.creation.degradation.grid_selection + rail.creation.engines.flowEngine + rail.estimation.algos.NZDir + rail.estimation.algos.bpz_lite + +# where to find any modules that are not in this repo, +# and any other code we need. +python_paths: + - submodules/WLMassMap/python/desc/ + +# Where to put outputs +output_dir: data/desy3a/outputs + +# How to run the pipeline: mini, parsl, or cwl +launcher: + name: mini + interval: 1.0 + +# Where to run the pipeline: cori-interactive, cori-batch, or local +site: + name: local + max_threads: 2 + +# configuration settings +config: examples/desy3/config_Bmodes.yml + +# These are overall inputs to the whole pipeline, not generated within it +inputs: + # See README for paths to download these files + #shear_catalog: data/example/inputs/shear_catalog.hdf5 + shear_catalog: //global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/shear_catalog_desy3_unmasked_withfakez_v2.h5 + photometry_catalog: data/example/inputs/photometry_catalog.hdf5 + calibration_table: data/example/inputs/sample_cosmodc2_w10year_errors.dat + exposures: data/example/inputs/exposures.hdf5 + #star_catalog: data/example/inputs/star_catalog.hdf5 + star_catalog: /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/DES_psf_y3_catalog.hdf5 + #lens_tomography_catalog: /global/cfs/cdirs/lsst/groups/WL/users/yomori/repo/aaa/TXPipe/data/desy3a/outputs/lens_tomography_catalog_unweighted.hdf5 ##############<----- manually add this line + # This file comes with the code + fiducial_cosmology: data/fiducial_cosmology.yml + # For the self-calibration extension we are not using Random_cat_source for now + # So it is set to Null, so the yaml intepreter returns a None value to python. 
+ random_cats_source: Null + flow: data/example/inputs/example_flow.pkl + +# These are overall inputs to the whole pipeline, not generated within it +#inputs: +# shear_catalog: //global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/shear_catalog_desy3_unmasked_withfakez_v2.h5 +# photometry_catalog: +# #shear_tomography_catalog: /global/cfs/cdirs/desc-wl/projects/txpipe-sys-tests/des-y3/shear_tomography_desy3_unmasked_test.h5 +# lens_tomography_catalog: /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y1/lens_tomography_catalog_desy1_RM.h5 +# lens_photoz_stack: +# mask: /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y1/mask_desy1.h5 +# star_catalog: /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/DES_psf_y3_catalog.h5 +# calibration_table: data/example/inputs/sample_cosmodc2_w10year_errors.dat +# fiducial_cosmology: data/fiducial_cosmology.yml +# random_cats_source: Null +# flow: data/example/inputs/example_flow.pkl + + +# if supported by the launcher, restart the pipeline where it left off +# if interrupted +resume: True +# where to put output logs for individual stages +log_dir: data/desy3a/logs +# where to put an overall parsl pipeline log +pipeline_log: data/desy3a/log.txt From 461ada83566ce5769b685b88a4b1a3e07f7cd4a7 Mon Sep 17 00:00:00 2001 From: yomori Date: Fri, 16 Feb 2024 08:03:27 -0800 Subject: [PATCH 07/17] fix npdf index --- txpipe/data_types/types.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/txpipe/data_types/types.py b/txpipe/data_types/types.py index 9dd7e9e42..6d23d35ba 100755 --- a/txpipe/data_types/types.py +++ b/txpipe/data_types/types.py @@ -528,7 +528,7 @@ def get_2d_n_of_z(self, zmax=3.0, nz=301): def get_bin_n_of_z(self, bin_index, zmax=3.0, nz=301): ensemble = self.read_ensemble() npdf = ensemble.npdf - if bin_index >= npdf - 1: + if bin_index > npdf - 1: raise ValueError(f"Requested n(z) for bin {bin_index} but only {npdf-1} bins available. 
For the 2D bin use get_2d_n_of_z.") z = np.linspace(0, zmax, nz) return z, ensemble.pdf(z)[bin_index] From 43ebf0b5db55bd19fc5e683e36403fff9288e156 Mon Sep 17 00:00:00 2001 From: yomori Date: Fri, 16 Feb 2024 08:08:50 -0800 Subject: [PATCH 08/17] update --- examples/desy3/config_Bmodes.yml | 7 ++- examples/desy3/pipeline_Bmodes.yml | 74 +++++++++++++++++++++--------- 2 files changed, 58 insertions(+), 23 deletions(-) diff --git a/examples/desy3/config_Bmodes.yml b/examples/desy3/config_Bmodes.yml index 9fbc51c8a..099a95aee 100644 --- a/examples/desy3/config_Bmodes.yml +++ b/examples/desy3/config_Bmodes.yml @@ -7,7 +7,7 @@ global: chunk_rows: 100000 # These mapping options are also read by a range of stages pixelization: healpix - nside: 64 + nside: 128 sparse: True # Generate sparse maps - faster if using small areas TXGCRTwoCatalogInput: @@ -249,6 +249,7 @@ TXSourceMaps: TXLensMaps: sparse: True # Generate sparse maps - faster if using small areas + nside: 128 # For redmagic mapping TXExternalLensMaps: @@ -261,6 +262,10 @@ TXExternalLensMaps: TXSourceMaps: {} TXLensMaps: {} +TXDensityMaps: + nside: 128 + + TXAuxiliarySourceMaps: flag_exponent_max: 8 psf_prefix: psf_ diff --git a/examples/desy3/pipeline_Bmodes.yml b/examples/desy3/pipeline_Bmodes.yml index e3c9b04bb..e1db26c2d 100644 --- a/examples/desy3/pipeline_Bmodes.yml +++ b/examples/desy3/pipeline_Bmodes.yml @@ -12,7 +12,7 @@ stages: - name: TXShearCalibration # Calibrate and split the source sample tomographically - name: TXSourceMaps # make source g1 and g2 maps - name: TXConvergenceMaps # Make convergence kappa maps from g1, g2 maps - - name: TXAuxiliarySourceMaps # make PSF and flag maps + #- name: TXAuxiliarySourceMaps # make PSF and flag maps - name: FlowCreator # Simulate a spectroscopic population - name: GridSelection # Simulate a spectroscopic sample - name: TXParqetToHDF # Convert the spec sample format @@ -27,10 +27,15 @@ stages: classname: Inform_NZDir - name: PZRailSummarizeLens # Run the DIR method on the lens sample to find n(z) classname: PZRailSummarize - - name: PZRailSummarizeSource # Run the DIR method on the lens sample to find n(z) - classname: PZRailSummarize - - name: TXLensCatalogSplitter # Split the lens sample tomographically - - name: TXLSSWeightsUnit # add systematic weights to the lens sample (weight=1 for this example) + #- name: PZRailSummarizeSource # Run the DIR method on the lens sample to find n(z) + # classname: PZRailSummarize + - name: TXLensCatalogSplitter # Split the lens sample tomographically + - name: TXLSSWeightsUnit # add systematic weights to the lens sample (weight=1 for this example) + - name: TXTracerMetadata # collate metadata + - name: TXLensNoiseMaps # Compute lens noise using half-splits + - name: TXLensMaps # make source lens and n_gal maps + - name: TXDensityMaps # turn mask and ngal maps into overdensity maps + - name: TXTwoPointFourier # Compute power spectra C_ell #- name: TXStarCatalogSplitter # Split the star catalog into separate bins (psf/non-psf) #- name: TXSourceMaps # make source g1 and g2 maps #- name: TXLensMaps # make source lens and n_gal maps @@ -39,9 +44,7 @@ stages: #- name: TXSimpleMask # combine maps to make a simple mask #- name: TXLSSWeightsUnit # add systematic weights to the lens sample (weight=1 for this example) #- name: TXSourceNoiseMaps # Compute shear noise using rotations - #- name: TXLensNoiseMaps # Compute lens noise using half-splits #- name: TXDensityMaps # turn mask and ngal maps into overdensity maps - #- name: TXMapPlots # make pictures of 
all the maps #- name: TXTracerMetadata # collate metadata #- name: TXRandomCat # generate lens bin random catalogs #- name: TXJackknifeCenters # Split the area into jackknife regions @@ -61,10 +64,10 @@ stages: # threads_per_process: 2 #- name: TXGalaxyStarDensity # Compute and plot the star-galaxy density cross-correlation #- name: TXGalaxyStarShear # Compute and plot the star-galaxy shear cross-correlation - #- name: TXPhotozPlotSource # Plot the bin n(z) - # classname: TXPhotozPlot - #- name: TXPhotozPlotLens # Plot the bin n(z) - # classname: TXPhotozPlot + - name: TXPhotozPlotSource # Plot the bin n(z) + classname: TXPhotozPlot + - name: TXPhotozPlotLens # Plot the bin n(z) + classname: TXPhotozPlot #- name: TXConvergenceMaps # Make convergence kappa maps from g1, g2 maps #- name: TXConvergenceMapPlots # Plot the convergence map #- name: TXMapCorrelations # plot the correlations between systematics and data @@ -113,21 +116,18 @@ config: examples/desy3/config_Bmodes.yml # These are overall inputs to the whole pipeline, not generated within it inputs: # See README for paths to download these files - #shear_catalog: data/example/inputs/shear_catalog.hdf5 - shear_catalog: //global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/shear_catalog_desy3_unmasked_withfakez_v2.h5 - photometry_catalog: data/example/inputs/photometry_catalog.hdf5 - calibration_table: data/example/inputs/sample_cosmodc2_w10year_errors.dat - exposures: data/example/inputs/exposures.hdf5 - #star_catalog: data/example/inputs/star_catalog.hdf5 - star_catalog: /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/DES_psf_y3_catalog.hdf5 + shear_catalog : /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/shear_catalog_desy3_unmasked_withfakez_v2.h5 + star_catalog : /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/DES_psf_y3_catalog.hdf5 #lens_tomography_catalog: /global/cfs/cdirs/lsst/groups/WL/users/yomori/repo/aaa/TXPipe/data/desy3a/outputs/lens_tomography_catalog_unweighted.hdf5 ##############<----- manually add this line # This file comes with the code fiducial_cosmology: data/fiducial_cosmology.yml - # For the self-calibration extension we are not using Random_cat_source for now - # So it is set to Null, so the yaml intepreter returns a None value to python. 
+ shear_photoz_stack: data/example/outputs/shear_photoz_stack_manual.hdf5 + photometry_catalog: data/example/inputs/photometry_catalog.hdf5 + calibration_table : data/example/inputs/sample_cosmodc2_w10year_errors.dat + exposures : data/example/inputs/exposures.hdf5 + flow : data/example/inputs/example_flow.pkl random_cats_source: Null - flow: data/example/inputs/example_flow.pkl - + # These are overall inputs to the whole pipeline, not generated within it #inputs: # shear_catalog: //global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/shear_catalog_desy3_unmasked_withfakez_v2.h5 @@ -150,3 +150,33 @@ resume: True log_dir: data/desy3a/logs # where to put an overall parsl pipeline log pipeline_log: data/desy3a/log.txt + + + + + +########################################################################################### + +#'/global/cfs/cdirs/desc-wl/users/yomori/repo/aaa/TXPipe/data/desy3a/outputs/manual_shear_photoz_stack.ipynb' + +#from astropy.io import fits + +## http://desdr-server.ncsa.illinois.edu/despublic/y3a2_files/datavectors/2pt_NG_final_2ptunblind_02_26_21_wnz_maglim_covupdate.fits +#d=fits.open('2pt_NG_final_2ptunblind_02_26_21_wnz_maglim_covupdate.fits') + +#bins=np.append(d['nz_source'].data['Z_LOW'],d['nz_source'].data['Z_HIGH'][-1]) + +#nz1 = d['nz_source'].data['BIN1'] +#nz2 = d['nz_source'].data['BIN2'] +#nz3 = d['nz_source'].data['BIN3'] +#nz4 = d['nz_source'].data['BIN4'] + +#with h5py.File('shear_photoz_stack_manual.hdf5', 'w') as f: +# f.create_group("qp") +# f.create_group("qp/data") +# f.create_group("qp/meta") +# f['qp/meta/bins'] = bins[np.newaxis] +# f['qp/meta/pdf_name'] = np.array([b'hist'], dtype='|S4') +# f['qp/meta/pdf_version'] = [0] +# f['qp/data/pdfs'] = (np.c_[nz1,nz2,nz3,nz4]).T + \ No newline at end of file From be3c7d9aab4d6f9350dae289db88cd749ebed0bb Mon Sep 17 00:00:00 2001 From: yomori Date: Sat, 17 Feb 2024 12:52:19 -0800 Subject: [PATCH 09/17] add modifications --- txpipe/twopoint_fourier.py | 273 +++++++++++++++++++++++++------------ 1 file changed, 184 insertions(+), 89 deletions(-) diff --git a/txpipe/twopoint_fourier.py b/txpipe/twopoint_fourier.py index 1b7ae9fac..a6fdd1f6f 100755 --- a/txpipe/twopoint_fourier.py +++ b/txpipe/twopoint_fourier.py @@ -37,95 +37,6 @@ ) -class TXpureB(TXTwoPointFourier): - ''' - Make shear B-mode measurements - ''' - name = "TXpureB" - - config_options = { - 'method' : 'becker' # 'namaster' or 'becker' - 'Nell' : 20, # Number of ell bins - 'lmin' : 200, # ell min - 'lmax' : 2000, # ell max - 'lspacing' : 'log' # ell spacing for binning - 'bin_file' : None, # specifiy bins - 'theta_min' : 2.5, # (only for hybrideb) theta min in arcmin - 'theta_max' : 250, # (only for hybrideb) theta max in arcmin - 'Ntheta' : 1000, # (only for hybrideb) Number of theta bins - 'precomputed_hybrideb_weights' : None - } - - def run(self): - import pymaster - import healpy - import sacc - import pyccl - import hybrideb - - if method == 'namaster': - ''' - B-mode method that is already implemented in NaMaster - ''' - # Create bins !!!!!!!!!!!!!!!!!!!!! 
fix - b = nmt.NmtBin.from_nside_linear(hp.npix2nside(mask.shape[0]), self.config["Nell"]) - - # Initialize the fields - f2yp0 = nmt.NmtField(mask, [dmap[1], dmap[2]], purify_e=False, purify_b=True) - - # Initialize workspace and compute coupling matrix - w_yp = nmt.NmtWorkspace() - w_yp.compute_coupling_matrix(f2yp0, f2yp0, b) - - #Compute spectra - cl_coupled = nmt.compute_coupled_cell(f2yp0, f2yp0) - cl_decoupled = w_yp.decouple_cell(cl_coupled) - - #return ell bins and the deconvolved clbb - return b.get_effective_ells(), cl_decoupled - - - elif method == 'becker' - ''' - B-mode method of Becker and Rozo 2015 http://arxiv.org/abs/1412.3851 - ''' - if self.config["precomputed_hybrideb_weights"] is None: - # Recomputing all weight functinons -- slow - heb = hybrideb.HybridEB(self.config['theta_min'], self.config['theta_max'], self.config['Ntheta']) - beb = hybrideb.BinEB(self.config['theta_min'], self.config['theta_max'], self.config['Ntheta']) - geb = hybrideb.GaussEB(beb, heb) - - geb_dict = {} - for i in range(0,self.config['Nell']): - geb_dict['%d'%(i+1)] = {} - for j in range(0,6): - geb_dict['%d'%(i+1)]['%d'%(j+1)]=geb(i)[j] - - np.savez('/lcrc/project/SPT3G/users/ac.yomori/repo/nulltests_txpipe/geb_nosep_dict.npz', **geb_dict) - - else: - # Loading precomputed weight functions - geb = np.load(self.config['precomputed_hybrideb_weights'],allow_pickle=True) - - En = np.zeros(Nl) - Bn = np.zeros(Nl) - ell = np.zeros(Nl) - - for i in range(int(Nl)): - - # with-separation calculation - res = geb['%d'%(i+1)].tolist() - Fp = res['2'] - Fm = res['3'] - En[i] = np.sum(Fp*xip + Fm*xim)/2 - Bn[i] = np.sum(Fp*xip - Fm*xim)/2 - ell[i] = res['4'][np.argmax(res['5'])] - - return ell, Bn - - else: - sys.exit("The only method implemented right now are 'becker' or 'namaster' ") - class TXTwoPointFourier(PipelineStage): """ Make Fourier space 3x2pt measurements using NaMaster @@ -998,5 +909,189 @@ def save_power_spectra(self, tracer_sacc, nbin_source, nbin_lens): S.save_fits(output_filename, overwrite=True) + +class TXpureB(PipelineStage): + ''' + Make shear B-mode measurements + ''' + name = "TXpureB" + parallel = False + + inputs = [ + ("shear_photoz_stack", QPNOfZFile), + ("source_maps", MapsFile), + ("mask" , MapsFile), + ("twopoint_data_real_raw", SACCFile), + ] + + outputs = [("twopoint_data_fourier_pureB", SACCFile)] + + config_options = { + 'method' : 'hybrideb', + 'Nell' : 20, + 'lmin' : 200, + 'lmax' : 2000, + 'lspacing' : 'log', + 'bin_file' : "", + 'theta_min' : 2.5, + 'theta_max' : 250, + 'Ntheta' : 1000, + 'precomputed_hybrideb_weights' : "" + } + + def run(self): + import pymaster as nmt + import healpy as hp + import sacc + import pyccl + import hybrideb + import datetime + + if self.config["method"] == 'namaster': + ''' + B-mode method that is already implemented in NaMaster + ''' + print('----------------namaster-----------') + + # Open mask + with self.open_input("mask", wrapper=True) as f: + mask = f.read_map("mask") + + # Open source maps (g1,g2,weights) + with self.open_input("source_maps", wrapper=True) as f: + nbin_source = f.file["maps"].attrs["nbin_source"] + g1_maps = [f.read_map(f"g1_{b}") for b in range(nbin_source)] + g2_maps = [f.read_map(f"g2_{b}") for b in range(nbin_source)] + weights = [f.read_map(f"lensing_weight_{b}") for b in range(nbin_source)] + + # Define binning + b = nmt.NmtBin.from_nside_linear(hp.npix2nside(mask.shape[0]), self.config["Nell"]) + + # Initialize the fields + fields = {} + for i in range(nbin_source): + fields[i] = nmt.NmtField(mask, 
[g1_maps[i], g2_maps[i]], purify_e=False, purify_b=True) + + # Compute Cls and store them in dictionrary + results = {} + for zi,f1 in enumerate(fields.keys()) : + for zj,f2 in enumerate(fields.keys()): + field1 = fields[f1] + field2 = fields[f2] + w_yp = nmt.NmtWorkspace() + w_yp.compute_coupling_matrix(field1, field2, b) + + cl_coupled = nmt.compute_coupled_cell(field1,field2) + cl_decoupled = w_yp.decouple_cell(cl_coupled) + results[('source_%d'%zi,'source_%d'%zj)] = cl_decoupled + + results['ell'] = b.get_effective_ells() + self.results = results + + + elif self.config["method"] == 'hybrideb': + ''' + B-mode method of Becker and Rozo 2015 http://arxiv.org/abs/1412.3851 + ''' + #print(self.config['precomputed_hybrideb_weights'] ); sys.exit() + print('--------------hybrideb-----------') + if self.config["precomputed_hybrideb_weights"] == "": + # Recomputing all weight functinons -- This is a slow rocedure + heb = hybrideb.HybridEB(self.config['theta_min'], self.config['theta_max'], self.config['Ntheta']) + beb = hybrideb.BinEB(self.config['theta_min'], self.config['theta_max'], self.config['Ntheta']) + geb = hybrideb.GaussEB(beb, heb) + + geb_dict = {} + for i in range(0,self.config['Nell']): + geb_dict['%d'%(i+1)] = {} + for j in range(0,6): + geb_dict['%d'%(i+1)]['%d'%(j+1)]=geb(i)[j] + + np.savez('geb_nosep_dict.npz', **geb_dict) + + else: + # Loading precomputed weight functions to speed up computation + geb = np.load(self.config['precomputed_hybrideb_weights'],allow_pickle=True) + + En = np.zeros(self.config["Nell"]) + Bn = np.zeros(self.config["Nell"]) + ell = np.zeros(self.config["Nell"]) + + filename = self.get_input("twopoint_data_real_raw") + s = sacc.Sacc.load_fits(filename) + + Qxip = sacc.standard_types.galaxy_shear_xi_plus + + source_tracers = set() + for b1, b2 in s.get_tracer_combinations(Qxip): + source_tracers.add(b1) + source_tracers.add(b2) + + nbin_source = max(len(source_tracers), 1) + + results= {} + + for zi in range(0,nbin_source): + for zj in range(0,nbin_source): + xip = s.get_mean("galaxy_shear_xi_plus",('source_0', 'source_0') ) + xim = s.get_mean("galaxy_shear_xi_plus",('source_0', 'source_0') ) + + for i in range(int(self.config["Nell"])): + + # with-separation calculation + res = geb['%d'%(i+1)].tolist() + Fp = res['2'] + Fm = res['3'] + En[i] = np.sum(Fp*xip + Fm*xim)/2 + Bn[i] = np.sum(Fp*xip - Fm*xim)/2 + ell[i] = res['4'][np.argmax(res['5'])] + + results[('source_%d'%zi,'source_%d'%zj)] = Bn + results['ell'] = ell + + self.results = results + + else: + sys.exit("The only method implemented right now are 'hybrideb' or 'namaster' ") + + + print('Saving pureB Cls in sacc file') + self.save_power_spectra(nbin_source) + + + def save_power_spectra(self, nbin_source): + import sacc + import datetime + + S = sacc.Sacc() + S.metadata['nbin_source'] = nbin_source + S.metadata['creation'] = datetime.datetime.now().isoformat() + S.metadata['method'] = self.config['method'] + S.metadata['info'] = 'ClBB using pureB' + + CBB = sacc.standard_types.galaxy_shear_cl_bb + + with self.open_input("shear_photoz_stack", wrapper=True) as f: + for i in range(nbin_source): + z, Nz = f.get_bin_n_of_z(i) + S.add_tracer("NZ", f"source_{i}", z, Nz) + + for zi in range(0,nbin_source): + for zj in range(0,nbin_source): + tracer1 = "source_%d"%zi + tracer2 = "source_%d"%zj + #import pdb; pdb.set_trace() + val = self.results[(tracer1,tracer2)] + ell = self.results['ell'] + for k in range(0,len(val)): + S.add_data_point(CBB, (tracer1, tracer2), value=val[k], ell=ell[k]) + + # And we're 
all done! + output_filename = self.get_output("twopoint_data_fourier_pureB") + S.save_fits(output_filename, overwrite=True) + + + + if __name__ == "__main__": PipelineStage.main() From 5bae2e8c01a4d410f1a64285f57f8be6a51fe54c Mon Sep 17 00:00:00 2001 From: yomori Date: Sat, 17 Feb 2024 12:57:03 -0800 Subject: [PATCH 10/17] revert twopoint_fourier.py and move pureB to twopoint_null_tests.py --- txpipe/twopoint_fourier.py | 184 ------------------------------------- 1 file changed, 184 deletions(-) diff --git a/txpipe/twopoint_fourier.py b/txpipe/twopoint_fourier.py index a6fdd1f6f..e04993d32 100755 --- a/txpipe/twopoint_fourier.py +++ b/txpipe/twopoint_fourier.py @@ -909,189 +909,5 @@ def save_power_spectra(self, tracer_sacc, nbin_source, nbin_lens): S.save_fits(output_filename, overwrite=True) - -class TXpureB(PipelineStage): - ''' - Make shear B-mode measurements - ''' - name = "TXpureB" - parallel = False - - inputs = [ - ("shear_photoz_stack", QPNOfZFile), - ("source_maps", MapsFile), - ("mask" , MapsFile), - ("twopoint_data_real_raw", SACCFile), - ] - - outputs = [("twopoint_data_fourier_pureB", SACCFile)] - - config_options = { - 'method' : 'hybrideb', - 'Nell' : 20, - 'lmin' : 200, - 'lmax' : 2000, - 'lspacing' : 'log', - 'bin_file' : "", - 'theta_min' : 2.5, - 'theta_max' : 250, - 'Ntheta' : 1000, - 'precomputed_hybrideb_weights' : "" - } - - def run(self): - import pymaster as nmt - import healpy as hp - import sacc - import pyccl - import hybrideb - import datetime - - if self.config["method"] == 'namaster': - ''' - B-mode method that is already implemented in NaMaster - ''' - print('----------------namaster-----------') - - # Open mask - with self.open_input("mask", wrapper=True) as f: - mask = f.read_map("mask") - - # Open source maps (g1,g2,weights) - with self.open_input("source_maps", wrapper=True) as f: - nbin_source = f.file["maps"].attrs["nbin_source"] - g1_maps = [f.read_map(f"g1_{b}") for b in range(nbin_source)] - g2_maps = [f.read_map(f"g2_{b}") for b in range(nbin_source)] - weights = [f.read_map(f"lensing_weight_{b}") for b in range(nbin_source)] - - # Define binning - b = nmt.NmtBin.from_nside_linear(hp.npix2nside(mask.shape[0]), self.config["Nell"]) - - # Initialize the fields - fields = {} - for i in range(nbin_source): - fields[i] = nmt.NmtField(mask, [g1_maps[i], g2_maps[i]], purify_e=False, purify_b=True) - - # Compute Cls and store them in dictionrary - results = {} - for zi,f1 in enumerate(fields.keys()) : - for zj,f2 in enumerate(fields.keys()): - field1 = fields[f1] - field2 = fields[f2] - w_yp = nmt.NmtWorkspace() - w_yp.compute_coupling_matrix(field1, field2, b) - - cl_coupled = nmt.compute_coupled_cell(field1,field2) - cl_decoupled = w_yp.decouple_cell(cl_coupled) - results[('source_%d'%zi,'source_%d'%zj)] = cl_decoupled - - results['ell'] = b.get_effective_ells() - self.results = results - - - elif self.config["method"] == 'hybrideb': - ''' - B-mode method of Becker and Rozo 2015 http://arxiv.org/abs/1412.3851 - ''' - #print(self.config['precomputed_hybrideb_weights'] ); sys.exit() - print('--------------hybrideb-----------') - if self.config["precomputed_hybrideb_weights"] == "": - # Recomputing all weight functinons -- This is a slow rocedure - heb = hybrideb.HybridEB(self.config['theta_min'], self.config['theta_max'], self.config['Ntheta']) - beb = hybrideb.BinEB(self.config['theta_min'], self.config['theta_max'], self.config['Ntheta']) - geb = hybrideb.GaussEB(beb, heb) - - geb_dict = {} - for i in range(0,self.config['Nell']): - geb_dict['%d'%(i+1)] 
= {} - for j in range(0,6): - geb_dict['%d'%(i+1)]['%d'%(j+1)]=geb(i)[j] - - np.savez('geb_nosep_dict.npz', **geb_dict) - - else: - # Loading precomputed weight functions to speed up computation - geb = np.load(self.config['precomputed_hybrideb_weights'],allow_pickle=True) - - En = np.zeros(self.config["Nell"]) - Bn = np.zeros(self.config["Nell"]) - ell = np.zeros(self.config["Nell"]) - - filename = self.get_input("twopoint_data_real_raw") - s = sacc.Sacc.load_fits(filename) - - Qxip = sacc.standard_types.galaxy_shear_xi_plus - - source_tracers = set() - for b1, b2 in s.get_tracer_combinations(Qxip): - source_tracers.add(b1) - source_tracers.add(b2) - - nbin_source = max(len(source_tracers), 1) - - results= {} - - for zi in range(0,nbin_source): - for zj in range(0,nbin_source): - xip = s.get_mean("galaxy_shear_xi_plus",('source_0', 'source_0') ) - xim = s.get_mean("galaxy_shear_xi_plus",('source_0', 'source_0') ) - - for i in range(int(self.config["Nell"])): - - # with-separation calculation - res = geb['%d'%(i+1)].tolist() - Fp = res['2'] - Fm = res['3'] - En[i] = np.sum(Fp*xip + Fm*xim)/2 - Bn[i] = np.sum(Fp*xip - Fm*xim)/2 - ell[i] = res['4'][np.argmax(res['5'])] - - results[('source_%d'%zi,'source_%d'%zj)] = Bn - results['ell'] = ell - - self.results = results - - else: - sys.exit("The only method implemented right now are 'hybrideb' or 'namaster' ") - - - print('Saving pureB Cls in sacc file') - self.save_power_spectra(nbin_source) - - - def save_power_spectra(self, nbin_source): - import sacc - import datetime - - S = sacc.Sacc() - S.metadata['nbin_source'] = nbin_source - S.metadata['creation'] = datetime.datetime.now().isoformat() - S.metadata['method'] = self.config['method'] - S.metadata['info'] = 'ClBB using pureB' - - CBB = sacc.standard_types.galaxy_shear_cl_bb - - with self.open_input("shear_photoz_stack", wrapper=True) as f: - for i in range(nbin_source): - z, Nz = f.get_bin_n_of_z(i) - S.add_tracer("NZ", f"source_{i}", z, Nz) - - for zi in range(0,nbin_source): - for zj in range(0,nbin_source): - tracer1 = "source_%d"%zi - tracer2 = "source_%d"%zj - #import pdb; pdb.set_trace() - val = self.results[(tracer1,tracer2)] - ell = self.results['ell'] - for k in range(0,len(val)): - S.add_data_point(CBB, (tracer1, tracer2), value=val[k], ell=ell[k]) - - # And we're all done! 
- output_filename = self.get_output("twopoint_data_fourier_pureB") - S.save_fits(output_filename, overwrite=True) - - - - if __name__ == "__main__": PipelineStage.main() From 857235e22a4bac5f244371dc5aa838d2befaee36 Mon Sep 17 00:00:00 2001 From: yomori Date: Sat, 17 Feb 2024 12:58:26 -0800 Subject: [PATCH 11/17] revert twopoint_fourier.py and move pureB to twopoint_null_tests.py --- txpipe/twopoint_null_tests.py | 185 ++++++++++++++++++++++++++++++++++ 1 file changed, 185 insertions(+) diff --git a/txpipe/twopoint_null_tests.py b/txpipe/twopoint_null_tests.py index b1e8f1191..7f9a8377f 100644 --- a/txpipe/twopoint_null_tests.py +++ b/txpipe/twopoint_null_tests.py @@ -8,6 +8,7 @@ PNGFile, TextFile, QPNOfZFile, + MapsFile ) import numpy as np from .twopoint import TXTwoPoint, SHEAR_SHEAR, SHEAR_POS, POS_POS @@ -766,3 +767,187 @@ def write_output(self, source_list, lens_list, meta, results): # Finally, save the output to Sacc file S.save_fits(self.get_output("aperture_mass_data"), overwrite=True) + + + +class TXpureB(PipelineStage): + ''' + Make shear B-mode measurements + ''' + name = "TXpureB" + parallel = False + + inputs = [ + ("shear_photoz_stack", QPNOfZFile), + ("source_maps", MapsFile), + ("mask" , MapsFile), + ("twopoint_data_real_raw", SACCFile), + ] + + outputs = [("twopoint_data_fourier_pureB", SACCFile)] + + config_options = { + 'method' : 'hybrideb', + 'Nell' : 20, + 'lmin' : 200, + 'lmax' : 2000, + 'lspacing' : 'log', + 'bin_file' : "", + 'theta_min' : 2.5, + 'theta_max' : 250, + 'Ntheta' : 1000, + 'precomputed_hybrideb_weights' : "" + } + + def run(self): + import pymaster as nmt + import healpy as hp + import sacc + import pyccl + import hybrideb + import datetime + + if self.config["method"] == 'namaster': + ''' + B-mode method that is already implemented in NaMaster + ''' + print('----------------namaster-----------') + + # Open mask + with self.open_input("mask", wrapper=True) as f: + mask = f.read_map("mask") + + # Open source maps (g1,g2,weights) + with self.open_input("source_maps", wrapper=True) as f: + nbin_source = f.file["maps"].attrs["nbin_source"] + g1_maps = [f.read_map(f"g1_{b}") for b in range(nbin_source)] + g2_maps = [f.read_map(f"g2_{b}") for b in range(nbin_source)] + weights = [f.read_map(f"lensing_weight_{b}") for b in range(nbin_source)] + + # Define binning + b = nmt.NmtBin.from_nside_linear(hp.npix2nside(mask.shape[0]), self.config["Nell"]) + + # Initialize the fields + fields = {} + for i in range(nbin_source): + fields[i] = nmt.NmtField(mask, [g1_maps[i], g2_maps[i]], purify_e=False, purify_b=True) + + # Compute Cls and store them in dictionrary + results = {} + for zi,f1 in enumerate(fields.keys()) : + for zj,f2 in enumerate(fields.keys()): + field1 = fields[f1] + field2 = fields[f2] + w_yp = nmt.NmtWorkspace() + w_yp.compute_coupling_matrix(field1, field2, b) + + cl_coupled = nmt.compute_coupled_cell(field1,field2) + cl_decoupled = w_yp.decouple_cell(cl_coupled) + results[('source_%d'%zi,'source_%d'%zj)] = cl_decoupled + + results['ell'] = b.get_effective_ells() + self.results = results + + + elif self.config["method"] == 'hybrideb': + ''' + B-mode method of Becker and Rozo 2015 http://arxiv.org/abs/1412.3851 + ''' + #print(self.config['precomputed_hybrideb_weights'] ); sys.exit() + print('--------------hybrideb-----------') + if self.config["precomputed_hybrideb_weights"] == "": + # Recomputing all weight functinons -- This is a slow rocedure + heb = hybrideb.HybridEB(self.config['theta_min'], self.config['theta_max'], self.config['Ntheta']) + 
beb = hybrideb.BinEB(self.config['theta_min'], self.config['theta_max'], self.config['Ntheta']) + geb = hybrideb.GaussEB(beb, heb) + + geb_dict = {} + for i in range(0,self.config['Nell']): + geb_dict['%d'%(i+1)] = {} + for j in range(0,6): + geb_dict['%d'%(i+1)]['%d'%(j+1)]=geb(i)[j] + + np.savez('geb_nosep_dict.npz', **geb_dict) + + else: + # Loading precomputed weight functions to speed up computation + geb = np.load(self.config['precomputed_hybrideb_weights'],allow_pickle=True) + + En = np.zeros(self.config["Nell"]) + Bn = np.zeros(self.config["Nell"]) + ell = np.zeros(self.config["Nell"]) + + filename = self.get_input("twopoint_data_real_raw") + s = sacc.Sacc.load_fits(filename) + + Qxip = sacc.standard_types.galaxy_shear_xi_plus + + source_tracers = set() + for b1, b2 in s.get_tracer_combinations(Qxip): + source_tracers.add(b1) + source_tracers.add(b2) + + nbin_source = max(len(source_tracers), 1) + + results= {} + + for zi in range(0,nbin_source): + for zj in range(0,nbin_source): + xip = s.get_mean("galaxy_shear_xi_plus",('source_0', 'source_0') ) + xim = s.get_mean("galaxy_shear_xi_plus",('source_0', 'source_0') ) + + for i in range(int(self.config["Nell"])): + + # with-separation calculation + res = geb['%d'%(i+1)].tolist() + Fp = res['2'] + Fm = res['3'] + En[i] = np.sum(Fp*xip + Fm*xim)/2 + Bn[i] = np.sum(Fp*xip - Fm*xim)/2 + ell[i] = res['4'][np.argmax(res['5'])] + + results[('source_%d'%zi,'source_%d'%zj)] = Bn + results['ell'] = ell + + self.results = results + + else: + sys.exit("The only method implemented right now are 'hybrideb' or 'namaster' ") + + + print('Saving pureB Cls in sacc file') + self.save_power_spectra(nbin_source) + + + def save_power_spectra(self, nbin_source): + import sacc + import datetime + + S = sacc.Sacc() + S.metadata['nbin_source'] = nbin_source + S.metadata['creation'] = datetime.datetime.now().isoformat() + S.metadata['method'] = self.config['method'] + S.metadata['info'] = 'ClBB using pureB' + + CBB = sacc.standard_types.galaxy_shear_cl_bb + + with self.open_input("shear_photoz_stack", wrapper=True) as f: + for i in range(nbin_source): + z, Nz = f.get_bin_n_of_z(i) + S.add_tracer("NZ", f"source_{i}", z, Nz) + + for zi in range(0,nbin_source): + for zj in range(0,nbin_source): + tracer1 = "source_%d"%zi + tracer2 = "source_%d"%zj + #import pdb; pdb.set_trace() + val = self.results[(tracer1,tracer2)] + ell = self.results['ell'] + for k in range(0,len(val)): + S.add_data_point(CBB, (tracer1, tracer2), value=val[k], ell=ell[k]) + + # And we're all done! 
+        output_filename = self.get_output("twopoint_data_fourier_pureB")
+        S.save_fits(output_filename, overwrite=True)
+
+

From 99a2d4d0a6755217434875555a8610cbdde61a63 Mon Sep 17 00:00:00 2001
From: yomori
Date: Fri, 21 Jun 2024 07:19:30 -0700
Subject: [PATCH 12/17] allow for none pureB

---
 txpipe/twopoint_null_tests.py | 207 ++++++++++++++++++++++++----------
 1 file changed, 146 insertions(+), 61 deletions(-)

diff --git a/txpipe/twopoint_null_tests.py b/txpipe/twopoint_null_tests.py
index 7f9a8377f..e8ecdaba1 100644
--- a/txpipe/twopoint_null_tests.py
+++ b/txpipe/twopoint_null_tests.py
@@ -778,9 +778,9 @@ class TXpureB(PipelineStage):
     parallel = False

     inputs = [
-        ("shear_photoz_stack", QPNOfZFile),
-        ("source_maps", MapsFile),
-        ("mask" , MapsFile),
+        ("shear_photoz_stack"    , QPNOfZFile),
+        ("source_maps"           , MapsFile),
+        ("mask"                  , MapsFile),
         ("twopoint_data_real_raw", SACCFile),
     ]

@@ -796,89 +796,144 @@ class TXpureB(PipelineStage):
         'theta_min' : 2.5,
         'theta_max' : 250,
         'Ntheta'    : 1000,
-        'precomputed_hybrideb_weights' : ""
+        'Nsims'     : 1000
     }

     def run(self):
+        import os
         import pymaster as nmt
         import healpy as hp
         import sacc
         import pyccl
         import hybrideb
         import datetime
+        import pickle
+        from tqdm import tqdm

-        if self.config["method"] == 'namaster':
-            '''
-            B-mode method that is already implemented in NaMaster
-            '''
+        if self.config["method"] == 'namaster_pureB' or self.config["method"] == 'namaster_nopureB':
+            '''B-mode method that is already implemented in NaMaster'''
             print('----------------namaster-----------')
-
-            # Open mask
-            with self.open_input("mask", wrapper=True) as f:
-                mask = f.read_map("mask")
-
+            if self.config["method"] == 'namaster_pureB':
+                print("WARNING: Namaster's B-mode purification requires the mask to be heavily apodized.")
+                print("For a realistic LSS mask, this will most likely not work, but this function is ")
+                print("implemented for completeness nonetheless.")
+                purify_b = True
+            else:
+                print("WARNING: We are not running B-mode purification (this is what was used in DES-Y3).")
+                purify_b = False
+
             # Open source maps (g1,g2,weights)
             with self.open_input("source_maps", wrapper=True) as f:
                 nbin_source = f.file["maps"].attrs["nbin_source"]
                 g1_maps = [f.read_map(f"g1_{b}") for b in range(nbin_source)]
                 g2_maps = [f.read_map(f"g2_{b}") for b in range(nbin_source)]
                 weights = [f.read_map(f"lensing_weight_{b}") for b in range(nbin_source)]
-
+
+            # Open mask
+            with self.open_input("mask", wrapper=True) as f:
+                mask = f.read_map("mask")
+
             # Define binning
             b = nmt.NmtBin.from_nside_linear(hp.npix2nside(mask.shape[0]), self.config["Nell"])

             # Initialize the fields
             fields = {}
             for i in range(nbin_source):
-                fields[i] = nmt.NmtField(mask, [g1_maps[i], g2_maps[i]], purify_e=False, purify_b=True)
+                g1_maps[i][g1_maps[i]==hp.UNSEEN]=0
+                g2_maps[i][g2_maps[i]==hp.UNSEEN]=0
+                fields[i] = nmt.NmtField(mask, [g1_maps[i], g2_maps[i]], purify_e=False, purify_b=purify_b)

             # Compute Cls and store them in dictionary
-            results = {}
-            for zi,f1 in enumerate(fields.keys()) :
-                for zj,f2 in enumerate(fields.keys()):
-                    field1 = fields[f1]
-                    field2 = fields[f2]
+            tmp = np.array([])
+            ret=np.zeros((nbin_source,nbin_source,len(b.get_effective_ells())))
+
+            for zi in range(nbin_source):
+                for zj in range(zi,nbin_source):
+
+                    field1 = fields[zi]
+                    field2 = fields[zj]
                     w_yp = nmt.NmtWorkspace()
                     w_yp.compute_coupling_matrix(field1, field2, b)

                     cl_coupled = nmt.compute_coupled_cell(field1,field2)
                     cl_decoupled = w_yp.decouple_cell(cl_coupled)
-                    results[('source_%d'%zi,'source_%d'%zj)] = cl_decoupled
+
ret[zj,zi,:]= cl_decoupled[3] + tmp = np.concatenate([tmp,cl_decoupled[3]]) + + + # Compute covariance in a somewhat adhoc way by shuffling the pixel values. + covarr = np.zeros((len(tmp),self.config['Nsims'])) + + for k in tqdm(range(self.config['Nsims'])): + fields = {} + for i in range(nbin_source): + idx1 = np.where(g1_maps[i]!=0)[0] + idx2 = np.random.permutation(idx1) + g1_maps[i][idx1]=g1_maps[i][idx2] + g2_maps[i][idx1]=g2_maps[i][idx2] + fields[i] = nmt.NmtField(mask, [g1_maps[i], g2_maps[i]], purify_e=False, purify_b=purify_b) + + tmp = np.array([]) + for zi in range(nbin_source): + for zj in range(zi,nbin_source): + + field1 = fields[zi] + field2 = fields[zj] + w_yp = nmt.NmtWorkspace() + w_yp.compute_coupling_matrix(field1, field2, b) + + cl_coupled = nmt.compute_coupled_cell(field1,field2) + cl_decoupled = w_yp.decouple_cell(cl_coupled) + tmp = np.concatenate([tmp,cl_decoupled[3]]) + + covarr[:,k] = tmp + - results['ell'] = b.get_effective_ells() - self.results = results + self.ell = b.get_effective_ells() + self.results = ret + self.cov = np.cov(covarr) elif self.config["method"] == 'hybrideb': ''' B-mode method of Becker and Rozo 2015 http://arxiv.org/abs/1412.3851 ''' - #print(self.config['precomputed_hybrideb_weights'] ); sys.exit() print('--------------hybrideb-----------') - if self.config["precomputed_hybrideb_weights"] == "": - # Recomputing all weight functinons -- This is a slow rocedure + + # Check if nbins is less than 1000, and throw warning. + if self.config['Ntheta']<1000: + print("WARNING: Calculating hybridEB using Ntheta<1000, which may lead to inaccurate results.") + + # Computing the weights takes a few minutes so its a lot faster + # to precompute and load them in subsequent runs. + file_precomputed_weights = self.get_output("twopoint_data_fourier_pureB")+ f".precomputedweights.pkl" + + if os.path.exists(file_precomputed_weights): + print(f"{self.rank} WARNING: Using precomputed weights from revious run: {file_precomputed_weights}") + with open(file_precomputed_weights, "rb") as f: + geb_dict = pickle.load(f) + + else: heb = hybrideb.HybridEB(self.config['theta_min'], self.config['theta_max'], self.config['Ntheta']) beb = hybrideb.BinEB(self.config['theta_min'], self.config['theta_max'], self.config['Ntheta']) geb = hybrideb.GaussEB(beb, heb) - geb_dict = {} for i in range(0,self.config['Nell']): geb_dict['%d'%(i+1)] = {} for j in range(0,6): geb_dict['%d'%(i+1)]['%d'%(j+1)]=geb(i)[j] + + with open(file_precomputed_weights, 'wb') as handle: + pickle.dump(geb_dict, handle, protocol=pickle.HIGHEST_PROTOCOL) - np.savez('geb_nosep_dict.npz', **geb_dict) - - else: - # Loading precomputed weight functions to speed up computation - geb = np.load(self.config['precomputed_hybrideb_weights'],allow_pickle=True) - - En = np.zeros(self.config["Nell"]) - Bn = np.zeros(self.config["Nell"]) - ell = np.zeros(self.config["Nell"]) + En0 = np.zeros(self.config["Nell"]) + Bn0 = np.zeros(self.config["Nell"]) + En = np.zeros((self.config["Nell"],self.config["Nsims"])) + Bn = np.zeros((self.config["Nell"],self.config["Nsims"])) + ell = np.zeros(self.config["Nell"]) filename = self.get_input("twopoint_data_real_raw") - s = sacc.Sacc.load_fits(filename) + s = sacc.Sacc.load_fits(filename) Qxip = sacc.standard_types.galaxy_shear_xi_plus @@ -889,33 +944,62 @@ def run(self): nbin_source = max(len(source_tracers), 1) - results= {} + corvarr = np.zeros( (int(nbin_source*(nbin_source+1)/2*self.config["Nell"]), self.config["Nsims"]) ) - for zi in range(0,nbin_source): - for zj in 
range(0,nbin_source): - xip = s.get_mean("galaxy_shear_xi_plus",('source_0', 'source_0') ) - xim = s.get_mean("galaxy_shear_xi_plus",('source_0', 'source_0') ) + ret=np.zeros((nbin_source,nbin_source,self.config["Nell"])) + c = 0 + for zi in range(0,nbin_source): + for zj in range(zi,nbin_source): + ii = self.config["Nell"]*c + ff = self.config["Nell"]*(c+1) + + # Need all 4 block of the covariance + filename = self.get_input("twopoint_data_real_raw") + tmp = sacc.Sacc.load_fits(filename) + + # From the sacc file only load relevant tracer combinations + tmp.keep_selection(tracers=('source_%d'%zj, 'source_%d'%zi)) + dvec = tmp.mean + xip = dvec[:int(self.config["Ntheta"])] + xim = dvec[int(self.config["Ntheta"]):] + + #random draws based on mean dn covariance + x = np.random.multivariate_normal(mean = tmp.mean, cov =tmp.covariance.covmat , size = self.config["Nsims"]) + Rxip = x[:,:int(self.config["Ntheta"])] + Rxim = x[:,int(self.config["Ntheta"]):] + + # Compute the uncertainties from random draws + for n in range(self.config["Nsims"]): + for i in range(int(self.config["Nell"])): + res = geb_dict['%d'%(i+1)] + Fp = res['2'] + Fm = res['3'] + En[i,n] = np.sum(Fp*Rxip[:,n] + Fm*Rxim[:,n])/2 + Bn[i,n] = np.sum(Fp*Rxip[:,n] - Fm*Rxim[:,n])/2 + + corvarr[ii:ff,:] = Bn + + # Compute actual data vector for i in range(int(self.config["Nell"])): - - # with-separation calculation - res = geb['%d'%(i+1)].tolist() + res = geb_dict['%d'%(i+1)] Fp = res['2'] Fm = res['3'] - En[i] = np.sum(Fp*xip + Fm*xim)/2 - Bn[i] = np.sum(Fp*xip - Fm*xim)/2 + En0[i] = np.sum(Fp*xip + Fm*xim)/2 + Bn0[i] = np.sum(Fp*xip - Fm*xim)/2 ell[i] = res['4'][np.argmax(res['5'])] - - results[('source_%d'%zi,'source_%d'%zj)] = Bn - results['ell'] = ell - self.results = results - + ret[zj,zi,:]= Bn0[:] + c+=1 + + self.ell = ell + self.results = ret + self.cov = np.cov(corvarr) else: - sys.exit("The only method implemented right now are 'hybrideb' or 'namaster' ") + sys.exit("The only method implemented right now are 'hybrideb' or 'namaster_pureB' or 'namaster_nopureB' ") - print('Saving pureB Cls in sacc file') + self.save_power_spectra(nbin_source) @@ -937,16 +1021,17 @@ def save_power_spectra(self, nbin_source): S.add_tracer("NZ", f"source_{i}", z, Nz) for zi in range(0,nbin_source): - for zj in range(0,nbin_source): - tracer1 = "source_%d"%zi - tracer2 = "source_%d"%zj - #import pdb; pdb.set_trace() - val = self.results[(tracer1,tracer2)] - ell = self.results['ell'] + for zj in range(zi,nbin_source): + tracer1 = "source_%d"%zj + tracer2 = "source_%d"%zi + val = self.results[zj,zi,:] + print(val) + ell = self.ell for k in range(0,len(val)): S.add_data_point(CBB, (tracer1, tracer2), value=val[k], ell=ell[k]) - # And we're all done! 
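+        # Attach the covariance estimated from the random realizations above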
+ S.add_covariance(self.cov) + output_filename = self.get_output("twopoint_data_fourier_pureB") S.save_fits(output_filename, overwrite=True) From 8139ee24d78f0cd82b1b6ce99cd4f02ed2885816 Mon Sep 17 00:00:00 2001 From: yomori Date: Fri, 21 Jun 2024 13:05:59 -0700 Subject: [PATCH 13/17] patch diagnostic.py and ci.yml --- .github/workflows/ci.yml | 1 + examples/desy3/config_Bmodes.yml | 17 ++++++++++++-- examples/desy3/pipeline_Bmodes.yml | 33 ++++++++++++++++---------- txpipe/diagnostics.py | 37 ++++++++++++++++++++++-------- 4 files changed, 65 insertions(+), 23 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3ea10d3e5..ecd65ce66 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -273,6 +273,7 @@ jobs: ceci --dry-run examples/buzzard/pipeline.yml ceci --dry-run examples/cosmodc2/pipeline.yml ceci --dry-run examples/skysim/pipeline.yml + ceci --dry-run examples/desy3/pipeline_Bmodes.yml Test_Auto_Installer: diff --git a/examples/desy3/config_Bmodes.yml b/examples/desy3/config_Bmodes.yml index 099a95aee..877d275d0 100644 --- a/examples/desy3/config_Bmodes.yml +++ b/examples/desy3/config_Bmodes.yml @@ -7,7 +7,7 @@ global: chunk_rows: 100000 # These mapping options are also read by a range of stages pixelization: healpix - nside: 128 + nside: 512 sparse: True # Generate sparse maps - faster if using small areas TXGCRTwoCatalogInput: @@ -193,9 +193,10 @@ TXTwoPoint: flip_g2: True # use true when using metacal shears min_sep: 2.5 max_sep: 60.0 - nbins: 10 + nbins: 1000 verbose: 0 subtract_mean_shear: True + var_method: "jackknife" TXGammaTBrightStars: {} @@ -398,3 +399,15 @@ Inform_NZDirLens: aliases: input: spectroscopic_catalog model: lens_direct_calibration_model + +TXpureB: + method : 'namaster' # 'namaster' or 'hybrideb' + Nell : 20 # Number of ell bins + lmin : 100 # ell min + lmax : 2000 # ell max + lspacing : 'log' # ell spacing for binning + bin_file : # Load predfined bin edges instead + theta_min : 2.5 # (only for hybrideb) theta min in arcmin + theta_max : 250 # (only for hybrideb) theta max in arcmin + Ntheta : 1000 # (only for hybrideb) Number of theta bins + Nsims : 3 # (only for hybrideb) Number of Gaussian draws to use to estimate errors diff --git a/examples/desy3/pipeline_Bmodes.yml b/examples/desy3/pipeline_Bmodes.yml index e1db26c2d..70e29d44b 100644 --- a/examples/desy3/pipeline_Bmodes.yml +++ b/examples/desy3/pipeline_Bmodes.yml @@ -36,6 +36,10 @@ stages: - name: TXLensMaps # make source lens and n_gal maps - name: TXDensityMaps # turn mask and ngal maps into overdensity maps - name: TXTwoPointFourier # Compute power spectra C_ell + - name: TXTwoPoint # Compute real-space 2-point correlations + threads_per_process: 128 + - name: TXpureB # Compute pureBB + #- name: TXStarCatalogSplitter # Split the star catalog into separate bins (psf/non-psf) #- name: TXSourceMaps # make source g1 and g2 maps #- name: TXLensMaps # make source lens and n_gal maps @@ -47,14 +51,14 @@ stages: #- name: TXDensityMaps # turn mask and ngal maps into overdensity maps #- name: TXTracerMetadata # collate metadata #- name: TXRandomCat # generate lens bin random catalogs - #- name: TXJackknifeCenters # Split the area into jackknife regions + - name: TXJackknifeCenters # Split the area into jackknife regions #- name: TXTwoPoint # Compute real-space 2-point correlations # threads_per_process: 2 #- name: TXBlinding # Blind the data following Muir et al # threads_per_process: 2 #- name: TXTwoPointTheoryReal # compute theory using CCL to save in sacc 
file and plot later #- name: TXTwoPointPlotsTheory # Make plots of 2pt correlations - #- name: TXSourceDiagnosticPlots # Make a suite of diagnostic plots + - name: TXSourceDiagnosticPlots # Make a suite of diagnostic plots #- name: TXLensDiagnosticPlots # Make a suite of diagnostic plots #- name: TXGammaTFieldCenters # Compute and plot gamma_t around center points # threads_per_process: 2 @@ -64,10 +68,10 @@ stages: # threads_per_process: 2 #- name: TXGalaxyStarDensity # Compute and plot the star-galaxy density cross-correlation #- name: TXGalaxyStarShear # Compute and plot the star-galaxy shear cross-correlation - - name: TXPhotozPlotSource # Plot the bin n(z) - classname: TXPhotozPlot - - name: TXPhotozPlotLens # Plot the bin n(z) - classname: TXPhotozPlot + #- name: TXPhotozPlotSource # Plot the bin n(z) + # classname: TXPhotozPlot + #- name: TXPhotozPlotLens # Plot the bin n(z) + # classname: TXPhotozPlot #- name: TXConvergenceMaps # Make convergence kappa maps from g1, g2 maps #- name: TXConvergenceMapPlots # Plot the convergence map #- name: TXMapCorrelations # plot the correlations between systematics and data @@ -98,7 +102,7 @@ python_paths: - submodules/WLMassMap/python/desc/ # Where to put outputs -output_dir: data/desy3a/outputs +output_dir: data/des-y3/outputs # How to run the pipeline: mini, parsl, or cwl launcher: @@ -108,7 +112,7 @@ launcher: # Where to run the pipeline: cori-interactive, cori-batch, or local site: name: local - max_threads: 2 + max_threads: 128 # configuration settings config: examples/desy3/config_Bmodes.yml @@ -118,10 +122,15 @@ inputs: # See README for paths to download these files shear_catalog : /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/shear_catalog_desy3_unmasked_withfakez_v2.h5 star_catalog : /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/DES_psf_y3_catalog.hdf5 + shear_photoz_stack: /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3//shear_photoz_stack.hdf5 + random_cats : /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/randoms_desy3_RM.hdf5 + binned_random_catalog: /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/binned_randoms_desy3_RM.hdf5 + binned_random_catalog_sub: /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/binned_randoms_desy3_RM.hdf5 + #patch_centers : /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/binned_randoms_desy3_RM.hdf5 + #lens_tomography_catalog: /global/cfs/cdirs/lsst/groups/WL/users/yomori/repo/aaa/TXPipe/data/desy3a/outputs/lens_tomography_catalog_unweighted.hdf5 ##############<----- manually add this line # This file comes with the code fiducial_cosmology: data/fiducial_cosmology.yml - shear_photoz_stack: data/example/outputs/shear_photoz_stack_manual.hdf5 photometry_catalog: data/example/inputs/photometry_catalog.hdf5 calibration_table : data/example/inputs/sample_cosmodc2_w10year_errors.dat exposures : data/example/inputs/exposures.hdf5 @@ -147,9 +156,9 @@ inputs: # if interrupted resume: True # where to put output logs for individual stages -log_dir: data/desy3a/logs +log_dir: data/des-y3/logs # where to put an overall parsl pipeline log -pipeline_log: data/desy3a/log.txt +pipeline_log: data/des-y3/log.txt @@ -179,4 +188,4 @@ pipeline_log: data/desy3a/log.txt # f['qp/meta/pdf_name'] = np.array([b'hist'], dtype='|S4') # f['qp/meta/pdf_version'] = [0] # f['qp/data/pdfs'] = (np.c_[nz1,nz2,nz3,nz4]).T - \ No newline at end of file + diff --git a/txpipe/diagnostics.py b/txpipe/diagnostics.py index 
f8de05ab2..6d75dd45c 100644 --- a/txpipe/diagnostics.py +++ b/txpipe/diagnostics.py @@ -300,8 +300,11 @@ def plot_psf_shear(self): fig.close() f = self.open_output("g_psf_g_out") - data =[mu1,mu2,mean11,mean12,mean21,mean22,std11,std12,std21,std22,line11,line12,line21,line22] - f.write(''.join([str(i) + '\n' for i in data])) + data = np.c_[mu1,mu2,mean11,mean12,mean21,mean22,std11,std12,std21,std22,line11,line12,line21,line22] + f.write('#[0]mu1 [1]mu2 [2]mean11 [3]mean12 [4]mean21 [5]mean22 [6]std11 [7]std12 [8]std21 [9]std22 [10]line11 [11]line12 [12]line21 [13]line22\n') + for row in data: + line = ' '.join(map(str, row)) + '\n' + f.write(line) f.close() def plot_psf_size_shear(self): @@ -372,9 +375,14 @@ def plot_psf_size_shear(self): fig.close() f = self.open_output("g_psf_T_out") - data =[mu,mean1,mean2,std1,std2,line1,line2] - f.write(''.join([str(i) + '\n' for i in data])) + f.write('#[0]mu [1]mean1 [2]mean2 [3]std1 [4]std2 [5]line1 [6]line2\n') + data = np.c_[mu,mean1,mean2,std1,std2,line1,line2] + for row in data: + line = ' '.join(map(str, row)) + '\n' + f.write(line) f.close() + + def plot_snr_shear(self): @@ -445,11 +453,17 @@ def plot_snr_shear(self): plt.tight_layout() fig.close() + f = self.open_output("g_snr_out") - data =[mu,mean1,mean2,std1,std2,line1,line2] - f.write(''.join([str(i) + '\n' for i in data])) + f.write('#[0]mu [1]mean1 [2]mean2 [3]std1 [4]std2 [5]line1 [6]line2\n') + data = np.c_[mu,mean1,mean2,std1,std2,line1,line2] + for row in data: + line = ' '.join(map(str, row)) + '\n' + f.write(line) f.close() - + + + def plot_size_shear(self): # mean shear in bins of galaxy size print("Making mean shear galaxy size plot") @@ -519,10 +533,15 @@ def plot_size_shear(self): plt.tight_layout() fig.close() + f = self.open_output("g_T_out") - data =[mu,mean1,mean2,std1,std2,line1,line2] - f.write(''.join([str(i) + '\n' for i in data])) + f.write('#[0]mu [1]mean1 [2]mean2 [3]std1 [4]std2 [5]line1 [6]line2\n') + data = np.c_[mu,mean1,mean2,std1,std2,line1,line2] + for row in data: + line = ' '.join(map(str, row)) + '\n' + f.write(line) f.close() + def plot_mag_shear(self): # mean shear in bins of magnitude From 8f364eea8b6376089d00f9ab06aaef310f0c5734 Mon Sep 17 00:00:00 2001 From: yomori Date: Fri, 5 Jul 2024 11:18:12 -0700 Subject: [PATCH 14/17] Incorporate first part of Joe's comments --- examples/desy3/config_Bmodes.yml | 196 +---------------------------- examples/desy3/pipeline_Bmodes.yml | 159 +++++------------------ txpipe/twopoint_null_tests.py | 98 ++++++++------- 3 files changed, 85 insertions(+), 368 deletions(-) diff --git a/examples/desy3/config_Bmodes.yml b/examples/desy3/config_Bmodes.yml index 877d275d0..329dd4312 100644 --- a/examples/desy3/config_Bmodes.yml +++ b/examples/desy3/config_Bmodes.yml @@ -10,34 +10,6 @@ global: nside: 512 sparse: True # Generate sparse maps - faster if using small areas -TXGCRTwoCatalogInput: - metacal_dir: /global/cscratch1/sd/desc/DC2/data/Run2.2i/dpdd/Run2.2i-t3828/metacal_table_summary - photo_dir: /global/cscratch1/sd/desc/DC2/data/Run2.2i/dpdd/Run2.2i-t3828/object_table_summary - -TXMetacalGCRInput: - cat_name: dc2_object_run2.1i_dr1b_with_metacal_griz - -TXExposureInfo: - dc2_name: '1.2p' - - -TXCosmoDC2Mock: - cat_name: cosmoDC2_v1.1.4_image - visits_per_band: 16 - extra_cols: redshift_true size_true shear_1 shear_2 - flip_g2: True # to match metacal - -TXIngestRedmagic: - lens_zbin_edges: [0.1, 0.3, 0.5] - -PZPDFMLZ: - nz: 301 - zmax: 3.0 - -TXLensTrueNumberDensity: - zmax: 3.0 - nz: 301 - PZPrepareEstimatorLens: name: 
PZPrepareEstimatorLens classname: Inform_BPZ_lite @@ -64,32 +36,6 @@ PZPrepareEstimatorLens: hdf5_groupname: photometry -PZPrepareEstimatorSource: - name: PZPrepareEstimatorSource - classname: Inform_BPZ_lite - aliases: - input: spectroscopic_catalog - model: source_photoz_model - zmin: 0.0 - zmax: 3.0 - nzbins: 301 - columns_file: ./data/bpz_ugrizy.columns - data_path: ./data/example/rail-bpz-inputs - spectra_file: CWWSB4.list - prior_band: i - ref_band: i - # Not sure about this - prior_file: hdfn_gen - p_min: 0.005 - gauss_kernel: 0.0 - mag_err_min: 0.005 - inform_options: {'save_train': False, 'load_model': False, 'modelfile': 'BPZpriormodel.out'} - madau_reddening: no - bands: riz - zp_errors: [0.01, 0.01, 0.01, 0.01, 0.01, 0.01] - hdf5_groupname: photometry - - PZEstimatorLens: name: PZEstimatorLens classname: BPZ_lite @@ -125,14 +71,6 @@ PZEstimatorLens: mag_z: 27.98 mag_y: 27.05 - - -# Mock version of stacking: -TXSourceTrueNumberDensity: - nz: 301 - zmax: 3.0 - - TXSourceSelectorMetacal: input_pz: True true_z: False @@ -145,20 +83,6 @@ TXSourceSelectorMetacal: shear_prefix: mcal_ use_diagonal_response : True -TXTruthLensSelector: - # Mag cuts - input_pz: False - true_z: True - lens_zbin_edges: [0.1, 0.3, 0.5] - cperp_cut: 0.2 - r_cpar_cut: 13.5 - r_lo_cut: 16.0 - r_hi_cut: 21.6 - i_lo_cut: 17.5 - i_hi_cut: 21.9 - r_i_cut: 2.0 - # may also need one for r_cpar_cut - TXMeanLensSelector: # Mag cuts lens_zbin_edges: [0.0,0.2,0.4] @@ -198,25 +122,6 @@ TXTwoPoint: subtract_mean_shear: True var_method: "jackknife" -TXGammaTBrightStars: {} - -TXGammaTDimStars: {} - -TXGammaTRandoms: {} - -TXGammaTFieldCenters: {} - -TXBlinding: - seed: 1972 ## seed uniquely specifies the shift in parameters - Omega_b: [0.0485, 0.001] ## fiducial_model_value, shift_sigma - Omega_c: [0.2545, 0.01] - w0: [-1.0, 0.1] - h: [0.682, 0.02] - sigma8: [0.801, 0.01] - n_s: [0.971, 0.03] - b0: 0.95 ## we use bias of the form b0/g - delete_unblinded: True - TXSourceDiagnosticPlots: psf_prefix: mcal_psf_ shear_prefix: mcal_ @@ -230,21 +135,6 @@ TXSourceDiagnosticPlots: s2n_min: 1.25 s2n_max: 300 -TXLensDiagnosticPlots: {} - -TXDiagnosticMaps: - sparse: True # Generate sparse maps - faster if using small areas - snr_threshold: 10.0 - snr_delta: 1.0 - # pixelization: gnomonic - pixel_size: 0.2 - ra_cent: 62. - dec_cent: -35. - npix_x: 60 - npix_y: 60 - depth_cut: 23.0 - psf_prefix: mcal_psf_ - TXSourceMaps: sparse: True # Generate sparse maps - faster if using small areas @@ -252,25 +142,9 @@ TXLensMaps: sparse: True # Generate sparse maps - faster if using small areas nside: 128 -# For redmagic mapping -TXExternalLensMaps: - chunk_rows: 100000 - sparse: True - pixelization: healpix - nside: 512 - - -TXSourceMaps: {} -TXLensMaps: {} - TXDensityMaps: nside: 128 - -TXAuxiliarySourceMaps: - flag_exponent_max: 8 - psf_prefix: psf_ - TXAuxiliaryLensMaps: flag_exponent_max: 8 bright_obj_threshold: 22.0 # The magnitude threshold for a object to be counted as bright @@ -278,34 +152,13 @@ TXAuxiliaryLensMaps: snr_threshold: 10.0 # The S/N value to generate maps for (e.g. 5 for 5-sigma depth) snr_delta: 1.0 # The range threshold +/- delta is used for finding objects at the boundary -TXRealGaussianCovariance: - min_sep: 2.5 - max_sep: 60. 
- nbins: 10 - pickled_wigner_transform: data/example/inputs/wigner.pkl - - TXJackknifeCenters: npatch: 5 - -TXTwoPointFourier: - flip_g2: True - ell_min: 30 - ell_max: 150 - bandwidth: 20 - cache_dir: ./cache/workspaces - TXSimpleMask: depth_cut : 23.5 bright_object_max: 10.0 -TXMapCorrelations: - supreme_path_root: data/example/inputs/supreme - outlier_fraction: 0.05 - nbin: 20 - - PZRailSummarizeLens: leafsize: 20 zmin: 0.0 @@ -321,34 +174,6 @@ PZRailSummarizeLens: photoz_stack: lens_photoz_stack model: None - -PZRailSummarizeSource: - leafsize: 20 - zmin: 0.0 - zmax: 3.0 - nzbins: 50 - mag_prefix: "/shear/mcal_mag_" - tomography_name: "source" - name: PZRailSummarizeSource - aliases: - tomography_catalog: shear_tomography_catalog - photometry_catalog: shear_catalog - model: source_direct_calibration_model - photoz_stack: shear_photoz_stack - - -TXPhotozPlotLens: - name: TXPhotozPlotLens - aliases: - photoz_stack: lens_photoz_stack - nz_plot: lens_nz - -TXPhotozPlotSource: - name: TXPhotozPlotSource - aliases: - photoz_stack: shear_photoz_stack - nz_plot: source_nz - FlowCreator: n_samples: 1000000 seed: 5763248 @@ -358,25 +183,6 @@ FlowCreator: output: ideal_specz_catalog model: flow -InvRedshiftIncompleteness: - pivot_redshift: 0.8 - aliases: - input: ideal_specz_catalog - output: specz_catalog_pq - -GridSelection: - aliases: - input: ideal_specz_catalog - output: specz_catalog_pq - redshift_cut: 5.1 - ratio_file: data/example/inputs/hsc_ratios_and_specz.hdf5 - settings_file: data/example/inputs/HSC_grid_settings.pkl - random_seed: 66 - pessimistic_redshift_cut: 1.0 - - - - TXParqetToHDF: hdf_group: photometry aliases: @@ -400,7 +206,7 @@ Inform_NZDirLens: input: spectroscopic_catalog model: lens_direct_calibration_model -TXpureB: +TXPureB: method : 'namaster' # 'namaster' or 'hybrideb' Nell : 20 # Number of ell bins lmin : 100 # ell min diff --git a/examples/desy3/pipeline_Bmodes.yml b/examples/desy3/pipeline_Bmodes.yml index 70e29d44b..e08a54956 100644 --- a/examples/desy3/pipeline_Bmodes.yml +++ b/examples/desy3/pipeline_Bmodes.yml @@ -1,34 +1,28 @@ # Stages to run stages: - #- name: TXBrighterFatterPlot # Make plots tracking the brighter-fatter effect - - name: TXPSFDiagnostics # Compute and plots other PSF diagnostics - #- name: TXRoweStatistics # Compute and plot Rowe statistics - # threads_per_process: 2 - - name: TXAuxiliaryLensMaps # make depth and bright object maps - - name: TXSimpleMask # combine maps to make a simple mask - - name: TXSourceSelectorMetacal # select and split objects into source bins - - name: TXSourceNoiseMaps # Compute shear noise using rotations - - name: TXShearCalibration # Calibrate and split the source sample tomographically - - name: TXSourceMaps # make source g1 and g2 maps - - name: TXConvergenceMaps # Make convergence kappa maps from g1, g2 maps - #- name: TXAuxiliarySourceMaps # make PSF and flag maps + - name: TXPSFDiagnostics # Compute and plots other PSF diagnostics + - name: TXAuxiliaryLensMaps # make depth and bright object maps + - name: TXSimpleMask # combine maps to make a simple mask + - name: TXSourceSelectorMetacal # select and split objects into source bins + - name: TXSourceNoiseMaps # Compute shear noise using rotations + - name: TXShearCalibration # Calibrate and split the source sample tomographically + - name: TXSourceMaps # make source g1 and g2 maps + - name: TXConvergenceMaps # Make convergence kappa maps from g1, g2 maps - name: FlowCreator # Simulate a spectroscopic population - - name: GridSelection # Simulate a 
spectroscopic sample - - name: TXParqetToHDF # Convert the spec sample format - - name: PZPrepareEstimatorLens # Prepare the p(z) estimator + - name: GridSelection # Simulate a spectroscopic sample + - name: TXParqetToHDF # Convert the spec sample format + - name: PZPrepareEstimatorLens # Prepare the p(z) estimator classname: Inform_BPZ_lite - - name: PZEstimatorLens # Measure lens galaxy PDFs + - name: PZEstimatorLens # Measure lens galaxy PDFs classname: BPZ_lite - - name: TXMeanLensSelector # select objects for lens bins from the PDFs - - name: Inform_NZDirLens # Prepare the DIR method inputs for the lens sample + - name: TXMeanLensSelector # select objects for lens bins from the PDFs + - name: Inform_NZDirLens # Prepare the DIR method inputs for the lens sample classname: Inform_NZDir - - name: Inform_NZDirSource # Prepare the DIR method inputs for the source sample + - name: Inform_NZDirSource # Prepare the DIR method inputs for the source sample classname: Inform_NZDir - - name: PZRailSummarizeLens # Run the DIR method on the lens sample to find n(z) + - name: PZRailSummarizeLens # Run the DIR method on the lens sample to find n(z) classname: PZRailSummarize - #- name: PZRailSummarizeSource # Run the DIR method on the lens sample to find n(z) - # classname: PZRailSummarize - name: TXLensCatalogSplitter # Split the lens sample tomographically - name: TXLSSWeightsUnit # add systematic weights to the lens sample (weight=1 for this example) - name: TXTracerMetadata # collate metadata @@ -38,54 +32,9 @@ stages: - name: TXTwoPointFourier # Compute power spectra C_ell - name: TXTwoPoint # Compute real-space 2-point correlations threads_per_process: 128 - - name: TXpureB # Compute pureBB - - #- name: TXStarCatalogSplitter # Split the star catalog into separate bins (psf/non-psf) - #- name: TXSourceMaps # make source g1 and g2 maps - #- name: TXLensMaps # make source lens and n_gal maps - #- name: TXAuxiliarySourceMaps # make PSF and flag maps - #- name: TXAuxiliaryLensMaps # make depth and bright object maps - #- name: TXSimpleMask # combine maps to make a simple mask - #- name: TXLSSWeightsUnit # add systematic weights to the lens sample (weight=1 for this example) - #- name: TXSourceNoiseMaps # Compute shear noise using rotations - #- name: TXDensityMaps # turn mask and ngal maps into overdensity maps - #- name: TXTracerMetadata # collate metadata - #- name: TXRandomCat # generate lens bin random catalogs - - name: TXJackknifeCenters # Split the area into jackknife regions - #- name: TXTwoPoint # Compute real-space 2-point correlations - # threads_per_process: 2 - #- name: TXBlinding # Blind the data following Muir et al - # threads_per_process: 2 - #- name: TXTwoPointTheoryReal # compute theory using CCL to save in sacc file and plot later - #- name: TXTwoPointPlotsTheory # Make plots of 2pt correlations - - name: TXSourceDiagnosticPlots # Make a suite of diagnostic plots - #- name: TXLensDiagnosticPlots # Make a suite of diagnostic plots - #- name: TXGammaTFieldCenters # Compute and plot gamma_t around center points - # threads_per_process: 2 - #- name: TXGammaTStars # Compute and plot gamma_t around bright stars - # threads_per_process: 2 - #- name: TXGammaTRandoms # Compute and plot gamma_t around randoms - # threads_per_process: 2 - #- name: TXGalaxyStarDensity # Compute and plot the star-galaxy density cross-correlation - #- name: TXGalaxyStarShear # Compute and plot the star-galaxy shear cross-correlation - #- name: TXPhotozPlotSource # Plot the bin n(z) - # classname: TXPhotozPlot 
- #- name: TXPhotozPlotLens # Plot the bin n(z) - # classname: TXPhotozPlot - #- name: TXConvergenceMaps # Make convergence kappa maps from g1, g2 maps - #- name: TXConvergenceMapPlots # Plot the convergence map - #- name: TXMapCorrelations # plot the correlations between systematics and data - #- name: TXApertureMass # Compute aperture-mass statistics - # threads_per_process: 2 - #- name: TXTwoPointFourier # Compute power spectra C_ell - # Disablig this since not yet synchronised with new Treecorr MPI - # - name: TXSelfCalibrationIA # Self-calibration intrinsic alignments of galaxies - # Disabling these as they take too long for a quick test. - # - name: TXRealGaussianCovariance # Compute covariance of real-space correlations - # - name: TXTwoPointFourier # Compute power spectra C_ell - # - name: TXFourierNamasterCovariance # Compute the C_ell covariance - # - name: TXFourierTJPCovariance # Compute the C_ell covariance with TJPCov - + - name: TXPureB # Compute pure BB + - name: TXJackknifeCenters # Split the area into jackknife regions + - name: TXSourceDiagnosticPlots # Make a suite of diagnostic plots # modules and packages to import that have pipeline # stages defined in them @@ -120,38 +69,19 @@ config: examples/desy3/config_Bmodes.yml # These are overall inputs to the whole pipeline, not generated within it inputs: # See README for paths to download these files - shear_catalog : /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/shear_catalog_desy3_unmasked_withfakez_v2.h5 - star_catalog : /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/DES_psf_y3_catalog.hdf5 - shear_photoz_stack: /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3//shear_photoz_stack.hdf5 - random_cats : /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/randoms_desy3_RM.hdf5 - binned_random_catalog: /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/binned_randoms_desy3_RM.hdf5 + shear_catalog : /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/shear_catalog_desy3_unmasked_withfakez_v2.h5 + star_catalog : /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/DES_psf_y3_catalog.hdf5 + shear_photoz_stack : /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/shear_photoz_stack.hdf5 + random_cats : /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/randoms_desy3_RM.hdf5 + binned_random_catalog : /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/binned_randoms_desy3_RM.hdf5 binned_random_catalog_sub: /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/binned_randoms_desy3_RM.hdf5 - #patch_centers : /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/binned_randoms_desy3_RM.hdf5 - - #lens_tomography_catalog: /global/cfs/cdirs/lsst/groups/WL/users/yomori/repo/aaa/TXPipe/data/desy3a/outputs/lens_tomography_catalog_unweighted.hdf5 ##############<----- manually add this line - # This file comes with the code - fiducial_cosmology: data/fiducial_cosmology.yml - photometry_catalog: data/example/inputs/photometry_catalog.hdf5 - calibration_table : data/example/inputs/sample_cosmodc2_w10year_errors.dat - exposures : data/example/inputs/exposures.hdf5 - flow : data/example/inputs/example_flow.pkl - random_cats_source: Null + fiducial_cosmology : data/fiducial_cosmology.yml + photometry_catalog : data/example/inputs/photometry_catalog.hdf5 + calibration_table : data/example/inputs/sample_cosmodc2_w10year_errors.dat + exposures : 
data/example/inputs/exposures.hdf5 + flow : data/example/inputs/example_flow.pkl + random_cats_source : Null -# These are overall inputs to the whole pipeline, not generated within it -#inputs: -# shear_catalog: //global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/shear_catalog_desy3_unmasked_withfakez_v2.h5 -# photometry_catalog: -# #shear_tomography_catalog: /global/cfs/cdirs/desc-wl/projects/txpipe-sys-tests/des-y3/shear_tomography_desy3_unmasked_test.h5 -# lens_tomography_catalog: /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y1/lens_tomography_catalog_desy1_RM.h5 -# lens_photoz_stack: -# mask: /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y1/mask_desy1.h5 -# star_catalog: /global/cfs/cdirs/lsst/groups/WL/projects/txpipe-sys-tests/des-y3/DES_psf_y3_catalog.h5 -# calibration_table: data/example/inputs/sample_cosmodc2_w10year_errors.dat -# fiducial_cosmology: data/fiducial_cosmology.yml -# random_cats_source: Null -# flow: data/example/inputs/example_flow.pkl - - # if supported by the launcher, restart the pipeline where it left off # if interrupted resume: True @@ -160,32 +90,3 @@ log_dir: data/des-y3/logs # where to put an overall parsl pipeline log pipeline_log: data/des-y3/log.txt - - - - -########################################################################################### - -#'/global/cfs/cdirs/desc-wl/users/yomori/repo/aaa/TXPipe/data/desy3a/outputs/manual_shear_photoz_stack.ipynb' - -#from astropy.io import fits - -## http://desdr-server.ncsa.illinois.edu/despublic/y3a2_files/datavectors/2pt_NG_final_2ptunblind_02_26_21_wnz_maglim_covupdate.fits -#d=fits.open('2pt_NG_final_2ptunblind_02_26_21_wnz_maglim_covupdate.fits') - -#bins=np.append(d['nz_source'].data['Z_LOW'],d['nz_source'].data['Z_HIGH'][-1]) - -#nz1 = d['nz_source'].data['BIN1'] -#nz2 = d['nz_source'].data['BIN2'] -#nz3 = d['nz_source'].data['BIN3'] -#nz4 = d['nz_source'].data['BIN4'] - -#with h5py.File('shear_photoz_stack_manual.hdf5', 'w') as f: -# f.create_group("qp") -# f.create_group("qp/data") -# f.create_group("qp/meta") -# f['qp/meta/bins'] = bins[np.newaxis] -# f['qp/meta/pdf_name'] = np.array([b'hist'], dtype='|S4') -# f['qp/meta/pdf_version'] = [0] -# f['qp/data/pdfs'] = (np.c_[nz1,nz2,nz3,nz4]).T - diff --git a/txpipe/twopoint_null_tests.py b/txpipe/twopoint_null_tests.py index e8ecdaba1..9d4cf5865 100644 --- a/txpipe/twopoint_null_tests.py +++ b/txpipe/twopoint_null_tests.py @@ -770,11 +770,16 @@ def write_output(self, source_list, lens_list, meta, results): -class TXpureB(PipelineStage): +class TXPureB(PipelineStage): ''' - Make shear B-mode measurements + Make shear B-mode measurements. + This stage computes xip and xim in very narrow angular bins and transforms them into + Fourier B-modes using narrow window functions. This allows us to compute B-modes without + being affected by (a) the mask (which is an issue if we use https://arxiv.org/abs/astro-ph/0511629, + which is implemented in NaMaster, since masks for LSS is more complicated than CMB) and + (b) noise bias (since the measurements we make here are real-space measurements). 
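+    Schematically, following Becker & Rozo 2015 (http://arxiv.org/abs/1412.3851), each
+    band power is assembled from the narrow-bin measurements as X_E,B = sum(Fp*xip +/- Fm*xim)/2,
+    where Fp/Fm are real-space windows chosen so that their Fourier counterparts Wp/Wm are narrow.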
'''
-    name = "TXpureB"
+    name = "TXPureB"
     parallel = False

     inputs = [
@@ -800,7 +805,7 @@ class TXpureB(PipelineStage):
     }

     def run(self):
-        import os
+        import os,sys
         import pymaster as nmt
         import healpy as hp
         import sacc
@@ -811,7 +816,7 @@ def run(self):
         from tqdm import tqdm

         if self.config["method"] == 'namaster_pureB' or self.config["method"] == 'namaster_nopureB':
-            '''B-mode method that is already implemented in NaMaster'''
+            # B-mode method that is already implemented in NaMaster
             print('----------------namaster-----------')
             if self.config["method"] == 'namaster_pureB':
                 print("WARNING: Namaster's B-mode purification requires the mask to be heavily apodized.")
@@ -827,8 +832,7 @@
                 nbin_source = f.file["maps"].attrs["nbin_source"]
                 g1_maps = [f.read_map(f"g1_{b}") for b in range(nbin_source)]
                 g2_maps = [f.read_map(f"g2_{b}") for b in range(nbin_source)]
-                weights = [f.read_map(f"lensing_weight_{b}") for b in range(nbin_source)]
-
+
             # Open mask
             with self.open_input("mask", wrapper=True) as f:
                 mask = f.read_map("mask")
@@ -844,7 +848,6 @@
                 fields[i] = nmt.NmtField(mask, [g1_maps[i], g2_maps[i]], purify_e=False, purify_b=purify_b)

             # Compute Cls and store them in dictionary
-            tmp = np.array([])
             ret=np.zeros((nbin_source,nbin_source,len(b.get_effective_ells())))

@@ -858,11 +861,9 @@
                 cl_decoupled = w_yp.decouple_cell(cl_coupled)
                 ret[zj,zi,:]= cl_decoupled[3]
-                tmp = np.concatenate([tmp,cl_decoupled[3]])
-
-
+
             # Compute covariance in a somewhat adhoc way by shuffling the pixel values.
-            covarr = np.zeros((len(tmp),self.config['Nsims']))
+            tmparr = np.zeros((int(nbin_source*(nbin_source+1)/2*len(b.get_effective_ells())),self.config['Nsims']))

             for k in tqdm(range(self.config['Nsims'])):
                 fields = {}
                 for i in range(nbin_source):
                     idx1 = np.where(g1_maps[i]!=0)[0]
                     idx2 = np.random.permutation(idx1)
                     g1_maps[i][idx1]=g1_maps[i][idx2]
                     g2_maps[i][idx1]=g2_maps[i][idx2]
                     fields[i] = nmt.NmtField(mask, [g1_maps[i], g2_maps[i]], purify_e=False, purify_b=purify_b)

                 tmp = np.array([])
                 for zi in range(nbin_source):
                     for zj in range(zi,nbin_source):

                         field1 = fields[zi]
                         field2 = fields[zj]
                         w_yp = nmt.NmtWorkspace()
                         w_yp.compute_coupling_matrix(field1, field2, b)

                         cl_coupled = nmt.compute_coupled_cell(field1,field2)
                         cl_decoupled = w_yp.decouple_cell(cl_coupled)
                         tmp = np.concatenate([tmp,cl_decoupled[3]])

-                covarr[:,k] = tmp
+                tmparr[:,k] = tmp

+            n_bins = b.get_n_bands()
+            bin_weights = np.zeros([n_bins, b.lmax])
+            for i in range(n_bins):
+                bin_weights[i, b.get_ell_list(i)] = b.get_weight_list(i)
+
-            self.ell = b.get_effective_ells()
-            self.results = ret
-            self.cov = np.cov(covarr)
+            self.ell         = b.get_effective_ells()
+            self.bin_weights = bin_weights
+            self.results     = ret
+            self.cov         = np.cov(tmparr)


         elif self.config["method"] == 'hybrideb':
             '''
             B-mode method of Becker and Rozo 2015 http://arxiv.org/abs/1412.3851
             '''
             print('--------------hybrideb-----------')

+            Nell   = self.config['Nell']
+            Nsims  = self.config["Nsims"]
+            Ntheta = self.config["Ntheta"]
+
             # Check if nbins is less than 1000, and throw warning.
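             # (the estimator replaces continuous theta integrals with sums over the
             # xip/xim bins, so a coarse binning biases the recovered En/Bn)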
if self.config['Ntheta']<1000: print("WARNING: Calculating hybridEB using Ntheta<1000, which may lead to inaccurate results.") @@ -914,23 +924,23 @@ def run(self): geb_dict = pickle.load(f) else: - heb = hybrideb.HybridEB(self.config['theta_min'], self.config['theta_max'], self.config['Ntheta']) - beb = hybrideb.BinEB(self.config['theta_min'], self.config['theta_max'], self.config['Ntheta']) + heb = hybrideb.HybridEB(self.config['theta_min'], self.config['theta_max'], Ntheta) + beb = hybrideb.BinEB(self.config['theta_min'], self.config['theta_max'], Ntheta) geb = hybrideb.GaussEB(beb, heb) geb_dict = {} - for i in range(0,self.config['Nell']): - geb_dict['%d'%(i+1)] = {} + for i in range(0,Nell): + geb_dict[f"{i+1}"] = {} for j in range(0,6): - geb_dict['%d'%(i+1)]['%d'%(j+1)]=geb(i)[j] + geb_dict[f"{i+1}"][f"{j+1}"]=geb(i)[j] with open(file_precomputed_weights, 'wb') as handle: pickle.dump(geb_dict, handle, protocol=pickle.HIGHEST_PROTOCOL) - En0 = np.zeros(self.config["Nell"]) - Bn0 = np.zeros(self.config["Nell"]) - En = np.zeros((self.config["Nell"],self.config["Nsims"])) - Bn = np.zeros((self.config["Nell"],self.config["Nsims"])) - ell = np.zeros(self.config["Nell"]) + En0 = np.zeros(Nell) + Bn0 = np.zeros(Nell) + En = np.zeros((Nell,Nsims)) + Bn = np.zeros((Nell,Nsims)) + ell = np.zeros(Nell) filename = self.get_input("twopoint_data_real_raw") s = sacc.Sacc.load_fits(filename) @@ -944,35 +954,35 @@ def run(self): nbin_source = max(len(source_tracers), 1) - corvarr = np.zeros( (int(nbin_source*(nbin_source+1)/2*self.config["Nell"]), self.config["Nsims"]) ) + corvarr = np.zeros( (int(nbin_source*(nbin_source+1)/2*Nell), Nsims) ) - ret=np.zeros((nbin_source,nbin_source,self.config["Nell"])) + ret=np.zeros((nbin_source,nbin_source,Nell)) c = 0 for zi in range(0,nbin_source): for zj in range(zi,nbin_source): - ii = self.config["Nell"]*c - ff = self.config["Nell"]*(c+1) + ii = Nell*c + ff = Nell*(c+1) # Need all 4 block of the covariance filename = self.get_input("twopoint_data_real_raw") tmp = sacc.Sacc.load_fits(filename) # From the sacc file only load relevant tracer combinations - tmp.keep_selection(tracers=('source_%d'%zj, 'source_%d'%zi)) + tmp.keep_selection(tracers=(f'source_{zj}', f'source_{zi}')) dvec = tmp.mean - xip = dvec[:int(self.config["Ntheta"])] - xim = dvec[int(self.config["Ntheta"]):] + xip = dvec[:int(Ntheta)] + xim = dvec[int(Ntheta):] #random draws based on mean dn covariance - x = np.random.multivariate_normal(mean = tmp.mean, cov =tmp.covariance.covmat , size = self.config["Nsims"]) - Rxip = x[:,:int(self.config["Ntheta"])] - Rxim = x[:,int(self.config["Ntheta"]):] + x = np.random.multivariate_normal(mean = tmp.mean, cov =tmp.covariance.covmat , size = Nsims) + Rxip = x[:,:int(Ntheta)] + Rxim = x[:,int(Ntheta):] # Compute the uncertainties from random draws - for n in range(self.config["Nsims"]): - for i in range(int(self.config["Nell"])): - res = geb_dict['%d'%(i+1)] + for n in range(Nsims): + for i in range(int(Nell)): + res = geb_dict[f"{i+1}"] Fp = res['2'] Fm = res['3'] En[i,n] = np.sum(Fp*Rxip[:,n] + Fm*Rxim[:,n])/2 @@ -981,8 +991,8 @@ def run(self): corvarr[ii:ff,:] = Bn # Compute actual data vector - for i in range(int(self.config["Nell"])): - res = geb_dict['%d'%(i+1)] + for i in range(int(Nell)): + res = geb_dict[f"{i+1}"] Fp = res['2'] Fm = res['3'] En0[i] = np.sum(Fp*xip + Fm*xim)/2 @@ -1022,8 +1032,8 @@ def save_power_spectra(self, nbin_source): for zi in range(0,nbin_source): for zj in range(zi,nbin_source): - tracer1 = "source_%d"%zj - tracer2 = 
"source_%d"%zi + tracer1 = f"source_{zj}" + tracer2 = f"source_{zi}" val = self.results[zj,zi,:] print(val) ell = self.ell From 66778a538ac9300724ce2268a992055d05ef6e22 Mon Sep 17 00:00:00 2001 From: yomori Date: Fri, 19 Jul 2024 21:16:57 -0700 Subject: [PATCH 15/17] second push --- txpipe/twopoint_null_tests.py | 382 +++++++++++++++++++--------------- 1 file changed, 211 insertions(+), 171 deletions(-) diff --git a/txpipe/twopoint_null_tests.py b/txpipe/twopoint_null_tests.py index 9d4cf5865..77527cf2d 100644 --- a/txpipe/twopoint_null_tests.py +++ b/txpipe/twopoint_null_tests.py @@ -770,7 +770,7 @@ def write_output(self, source_list, lens_list, meta, results): -class TXPureB(PipelineStage): +class TXShearBmode(PipelineStage): ''' Make shear B-mode measurements. This stage computes xip and xim in very narrow angular bins and transforms them into @@ -779,7 +779,7 @@ class TXPureB(PipelineStage): which is implemented in NaMaster, since masks for LSS is more complicated than CMB) and (b) noise bias (since the measurements we make here are real-space measurements). ''' - name = "TXPureB" + name = "TXShearBmode" parallel = False inputs = [ @@ -793,6 +793,7 @@ class TXPureB(PipelineStage): config_options = { 'method' : 'hybrideb', + 'purify_b' : False, 'Nell' : 20, 'lmin' : 200, 'lmax' : 2000, @@ -815,44 +816,85 @@ def run(self): import pickle from tqdm import tqdm - if self.config["method"] == 'namaster_pureB' or self.config["method"] == 'namaster_nopureB': - #B-mode method that is already implemented in NaMaster''' - print('----------------namaster-----------') - if self.config["method"] == 'namaster_pureB': - print("WARNING: Namaster's B-mode purification requires the mask to be heavily apodized.") - print("For a realistic LSS mask, this will most likely not work, but this function is ") - print("implemented for completeness nonetheless.") - purify_b = True - else: - print("WARNING: We are not running B-mode purification (this is what was used in DES-Y3).") - purify_b = False - - # Open source maps (g1,g2,weights) - with self.open_input("source_maps", wrapper=True) as f: - nbin_source = f.file["maps"].attrs["nbin_source"] - g1_maps = [f.read_map(f"g1_{b}") for b in range(nbin_source)] - g2_maps = [f.read_map(f"g2_{b}") for b in range(nbin_source)] - - # Open mask - with self.open_input("mask", wrapper=True) as f: - mask = f.read_map("mask") + if self.config["method"] == 'namaster': + self.run_namaster(self.config["purify_b"]) + + elif self.config["method"] == 'hybrideb': + self.run_hybrideb() + + else: + raise ValueError("Method must 'namaster' or 'hybrideb'") - # Define binning - b = nmt.NmtBin.from_nside_linear(hp.npix2nside(mask.shape[0]), self.config["Nell"]) + + + def run_namaster(self,purify_b): + ''' + B-mode calculation that is already implemented in NaMaster + ''' + print('running namaster') + import pymaster as nmt + import healpy as hp + from tqdm import tqdm + + Nell = self.config['Nell'] + Nsims = self.config["Nsims"] + + if purify_b: + print("WARNING: Namaster's B-mode purification requires the mask to be heavily apodized.") + print("For a realistic LSS mask, this will most likely not work, but this function is ") + print("implemented for completeness nonetheless.") + + # Open source maps (g1,g2,weights) + with self.open_input("source_maps", wrapper=True) as f: + nbin_source = f.file["maps"].attrs["nbin_source"] + g1_maps = [f.read_map(f"g1_{b}") for b in range(nbin_source)] + g2_maps = [f.read_map(f"g2_{b}") for b in range(nbin_source)] - # Initialize the fields + # Open mask 
+        with self.open_input("mask", wrapper=True) as f:
+            mask = f.read_map("mask")
+
+        # Define binning
+        b = nmt.NmtBin.from_nside_linear(hp.npix2nside(mask.shape[0]), Nell)
+
+        # Initialize the fields
+        fields = {}
+        for i in range(nbin_source):
+            g1_maps[i][g1_maps[i]==hp.UNSEEN]=0
+            g2_maps[i][g2_maps[i]==hp.UNSEEN]=0
+            fields[i] = nmt.NmtField(mask, [g1_maps[i], g2_maps[i]], purify_e=False, purify_b=purify_b)
+
+        # Compute Cls and store them in dictionary
+        ret=np.zeros((nbin_source,nbin_source,len(b.get_effective_ells())))
+
+        for zi in range(nbin_source):
+            for zj in range(zi,nbin_source):
+
+                field1 = fields[zi]
+                field2 = fields[zj]
+                w_yp = nmt.NmtWorkspace()
+                w_yp.compute_coupling_matrix(field1, field2, b)
+
+                cl_coupled = nmt.compute_coupled_cell(field1,field2)
+                cl_decoupled = w_yp.decouple_cell(cl_coupled)
+                ret[zj,zi,:]= cl_decoupled[3]
+
+        # Compute covariance in a somewhat adhoc way by shuffling the pixel values.
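+        # (shuffling keeps each map's one-point shear distribution but destroys spatial
+        # correlations, so repeated shuffles give a rough noise-only covariance estimate)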
- tmparr = np.zeros((int(nbin_source*(nbin_source+1)/2*len(b.get_effective_ells())),self.config['Nsims'])) - - for k in tqdm(range(self.config['Nsims'])): - fields = {} - for i in range(nbin_source): - idx1 = np.where(g1_maps[i]!=0)[0] - idx2 = np.random.permutation(idx1) - g1_maps[i][idx1]=g1_maps[i][idx2] - g2_maps[i][idx1]=g2_maps[i][idx2] - fields[i] = nmt.NmtField(mask, [g1_maps[i], g2_maps[i]], purify_e=False, purify_b=purify_b) - - tmp = np.array([]) - for zi in range(nbin_source): - for zj in range(zi,nbin_source): - - field1 = fields[zi] - field2 = fields[zj] - w_yp = nmt.NmtWorkspace() - w_yp.compute_coupling_matrix(field1, field2, b) + tmp = np.concatenate([tmp,cl_decoupled[3]]) + + tmparr[:,k] = tmp + + n_bins = b.get_n_bands() + bin_weights = np.zeros([n_bins, b.lmax]) + + for i in range(n_bins): + bin_weights[i, b.get_ell_list(i)] = b.get_weight_list(i) + + ell = b.get_effective_ells() + bin_weights = bin_weights + results = ret + cov = np.cov(tmparr) + + print('Saving pureB Cls in sacc file') + self.save_power_spectra(nbin_source, ell, results, cov) + + + def run_hybrideb(self): + ''' + B-mode method of Becker and Rozo 2015 http://arxiv.org/abs/1412.3851 + ''' + import hybrideb + import pickle + import sacc + import os - cl_coupled = nmt.compute_coupled_cell(field1,field2) - cl_decoupled = w_yp.decouple_cell(cl_coupled) - tmp = np.concatenate([tmp,cl_decoupled[3]]) - - tmparr[:,k] = tmp - - n_bins = b.get_n_bands() - bin_weights = np.zeros([n_bins, b.lmax]) - for i in range(n_bins): - bin_weights[i, b.get_ell_list(i)] = b.get_weight_list(i) - - self.ell = b.get_effective_ells() - self.bin_weights = bin_weights - self.results = ret - self.cov = np.cov(tmparr) + print('running hybrideb') + Nell = self.config['Nell'] + Nsims = self.config["Nsims"] + Ntheta = self.config["Ntheta"] + + # Check if nbins is less than 1000, and throw warning. + if self.config['Ntheta']<1000: + print("WARNING: Calculating hybridEB using Ntheta<1000, which may lead to inaccurate results.") + + # Computing the weights takes a few minutes so its a lot faster + # to precompute them and load them again in subsequent runs. + file_precomputed_weights = self.get_output("twopoint_data_fourier_pureB")+ f".precomputedweights.pkl" + + if os.path.exists(file_precomputed_weights): + print(f"{self.rank} WARNING: Using precomputed weights from revious run: {file_precomputed_weights}") + with open(file_precomputed_weights, "rb") as f: + geb_dict = pickle.load(f) - - elif self.config["method"] == 'hybrideb': - ''' - B-mode method of Becker and Rozo 2015 http://arxiv.org/abs/1412.3851 - ''' - print('--------------hybrideb-----------') - - Nell = self.config['Nell'] - Nsims = self.config["Nsims"] - Ntheta = self.config["Ntheta"] - - # Check if nbins is less than 1000, and throw warning. - if self.config['Ntheta']<1000: - print("WARNING: Calculating hybridEB using Ntheta<1000, which may lead to inaccurate results.") - - # Computing the weights takes a few minutes so its a lot faster - # to precompute and load them in subsequent runs. 
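+        # (the windows depend only on theta_min, theta_max, Ntheta and Nell -- not on the
+        # data -- so they can safely be reused between runs)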
- file_precomputed_weights = self.get_output("twopoint_data_fourier_pureB")+ f".precomputedweights.pkl" - - if os.path.exists(file_precomputed_weights): - print(f"{self.rank} WARNING: Using precomputed weights from revious run: {file_precomputed_weights}") - with open(file_precomputed_weights, "rb") as f: - geb_dict = pickle.load(f) + else: + # To run this method we need to first compute the Fourier and real-space windows + # hybrideb.HybridEB: Sets up the estimator by creating top hat filters in real-space + # hybrideb.BinEB : Calculates the intermediate filter quantities, namely fa, fb, M+, M- [Eqs 6-11] + # hybrideb.GaussEB : Creates Gaussian windows in Fourier space + heb = hybrideb.HybridEB(self.config['theta_min'], self.config['theta_max'], Ntheta) + beb = hybrideb.BinEB(self.config['theta_min'], self.config['theta_max'], Ntheta) + geb = hybrideb.GaussEB(beb, heb) - else: - heb = hybrideb.HybridEB(self.config['theta_min'], self.config['theta_max'], Ntheta) - beb = hybrideb.BinEB(self.config['theta_min'], self.config['theta_max'], Ntheta) - geb = hybrideb.GaussEB(beb, heb) - geb_dict = {} - for i in range(0,Nell): - geb_dict[f"{i+1}"] = {} - for j in range(0,6): - geb_dict[f"{i+1}"][f"{j+1}"]=geb(i)[j] + # geb is in a fromat that can not be naturally saved so we convert it to a regular dictionary. + geb_dict = {} + for i in range(0,Nell): + geb_dict[f"{i+1}"] = {} + for j in range(0,6): + geb_dict[f"{i+1}"][f"{j+1}"]=geb(i)[j] - with open(file_precomputed_weights, 'wb') as handle: - pickle.dump(geb_dict, handle, protocol=pickle.HIGHEST_PROTOCOL) + # geb_dict stores the following information in a regular dictionary format + # 1: theta in radians + # 2: Fp plus component of the real-space window function + # 3: Fm minus component of the real-space window function + # 4: ell + # 5: Wp plus component of Fourier-space window function + # 6: Wm minus component of Fourier-space window function - En0 = np.zeros(Nell) - Bn0 = np.zeros(Nell) - En = np.zeros((Nell,Nsims)) - Bn = np.zeros((Nell,Nsims)) - ell = np.zeros(Nell) + # 2 and 3 are used to get Xp/Xm = sum((Fp*xip_hat +/- Fm*xim_hat)/2) [eq1] + # 5 and 6 are used to get / = \int ell factors(ell) (Wp*Pe + Wm*Pb) [eq5] - filename = self.get_input("twopoint_data_real_raw") - s = sacc.Sacc.load_fits(filename) + # Save this for subsequent runs since the windows don't change. 
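+            # (a pickle of plain numpy arrays, written alongside the stage's SACC output)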
- Qxip = sacc.standard_types.galaxy_shear_xi_plus + with open(file_precomputed_weights, 'wb') as handle: + pickle.dump(geb_dict, handle, protocol=pickle.HIGHEST_PROTOCOL) - source_tracers = set() - for b1, b2 in s.get_tracer_combinations(Qxip): - source_tracers.add(b1) - source_tracers.add(b2) + En0 = np.zeros(Nell) + Bn0 = np.zeros(Nell) + En = np.zeros((Nell,Nsims)) + Bn = np.zeros((Nell,Nsims)) + ell = np.zeros(Nell) - nbin_source = max(len(source_tracers), 1) + # Load xip/xim that were computed in the TXTwopoint stage and check length required for covariance + filename = self.get_input("twopoint_data_real_raw") + data_twopt = sacc.Sacc.load_fits(filename) + Qxip = sacc.standard_types.galaxy_shear_xi_plus - corvarr = np.zeros( (int(nbin_source*(nbin_source+1)/2*Nell), Nsims) ) + source_tracers = set() + for b1, b2 in data_twopt.get_tracer_combinations(Qxip): + source_tracers.add(b1) + source_tracers.add(b2) - ret=np.zeros((nbin_source,nbin_source,Nell)) + nbin_source = max(len(source_tracers), 1) - c = 0 - for zi in range(0,nbin_source): - for zj in range(zi,nbin_source): - ii = Nell*c - ff = Nell*(c+1) - - # Need all 4 block of the covariance - filename = self.get_input("twopoint_data_real_raw") - tmp = sacc.Sacc.load_fits(filename) - - # From the sacc file only load relevant tracer combinations - tmp.keep_selection(tracers=(f'source_{zj}', f'source_{zi}')) - dvec = tmp.mean - xip = dvec[:int(Ntheta)] - xim = dvec[int(Ntheta):] - - #random draws based on mean dn covariance - x = np.random.multivariate_normal(mean = tmp.mean, cov =tmp.covariance.covmat , size = Nsims) - Rxip = x[:,:int(Ntheta)] - Rxim = x[:,int(Ntheta):] - - # Compute the uncertainties from random draws - for n in range(Nsims): - for i in range(int(Nell)): - res = geb_dict[f"{i+1}"] - Fp = res['2'] - Fm = res['3'] - En[i,n] = np.sum(Fp*Rxip[:,n] + Fm*Rxim[:,n])/2 - Bn[i,n] = np.sum(Fp*Rxip[:,n] - Fm*Rxim[:,n])/2 - - corvarr[ii:ff,:] = Bn - - # Compute actual data vector + corvarr = np.zeros((int(nbin_source*(nbin_source+1)/2*Nell), Nsims)) + results = np.zeros((nbin_source,nbin_source,Nell)) + + # Loop over all bin combinations + c = 0 + for zi in range(0,nbin_source): + for zj in range(zi,nbin_source): + + # Initial and final row indices of the resulting array (covarr) to fill. + # For each tomographic bin we have Nell bins to fill. 
+                    ii = Nell*c
+                    ff = Nell*(c+1)
+
+                    tmp = data_twopt.copy()
+
+                    # From the sacc file only load relevant tracer combinations
+                    tmp.keep_selection(tracers=(f'source_{zj}', f'source_{zi}'))
+                    dvec = tmp.mean
+                    xip  = dvec[:int(Ntheta)]
+                    xim  = dvec[int(Ntheta):]
+
+                    # Make random draws based on the mean and covariance of the xip/xim measurements
+                    x = np.random.multivariate_normal(mean = tmp.mean, cov = tmp.covariance.covmat, size = Nsims)
+                    Rxip = x[:,:int(Ntheta)]
+                    Rxim = x[:,int(Ntheta):]
+
+                    # Convert each random draw into a B-mode measurement and compute the covariance
+                    for n in range(Nsims):
                         for i in range(int(Nell)):
                             res = geb_dict[f"{i+1}"]
                             Fp = res['2']
                             Fm = res['3']
-                            En0[i] = np.sum(Fp*xip + Fm*xim)/2
-                            Bn0[i] = np.sum(Fp*xip - Fm*xim)/2
-                            ell[i] = res['4'][np.argmax(res['5'])]
-
-                    ret[zj,zi,:]= Bn0[:]
-                    c+=1
-
-            self.ell     = ell
-            self.results = ret
-            self.cov     = np.cov(corvarr)
-        else:
-            sys.exit("The only method implemented right now are 'hybrideb' or 'namaster_pureB' or 'namaster_nopureB' ")
+                            En[i,n] = np.sum(Fp*Rxip[n,:] + Fm*Rxim[n,:])/2
+                            Bn[i,n] = np.sum(Fp*Rxip[n,:] - Fm*Rxim[n,:])/2
+
+                    corvarr[ii:ff,:] = Bn
+
+                    # Compute the actual data vector
+                    for i in range(int(Nell)):
+                        res = geb_dict[f"{i+1}"]
+                        Fp = res['2']
+                        Fm = res['3']
+                        En0[i] = np.sum(Fp*xip + Fm*xim)/2
+                        Bn0[i] = np.sum(Fp*xip - Fm*xim)/2
+                        ell[i] = res['4'][np.argmax(res['5'])]  # set ell to the peak of the Gaussian window function
+
+                    results[zj,zi,:] = Bn0[:]
+                    c += 1
+
+            cov = np.cov(corvarr)

-        print('Saving pureB Cls in sacc file')
-
-        self.save_power_spectra(nbin_source)
+        print('Saving pureB Cls in sacc file')
+        self.save_power_spectra(nbin_source, ell, results, cov)

-    def save_power_spectra(self, nbin_source):
+    def save_power_spectra(self, nbin_source, ell, results, cov):
         import sacc
         import datetime

@@ -1021,7 +1061,7 @@ def save_power_spectra(self, nbin_source):
         S.metadata['nbin_source'] = nbin_source
         S.metadata['creation'] = datetime.datetime.now().isoformat()
         S.metadata['method'] = self.config['method']
-        S.metadata['info'] = 'ClBB using pureB'
+        S.metadata['info'] = 'ClBB'

         CBB = sacc.standard_types.galaxy_shear_cl_bb

@@ -1034,13 +1074,12 @@ def save_power_spectra(self, nbin_source):
         for zj in range(zi,nbin_source):
             tracer1 = f"source_{zj}"
             tracer2 = f"source_{zi}"
-            val = self.results[zj,zi,:]
+            val = results[zj,zi,:]
             print(val)
-            ell = self.ell

             for k in range(0,len(val)):
                 S.add_data_point(CBB, (tracer1, tracer2), value=val[k], ell=ell[k])

-        S.add_covariance(self.cov)
+        S.add_covariance(cov)

         output_filename = self.get_output("twopoint_data_fourier_pureB")
         S.save_fits(output_filename, overwrite=True)
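In outline, the hybrideb covariance above works as follows: draw Gaussian realisations of the joint (xip, xim) data vector from its measured mean and covariance, push every realisation through the same linear window functions as the data, and take the sample covariance of the resulting B-mode bandpowers. The following is a minimal standalone sketch of that idea, not TXPipe code: it assumes the real-space windows have already been stacked into (Nell, Ntheta) arrays Fp and Fm, and that xip, xim and their joint covariance C come from the sacc file; all names are illustrative.

import numpy as np

def bmode_mc_covariance(Fp, Fm, xip, xim, C, nsims=1000, seed=0):
    # Draw Gaussian realisations of the joint (xip, xim) data vector.
    rng  = np.random.default_rng(seed)
    mean = np.concatenate([xip, xim])
    x    = rng.multivariate_normal(mean, C, size=nsims)   # shape (nsims, 2*Ntheta)
    rxip, rxim = x[:, :xip.size], x[:, xip.size:]
    # Apply the same linear estimator to every realisation and to the data.
    Bn  = 0.5 * (rxip @ Fp.T - rxim @ Fm.T)               # shape (nsims, Nell)
    Bn0 = 0.5 * (Fp @ xip - Fm @ xim)                     # B-mode data vector
    return Bn0, np.cov(Bn, rowvar=False)                  # (Nell, Nell) covariance

Because the estimator is linear in (xip, xim), the sample covariance of the draws converges quickly with nsims, which is what makes this Monte-Carlo propagation cheap compared to rerunning the measurement.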
From 1a631069e47e5b74d7f5b4c1dc1caef6242f1a1e Mon Sep 17 00:00:00 2001
From: yomori
Date: Mon, 22 Jul 2024 09:14:21 -0700
Subject: [PATCH 16/17] Change from shuffle to rotate shear

---
 txpipe/twopoint_null_tests.py | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/txpipe/twopoint_null_tests.py b/txpipe/twopoint_null_tests.py
index 77527cf2d..c9d2ca9d4 100644
--- a/txpipe/twopoint_null_tests.py
+++ b/txpipe/twopoint_null_tests.py
@@ -885,10 +885,12 @@ def run_namaster(self,purify_b):
         for k in tqdm(range(Nsims)):
             fields = {}
             for i in range(nbin_source):
-                idx1 = np.where(g1_maps[i]!=0)[0]
-                idx2 = np.random.permutation(idx1)
-                g1_maps[i][idx1]=g1_maps[i][idx2]
-                g2_maps[i][idx1]=g2_maps[i][idx2]
+                # Rotate shear maps randomly to create noise maps
+                idx = np.where(g1_maps[i]!=0)[0]
+                psi = np.random.uniform(0, 2 * np.pi, size=len(idx))
+                g1_maps[i][idx], g2_maps[i][idx] = ( g1_maps[i][idx]*np.cos(2*psi) + g2_maps[i][idx]*np.sin(2*psi),
+                                                    -g1_maps[i][idx]*np.sin(2*psi) + g2_maps[i][idx]*np.cos(2*psi))
+
                 fields[i] = nmt.NmtField(mask, [g1_maps[i], g2_maps[i]], purify_e=False, purify_b=purify_b)

                 tmp = np.array([])
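The rotation introduced in the patch above replaces pixel shuffling: rotating the spin-2 shear at every unmasked pixel by an independent random angle destroys any coherent E/B signal while preserving each pixel's noise amplitude. A minimal standalone sketch of the operation (illustrative, not the TXPipe implementation) is below; note that both rotated components must be built from the unrotated inputs, which is why the patched lines assign them as a tuple rather than sequentially.

import numpy as np

def rotate_shear(g1, g2, rng=None):
    # Rotate each pixel's shear by an independent random angle psi;
    # a spin-2 quantity transforms with 2*psi.
    rng  = np.random.default_rng() if rng is None else rng
    psi  = rng.uniform(0.0, 2.0 * np.pi, size=np.shape(g1))
    c, s = np.cos(2.0 * psi), np.sin(2.0 * psi)
    # Both outputs are formed from the *unrotated* inputs.
    return g1 * c + g2 * s, -g1 * s + g2 * c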
From a03c421b214fd1130a749a49ede03f04bf5b04a4 Mon Sep 17 00:00:00 2001
From: yomori
Date: Sun, 4 Aug 2024 21:38:55 -0700
Subject: [PATCH 17/17] add custom binning, and add file caching

---
 txpipe/twopoint_null_tests.py | 91 +++++++++++++++++++++++++----------
 1 file changed, 66 insertions(+), 25 deletions(-)

diff --git a/txpipe/twopoint_null_tests.py b/txpipe/twopoint_null_tests.py
index c9d2ca9d4..63260e881 100644
--- a/txpipe/twopoint_null_tests.py
+++ b/txpipe/twopoint_null_tests.py
@@ -789,7 +789,8 @@ class TXShearBmode(PipelineStage):
         ("twopoint_data_real_raw", SACCFile),
     ]

-    outputs = [("twopoint_data_fourier_pureB", SACCFile)]
+    outputs = [("twopoint_data_fourier_shearbmode", SACCFile),
+              ]

     config_options = {
         'method'  : 'hybrideb',
@@ -835,9 +836,14 @@ def run_namaster(self,purify_b):
         import pymaster as nmt
         import healpy as hp
         from tqdm import tqdm
+        import pickle
+
+        lmin     = self.config['lmin']
+        lmax     = self.config['lmax']
         Nell     = self.config['Nell']
         Nsims    = self.config["Nsims"]
+        lspacing = self.config['lspacing']

         if purify_b:
             print("WARNING: Namaster's B-mode purification requires the mask to be heavily apodized.")
@@ -846,40 +852,71 @@ def run_namaster(self,purify_b):

         # Open source maps (g1,g2,weights)
         with self.open_input("source_maps", wrapper=True) as f:
-            nbin_source = f.file["maps"].attrs["nbin_source"]
-            g1_maps = [f.read_map(f"g1_{b}") for b in range(nbin_source)]
-            g2_maps = [f.read_map(f"g2_{b}") for b in range(nbin_source)]
+
+            # Get the number of tomographic bins.
+            # The +1 comes from also loading the non-tomographic sample.
+            nbin_source = f.file["maps"].attrs["nbin_source"]+1
+
+            # Load the tomographic samples
+            g1_maps = [f.read_map(f"g1_{b}") for b in range(nbin_source-1)]
+            g2_maps = [f.read_map(f"g2_{b}") for b in range(nbin_source-1)]
+            # Load the non-tomographic sample
+            g1_maps.append(f.read_map(f"g1_2D"))
+            g2_maps.append(f.read_map(f"g2_2D"))
+
+        if lmax>3*hp.npix2nside(len(g1_maps[0])):
+            raise ValueError("lmax must be smaller than 3*nside")
+
         # Open mask
         with self.open_input("mask", wrapper=True) as f:
-            mask = f.read_map("mask")
+            mask  = f.read_map("mask")

         # Define binning
-        b = nmt.NmtBin.from_nside_linear(hp.npix2nside(mask.shape[0]), Nell)
+        if lspacing=='lin':
+            bine = np.linspace(lmin+1, lmax+1, Nell+1, dtype=np.int64)
+        elif lspacing=='log':
+            bine = np.geomspace(lmin+1, lmax+1, Nell+1, dtype=np.int64)
+        else:
+            raise ValueError("lspacing must be either 'log' or 'lin'")
+        b = nmt.NmtBin.from_edges(bine[:-1], bine[1:])

         # Initialize the fields
         fields = {}
         for i in range(nbin_source):
             g1_maps[i][g1_maps[i]==hp.UNSEEN]=0
             g2_maps[i][g2_maps[i]==hp.UNSEEN]=0
-            fields[i] = nmt.NmtField(mask, [g1_maps[i], g2_maps[i]], purify_e=False, purify_b=purify_b)
+            fields[i] = nmt.NmtField(mask, [g1_maps[i], g2_maps[i]], purify_e=False, purify_b=purify_b, lmax=lmax)
+
+        # Cache the intermediate products so that the covariance calculation can also be
+        # run externally (e.g. spread over multiple nodes) to speed it up.
+        products_dict = {'mask':mask,'Nell':Nell,'nbin_source':nbin_source, 'g1_maps': g1_maps, 'g2_maps': g2_maps}
+        file_namaster_intermediate = self.get_output("twopoint_data_fourier_shearbmode")[:-5] + "_namaster.pkl"
+
+        with open(file_namaster_intermediate, 'wb') as handle:
+            pickle.dump(products_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
+
+        # Compute Cls and store them in a dictionary
         ret=np.zeros((nbin_source,nbin_source,len(b.get_effective_ells())))
         for zi in range(nbin_source):
             for zj in range(zi,nbin_source):
-
-                field1 = fields[zi]
-                field2 = fields[zj]
-                w_yp = nmt.NmtWorkspace()
-                w_yp.compute_coupling_matrix(field1, field2, b)
-
-                cl_coupled = nmt.compute_coupled_cell(field1,field2)
-                cl_decoupled = w_yp.decouple_cell(cl_coupled)
-                ret[zj,zi,:]= cl_decoupled[3]
-
-        # Compute covariance in a somewhat adhoc way by shuffling the pixel values.
+
+                if zi!=zj and (zi==nbin_source-1 or zj==nbin_source-1):
+                    # No need to take the cross-correlation between the tomographic and non-tomographic samples
+                    continue
+                else:
+                    field1 = fields[zi]
+                    field2 = fields[zj]
+                    w_yp = nmt.NmtWorkspace()
+                    w_yp.compute_coupling_matrix(field1, field2, b)
+
+                    cl_coupled = nmt.compute_coupled_cell(field1,field2)
+                    cl_decoupled = w_yp.decouple_cell(cl_coupled)
+                    ret[zj,zi,:]= cl_decoupled[3]
+
+        # Compute the covariance by randomly rotating the shear values in each pixel.
         tmparr = np.zeros((int(nbin_source*(nbin_source+1)/2*len(b.get_effective_ells())),Nsims))

         for k in tqdm(range(Nsims)):
@@ -891,7 +928,7 @@ def run_namaster(self,purify_b):
                 g1_maps[i][idx], g2_maps[i][idx] = ( g1_maps[i][idx]*np.cos(2*psi) + g2_maps[i][idx]*np.sin(2*psi),
                                                     -g1_maps[i][idx]*np.sin(2*psi) + g2_maps[i][idx]*np.cos(2*psi))

-                fields[i] = nmt.NmtField(mask, [g1_maps[i], g2_maps[i]], purify_e=False, purify_b=purify_b)
+                fields[i] = nmt.NmtField(mask, [g1_maps[i], g2_maps[i]], purify_e=False, purify_b=purify_b, lmax=lmax)

                 tmp = np.array([])
                 for zi in range(nbin_source):
@@ -909,8 +946,8 @@ def run_namaster(self,purify_b):
             tmparr[:,k] = tmp

         n_bins = b.get_n_bands()
-        bin_weights = np.zeros([n_bins, b.lmax])
-
+        bin_weights = np.zeros([n_bins, b.lmax+1])
+
         for i in range(n_bins):
             bin_weights[i, b.get_ell_list(i)] = b.get_weight_list(i)

@@ -919,7 +956,7 @@ def run_namaster(self,purify_b):
         results = ret
         cov = np.cov(tmparr)

-        print('Saving pureB Cls in sacc file')
+        print('Saving ShearBmode Cls in sacc file')

         self.save_power_spectra(nbin_source, ell, results, cov)

@@ -1069,8 +1106,12 @@ def save_power_spectra(self, nbin_source, ell, results, cov):

         with self.open_input("shear_photoz_stack", wrapper=True) as f:
             for i in range(nbin_source):
-                z, Nz = f.get_bin_n_of_z(i)
-                S.add_tracer("NZ", f"source_{i}", z, Nz)
+                if i==nbin_source-1:
+                    # The last entry is the non-tomographic sample
+                    z, Nz = f.get_2d_n_of_z()
+                else:
+                    z, Nz = f.get_bin_n_of_z(i)
+                S.add_tracer("NZ", f"source_{i}", z, Nz)

         for zi in range(0,nbin_source):
             for zj in range(zi,nbin_source):
@@ -1084,7 +1125,7 @@ def save_power_spectra(self, nbin_source, ell, results, cov):

         S.add_covariance(cov)

-        output_filename = self.get_output("twopoint_data_fourier_pureB")
+        output_filename = self.get_output("twopoint_data_fourier_shearbmode")
         S.save_fits(output_filename, overwrite=True)
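For reference, the custom binning and bandpower-window logic added in this last patch can be exercised on its own. The snippet below is a minimal sketch assuming pymaster (NaMaster) is installed; lmin, lmax, and Nell are stand-ins for the config options above, and the bin_weights construction mirrors the block in the patch.

import numpy as np
import pymaster as nmt

lmin, lmax, Nell = 200, 2000, 20  # stand-ins for the config defaults
edges = np.geomspace(lmin + 1, lmax + 1, Nell + 1, dtype=np.int64)  # 'log' spacing
b = nmt.NmtBin.from_edges(edges[:-1], edges[1:])

# Bandpower window matrix: one row per bin, one column per multipole 0..b.lmax.
bin_weights = np.zeros((b.get_n_bands(), b.lmax + 1))
for i in range(b.get_n_bands()):
    bin_weights[i, b.get_ell_list(i)] = b.get_weight_list(i)

print(b.get_effective_ells())  # the bin centres used when saving the spectra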