first run with Planck and mpi4py
kwolz committed Apr 26, 2024
1 parent 0dd86a7 commit 88442aa
Showing 3 changed files with 280 additions and 1 deletion.
189 changes: 189 additions & 0 deletions paramfiles/paramfile_SAT_bbpower.yaml
@@ -0,0 +1,189 @@
# This sample .yaml file stores the
# metadata necessary to run the
# SO BB pipeline.

# Define the directories
## Let's start with directories of data products
data_dirs:
root: "/pscratch/sd/k/kwolz/bbdev/SOOPERCOOL/data_planck"
map_directory: "maps"
beam_directory: "beams"
bandpasses_directory: "bandpasses"
mock_directory: "mock_data"
## Then directories in which we will store outputs
output_dirs:
root: "/pscratch/sd/k/kwolz/bbdev/SOOPERCOOL/outputs_planck"
mask_directory: "masks"
pre_process_directory: "pre_processing"
sims_directory: "sims"
cell_transfer_directory: "cells_transfer"
cell_data_directory: "cells_data"
cell_sims_directory: "cells_sims"
coupling_directory: "couplings"
covmat_directory: "covariances"
sacc_directory: "sacc_files"

##################################
# Metadata related to maps #
# ------------------------------ #
# #
# The structure of input splits #
# is the following: #
# #
# tag: #
# file_root: ... #
# n_splits: ... #
# freq_tag: ... #
# exp_tag: ... #
##################################
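# As an illustration, the `planck_f030` set below with n_splits: 2 would be
# read from files like maps/planck_f030_split_0.fits and ..._split_1.fits;
# the exact file naming convention is an assumption, not fixed by this file.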
map_sets:
planck_f030:
file_root: "planck_f030"
n_splits: 2
freq_tag: 30
exp_tag: "planck"
filtering_tag: "none"
planck_f100:
file_root: "planck_f100"
n_splits: 2
freq_tag: 100
exp_tag: "planck"
filtering_tag: "none"
planck_f143:
file_root: "planck_f143"
n_splits: 2
freq_tag: 143
exp_tag: "planck"
filtering_tag: "none"
planck_f217:
file_root: "planck_f217"
n_splits: 2
freq_tag: 217
exp_tag: "planck"
filtering_tag: "none"
planck_f353:
file_root: "planck_f353"
n_splits: 2
freq_tag: 353
exp_tag: "planck"
filtering_tag: "none"

####################
# Masking metadata #
####################
masks:
# Load nhits map from disk? Give absolute location here.
# If left blank, the mask handler will download the nominal nhits map.
input_nhits_path:

# Copies of all masks are stored under these names inside mask_directory
analysis_mask: "analysis_mask.fits"
nhits_map: "nhits_map.fits"
binary_mask: "binary_mask.fits"
galactic_mask_root: "planck_galactic_mask"
point_source_mask: "point_source_mask.fits"

include_in_mask: []
gal_mask_mode: "gal070"
apod_radius: 10.0
apod_radius_point_source: 4.0
apod_type: "C1"
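# NB: apod_type "C1" matches NaMaster's mask_apodization conventions, in
# which the apodization radii above are given in degrees (an assumption here).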

####################################
# Metadata related to the analysis #
####################################
## General parameters
general_pars:
nside: 256
lmin: 30
lmax: 600
deltal: 10
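# With lmin=30, lmax=600 and deltal=10, a uniform scheme gives
# (600 - 30) / 10 = 57 bandpowers (assuming uniform bins).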
binning_file: "binning.npz"
pure_B: True
# Where the beam window is lower than beam_floor, set it to beam_floor
beam_floor: 1.e-2

## Simulation related
sim_pars:
anisotropic_noise: False
null_e_modes: False
num_sims: 100
## Used for cosmo TF validation and cov sims
cosmology:
cosmomc_theta: 0.0104085
As: 2.1e-9
ombh2: 0.02237
omch2: 0.1200
ns: 0.9649
Alens: 1.0
tau: 0.0544
r: 0.01
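# (The values above are consistent with the Planck 2018 best-fit LCDM
# parameters; r = 0.01 is added by hand for TF validation and covariance sims.)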
noise:
survey_years: 5.
sensitivity_mode: "baseline"
one_over_f_mode: "optimistic"

mock_nsrcs: 80
mock_srcs_hole_radius: 40

## Filtering related parameters
filtering:

slurm: False # Run TOAST filtering locally (False) or via the SLURM scheduler (True)
# `slurm_autosubmit` only takes effect if `slurm` is True. If True, the
# generated sbatch scripts are submitted automatically; if False, the scripts
# are generated but not submitted, giving you a chance to inspect them first.
slurm_autosubmit: False
scripts_dir: "../sbatch" # directory of filtering scripts

## Define filtering tags
tags_settings:

# If True, beams are applied only to the validation simulations. By default
# (False), beams are applied to both the estimation and validation sims, to
# account for potential effects of the beam on the TF (e.g. whether the beam
# commutes with the filtering).
do_not_beam_est_sims: False

none: # placeholder for a no-filtering operation
filtering_type: "m_filterer"
m_cut: 0
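# m_cut: 0 presumably removes no m modes at all, i.e. an identity filter,
# which is what makes this tag a no-filtering placeholder.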
beam: null # a null beam means no beam is applied in the TF estimation steps

mcut30:
filtering_type: "m_filterer"
m_cut: 30
beam: null # a null beam means no beam is applied in the TF estimation steps

mcut15:
filtering_type: "m_filterer"
m_cut: 15
beam: null

toast_SAT1_f090:
filtering_type: "toast"
template: "../paramfiles/run_toast.sh.j2" # TOAST template script.
config: "../paramfiles/defaults.toml" # TOAST toml config file
schedule: "../outputs/schedules/schedule_sat.txt" # TOAST schedule file
tf_instrument: "SAT1" # Instrument used for transfer function calculation
tf_band: "SAT_f090" # Band used for transfer function calculation
beam: null

## Number of sims for tf estimation and validation
tf_est_num_sims: 30

## Parameters of the PL sims used for TF estimation
power_law_pars_tf_est:
amp: 1.0
delta_ell: 10
power_law_index: 2.

## Parameters of the PL sims used for TF validation
power_law_pars_tf_val:
amp:
TT: 10.
EE: 1.
BB: 0.05
TE: 2.5
TB: 0.
EB: 0.
delta_ell: 10
power_law_index: 0.5
90 changes: 90 additions & 0 deletions pipeline/run_planck_mpi4py.sh
@@ -0,0 +1,90 @@
#!/usr/bin/env bash

paramfile='../paramfiles/paramfile_SAT_bbpower.yaml'

echo "Running pipeline with paramfile: ${paramfile}"

# OpenMP settings:
export OMP_NUM_THREADS=1
export OMP_PLACES=threads
export OMP_PROC_BIND=spread
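# One OpenMP thread per process keeps the MPI steps below from oversubscribing
# cores when srun packs many ranks onto a node.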

# Run serially
echo "Pre-processing real data"
echo "-----------------------------"
python pre_processer.py --globals ${paramfile} --verbose
python mask_handler.py --globals ${paramfile} --plots --verbose
python pre_processer_ext.py --globals ${paramfile} --planck --data --plots
python pre_processer_ext.py --globals ${paramfile} --planck --noise
python pre_processer_ext.py --globals ${paramfile} --planck --sims

echo "Running filterer for data"
echo "-------------------------"
python filterer.py --globals ${paramfile} --data

echo "Running mcm..."
echo "--------------"
python mcmer.py --globals ${paramfile} --plot



# run in parallel with salloc -N 1 -C cpu -q interactive -t 00:30:00
module load python
mamba activate my_mpi4py_env
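# Optional sanity check (not part of the pipeline) that mpi4py sees the
# scheduled ranks; adjust -n to your allocation:
#   srun -n 4 python -c "from mpi4py import MPI; c = MPI.COMM_WORLD; print(c.Get_rank(), '/', c.Get_size())"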

echo "Generating transfer sims" # 1m20 for 30 sims
echo "-----------------------------"
srun -n 30 -c 8 python pre_processer.py --globals ${paramfile} --verbose --sims

echo "Running filterer for transfer" # 1m20 for 30 sims
echo "-----------------------------"
srun -n 30 -c 8 --cpu_bind=cores python filterer.py --globals ${paramfile} --transfer

echo "Running filterer for sims" # 2m50 for 100 sims / 10 splits
echo "-------------------------"
srun -n 25 -c 10 --cpu_bind=cores python filterer.py --globals ${paramfile} --sims

echo "Running cl estimation for tf estimation" # 1m50 for 30 sims
echo "---------------------------------------"
srun -n 30 -c 8 --cpu_bind=cores python pcler.py --globals ${paramfile} --tf_est --verbose

echo "Running transfer estimation"
echo "---------------------------"
python transfer.py --globals ${paramfile}

echo "Running cl estimation for validation"
echo "------------------------------------"
srun -n 30 -c 8 --cpu_bind=cores python pcler.py --globals ${paramfile} --tf_val --verbose

echo "Transfer validation"
echo "---------------------"
python transfer_validator.py --globals ${paramfile}



# Run in parallel with salloc -N 1 -C cpu -q interactive -t 01:00:00
echo "Running pcler on data"
echo "---------------------"
python pcler.py --globals ${paramfile} --data

echo "Running pcler on sims" # 5m for 100 sims / 21 splits
echo "---------------------"
srun -n 25 -c 10 --cpu_bind=cores python pcler.py --globals ${paramfile} --sims --verbose

echo "Running coadder on data"
echo "---------------------"
python coadder.py --globals ${paramfile} --data

echo "Running coadder on sims" # 0m37s for 100 sims / 21 splits
echo "---------------------"
srun -n 25 -c 10 --cpu_bind=cores python coadder.py --globals ${paramfile} --sims

echo "Running covariance estimation"
echo "-----------------------------"
python covfefe.py --globals ${paramfile}

echo "Create sacc files for sims and data"
echo "-----------------------------------"
python saccer.py --globals ${paramfile} --data
# 2m for 100 sims / 21 splits
srun -n 25 -c 10 --cpu_bind=cores python saccer.py --globals ${paramfile} --sims
2 changes: 1 addition & 1 deletion pipeline/saccer.py
@@ -78,7 +78,7 @@ def saccer(args):
ifp2*len(lb):(ifp2+1)*len(lb)] = cov_dict[fp1+fp2]

covs[ms1, ms2, ms3, ms4] = thin_covariance(
-    cov, len(lb), len(field_pairs), order=3
+    cov, len(lb), len(field_pairs), order=0
)

full_cov_size = len(ps_names)*len(lb)*len(field_pairs)
