diff --git a/Tools/machines/perlmutter-nersc/perlmutter_cpu.sbatch b/Tools/machines/perlmutter-nersc/perlmutter_cpu.sbatch
index a1687b2a3b4..d13c7e3b4e5 100644
--- a/Tools/machines/perlmutter-nersc/perlmutter_cpu.sbatch
+++ b/Tools/machines/perlmutter-nersc/perlmutter_cpu.sbatch
@@ -33,6 +33,7 @@ INPUTS=inputs_small
 export SRUN_CPUS_PER_TASK=16  # 8 cores per chiplet, 2x SMP
 export OMP_PLACES=threads
 export OMP_PROC_BIND=spread
+export OMP_NUM_THREADS=${SRUN_CPUS_PER_TASK}

 srun --cpu-bind=cores \
   ${EXE} ${INPUTS} \
diff --git a/Tools/machines/perlmutter-nersc/perlmutter_gpu.sbatch b/Tools/machines/perlmutter-nersc/perlmutter_gpu.sbatch
index 873fd30355f..f2ea5fa3e7f 100644
--- a/Tools/machines/perlmutter-nersc/perlmutter_gpu.sbatch
+++ b/Tools/machines/perlmutter-nersc/perlmutter_gpu.sbatch
@@ -34,6 +34,7 @@ export MPICH_OFI_NIC_POLICY=GPU
 # threads for OpenMP and threaded compressors per MPI rank
 # note: 16 avoids hyperthreading (32 virtual cores, 16 physical)
 export SRUN_CPUS_PER_TASK=16
+export OMP_NUM_THREADS=${SRUN_CPUS_PER_TASK}

 # GPU-aware MPI optimizations
 GPU_AWARE_MPI="amrex.use_gpu_aware_mpi=1"
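
The patch exports OMP_NUM_THREADS explicitly because SRUN_CPUS_PER_TASK only controls the CPUs Slurm assigns per task, not the OpenMP thread count. Below is a minimal sketch, not part of either sbatch file, for checking inside an allocation that each rank launched by srun inherits the intended thread count; the --ntasks-per-node value is an arbitrary illustration.

# sanity-check sketch (assumes an active Perlmutter allocation; rank count is illustrative)
export SRUN_CPUS_PER_TASK=16
export OMP_NUM_THREADS=${SRUN_CPUS_PER_TASK}
# each rank prints the OpenMP thread count it sees in its environment
srun --ntasks-per-node=4 --cpu-bind=cores \
  bash -c 'echo "rank ${SLURM_PROCID}: OMP_NUM_THREADS=${OMP_NUM_THREADS}"'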