Fix parallel 3D Poisson solver with heffte #86
Workflow file for this run
name: 🐧 CUDA

on: [push, pull_request]
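
# Cancel superseded in-progress runs for the same branch/ref.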
concurrency:
  group: ${{ github.ref }}-${{ github.head_ref }}-cuda
  cancel-in-progress: true
jobs:
# Ref.:
# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/ubuntu18.04/10.1/base/Dockerfile
# https://github.com/ComputationalRadiationPhysics/picongpu/blob/0.5.0/share/picongpu/dockerfiles/ubuntu-1604/Dockerfile
# https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/
  build_nvcc:
    name: NVCC 11.3 SP
    runs-on: ubuntu-20.04
    if: github.event.pull_request.draft == false
    env:
      CXXFLAGS: "-Werror"
      CMAKE_GENERATOR: Ninja
    steps:
    - uses: actions/checkout@v4
    - uses: actions/setup-python@v5
      name: Install Python
      with:
        python-version: '3.x'
    - name: install dependencies
      run: |
        .github/workflows/dependencies/nvcc11-3.sh
    - name: CCache Cache
      uses: actions/cache@v4
      with:
        path: ~/.cache/ccache
        key: ccache-${{ github.workflow }}-${{ github.job }}-git-${{ github.sha }}
        restore-keys: |
          ccache-${{ github.workflow }}-${{ github.job }}-git-
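    # The cache key above is unique per commit; the restore-keys prefix lets a
    # new commit start from the most recent ccache of this workflow/job.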
    - name: install openPMD-api
      run: |
        export CCACHE_COMPRESS=1
        export CCACHE_COMPRESSLEVEL=10
        export CCACHE_MAXSIZE=600M
        ccache -z

        export CEI_SUDO="sudo"
        export CEI_TMP="/tmp/cei"
        export PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH}
        export LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/usr/local/cuda/lib64:${LD_LIBRARY_PATH}
        which nvcc || echo "nvcc not in PATH!"
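        # Build and install openPMD-api and heFFTe from source via
        # cmake-easyinstall; heFFTe gets only its CUDA backend (FFTW, MKL,
        # ROCm, oneAPI and MAGMA stay off), matching the GPU build of the
        # heFFTe-based Poisson solver exercised in this job.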
        cmake-easyinstall --prefix=/usr/local \
          git+https://github.com/openPMD/[email protected] \
          -DopenPMD_USE_PYTHON=OFF \
          -DBUILD_TESTING=OFF \
          -DBUILD_EXAMPLES=OFF \
          -DBUILD_CLI_TOOLS=OFF \
          -DCMAKE_CXX_COMPILER_LAUNCHER=$(which ccache) \
          -DCMAKE_VERBOSE_MAKEFILE=ON

        cmake-easyinstall --prefix=/usr/local \
          git+https://github.com/icl-utk-edu/[email protected] \
          -DCMAKE_CXX_COMPILER_LAUNCHER=$(which ccache) \
          -DCMAKE_CXX_STANDARD=17 -DHeffte_ENABLE_DOXYGEN=OFF \
          -DHeffte_ENABLE_FFTW=OFF -DHeffte_ENABLE_TESTING=OFF \
          -DHeffte_ENABLE_CUDA=ON -DHeffte_ENABLE_ROCM=OFF \
          -DHeffte_ENABLE_ONEAPI=OFF -DHeffte_ENABLE_MKL=OFF \
          -DHeffte_ENABLE_PYTHON=OFF -DHeffte_ENABLE_FORTRAN=OFF \
          -DHeffte_ENABLE_MAGMA=OFF \
          -DCMAKE_VERBOSE_MAKEFILE=ON
    - name: build WarpX
      run: |
        export CCACHE_COMPRESS=1
        export CCACHE_COMPRESSLEVEL=10
        export CCACHE_MAXSIZE=600M

        export PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH}
        export LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/usr/local/cuda/lib64:${LD_LIBRARY_PATH}
        which nvcc || echo "nvcc not in PATH!"
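        # Configure WarpX in single precision for CUDA (sm_60) with the FFT
        # solvers enabled and heFFTe as the distributed-FFT backend
        # (-DWarpX_FFT=ON -DWarpX_HEFFTE=ON), i.e. the code path touched by
        # this PR's parallel 3D Poisson solver fix.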
        cmake -S . -B build_sp \
          -DCMAKE_VERBOSE_MAKEFILE=ON \
          -DWarpX_COMPUTE=CUDA \
          -DWarpX_EB=ON \
          -DWarpX_PYTHON=ON \
          -DAMReX_CUDA_ARCH=6.0 \
          -DWarpX_OPENPMD=ON \
          -DWarpX_openpmd_internal=OFF \
          -DWarpX_PRECISION=SINGLE \
          -DWarpX_FFT=ON \
          -DWarpX_HEFFTE=ON \
          -DAMReX_CUDA_ERROR_CROSS_EXECUTION_SPACE_CALL=ON \
          -DAMReX_CUDA_ERROR_CAPTURE_THIS=ON
        cmake --build build_sp -j 4

        python3 -m pip install --upgrade pip
        python3 -m pip install --upgrade build packaging setuptools wheel
        export WARPX_MPI=ON
        export PYWARPX_LIB_DIR=$PWD/build_sp/lib/site-packages/pywarpx/
        python3 -m pip wheel .
        python3 -m pip install *.whl

        ccache -s
        du -hs ~/.cache/ccache
  # make sure legacy build system continues to build, i.e., that we don't forget
  # to add new .cpp files
  build_nvcc_gnumake:
    name: NVCC 11.8.0 GNUmake
    runs-on: ubuntu-20.04
    if: github.event.pull_request.draft == false
    steps:
    - uses: actions/checkout@v4
    - name: install dependencies
      run: |
        .github/workflows/dependencies/nvcc11-8.sh
    - name: CCache Cache
      uses: actions/cache@v4
      with:
        path: ~/.cache/ccache
        key: ccache-${{ github.workflow }}-${{ github.job }}-git-${{ github.sha }}
        restore-keys: |
          ccache-${{ github.workflow }}-${{ github.job }}-git-
    - name: build WarpX
      run: |
        export CCACHE_COMPRESS=1
        export CCACHE_COMPRESSLEVEL=10
        export CCACHE_MAXSIZE=600M
        ccache -z

        export PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH}
        export LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/usr/local/cuda/lib64:${LD_LIBRARY_PATH}
        which nvcc || echo "nvcc not in PATH!"

        git clone https://github.com/AMReX-Codes/amrex.git ../amrex
        cd ../amrex && git checkout --detach 028638564f7be0694b9898f8d4088cdbf9a6f9f5 && cd -
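        # Legacy GNUmake build against a pinned AMReX commit; USE_FFT=TRUE
        # keeps the FFT-based solver sources in this build path as well.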
        make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4

        ccache -s
        du -hs ~/.cache/ccache
  build_nvhpc24-1-nvcc:
    name: NVHPC@24.1 NVCC/NVC++ Release [tests]
    runs-on: ubuntu-20.04
    if: github.event.pull_request.draft == false
    #env:
    #  # For NVHPC, Ninja is slower than the default:
    #  CMAKE_GENERATOR: Ninja
    steps:
    - uses: actions/checkout@v4
    - name: Dependencies
      run: .github/workflows/dependencies/nvhpc.sh
    - name: CCache Cache
      uses: actions/cache@v4
      with:
        path: ~/.cache/ccache
        key: ccache-${{ github.workflow }}-${{ github.job }}-git-${{ github.sha }}
        restore-keys: |
          ccache-${{ github.workflow }}-${{ github.job }}-git-
    - name: Build & Install
      run: |
        export CCACHE_COMPRESS=1
        export CCACHE_COMPRESSLEVEL=10
        export CCACHE_MAXSIZE=600M
        ccache -z

        source /etc/profile.d/modules.sh
        module load /opt/nvidia/hpc_sdk/modulefiles/nvhpc/24.1
        which nvcc || echo "nvcc not in PATH!"
        which nvc++ || echo "nvc++ not in PATH!"
        which nvc || echo "nvc not in PATH!"
        nvcc --version
        nvc++ --version
        nvc --version
        cmake --version

        export CC=$(which nvc)
        export CXX=$(which nvc++)
        export CUDACXX=$(which nvcc)
        export CUDAHOSTCXX=${CXX}
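        # Use the NVHPC compilers: nvc/nvc++ for host code, nvcc for device
        # code, with nvc++ also serving as nvcc's host compiler (CUDAHOSTCXX).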
        cmake -S . -B build \
          -DCMAKE_VERBOSE_MAKEFILE=ON \
          -DWarpX_COMPUTE=CUDA \
          -DWarpX_EB=ON \
          -DWarpX_PYTHON=OFF \
          -DAMReX_CUDA_ARCH=8.0 \
          -DWarpX_OPENPMD=ON \
          -DWarpX_FFT=ON \
          -DAMReX_CUDA_ERROR_CROSS_EXECUTION_SPACE_CALL=ON \
          -DAMReX_CUDA_ERROR_CAPTURE_THIS=ON
        cmake --build build -j 4

        # work-around for mpi4py 3.1.1 build system issue with using
        # a GNU-built Python executable with non-GNU Python modules
        # https://github.com/mpi4py/mpi4py/issues/114
        #export CFLAGS="-noswitcherror"
        #python3 -m pip install --upgrade pip
        #python3 -m pip install --upgrade build packaging setuptools wheel
        #export WARPX_MPI=ON
        #export PYWARPX_LIB_DIR=$PWD/build/lib/site-packages/pywarpx/
        #python3 -m pip wheel .
        #python3 -m pip install *.whl

        ccache -s
        du -hs ~/.cache/ccache
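
# For reference, a minimal local sketch of the heFFTe-enabled configure from
# the build_nvcc job above, assuming CUDA, MPI, openPMD-api and heFFTe are
# already installed (the build directory name build_sp is just a placeholder):
#
#   cmake -S . -B build_sp \
#     -DWarpX_COMPUTE=CUDA \
#     -DWarpX_PRECISION=SINGLE \
#     -DWarpX_FFT=ON \
#     -DWarpX_HEFFTE=ON
#   cmake --build build_sp -j 4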