This repository has been archived by the owner on Mar 21, 2024. It is now read-only.

Add docker configs for new compilers. #251

Merged 4 commits on Feb 18, 2022
@@ -0,0 +1,113 @@
# Dockerfile for libcudacxx_base:host_x86_64_ubuntu_20.04__target_x86_64_ubuntu_20.04__clang_11_cxx_11

FROM ubuntu:20.04

MAINTAINER Bryce Adelstein Lelbach <[email protected]>

ARG LIBCUDACXX_SKIP_BASE_TESTS_BUILD
ARG LIBCUDACXX_COMPUTE_ARCHS
ARG COMPILER_CXX_DIALECT

ENV COMPILER_CXX_DIALECT=$COMPILER_CXX_DIALECT

###############################################################################
# BUILD: The following is invoked when the image is built.

SHELL ["/usr/bin/env", "bash", "-c"]

RUN apt-get -y update\
&& DEBIAN_FRONTEND="noninteractive" TZ="US/Pacific"\
apt-get -y install clang-11 python3-pip \
&& python3 -m pip install lit\
&& mkdir -p /sw/gpgpu/libcudacxx/build\
&& mkdir -p /sw/gpgpu/libcudacxx/libcxx/build

# Update CMake to 3.18
ADD https://github.com/Kitware/CMake/releases/download/v3.18.5/cmake-3.18.5-Linux-x86_64.sh /tmp/cmake.sh
RUN sh /tmp/cmake.sh --skip-license --prefix=/usr

# For debugging.
#RUN apt-get -y install gdb strace vim

# We use ADD here because it invalidates the cache for subsequent steps, which
# is what we want, as we need to rebuild if the sources have changed.

# Copy NVCC and the CUDA runtime from the source tree.
ADD bin /sw/gpgpu/bin

# Copy the core CUDA headers from the source tree.
ADD cuda/import/*.h* /sw/gpgpu/cuda/import/
ADD cuda/common/*.h* /sw/gpgpu/cuda/common/
ADD cuda/tools/ /sw/gpgpu/cuda/tools/
ADD opencl/import/cl_rel/CL/*.h* /sw/gpgpu/opencl/import/cl_rel/CL/

# Copy libcu++ sources from the source tree.
ADD libcudacxx /sw/gpgpu/libcudacxx

# List out everything in /sw before the build.
RUN echo "Contents of /sw:" && cd /sw/ && find

# Build libc++ and configure libc++ tests.
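# Note: `set -o pipefail` makes this RUN step fail if cmake or make fails, even
# though their output is piped through `tee` into a log file.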
RUN set -o pipefail; cd /sw/gpgpu/libcudacxx/libcxx/build\
&& cmake ../..\
-DLIBCUDACXX_ENABLE_STATIC_LIBRARY=OFF\
-DLIBCUDACXX_ENABLE_LIBCUDACXX_TESTS=OFF\
-DLIBCUDACXX_ENABLE_LIBCXX_TESTS=ON\
-DLIBCXX_TEST_STANDARD_VER=c++$COMPILER_CXX_DIALECT\
-DCMAKE_CXX_COMPILER=clang-11\
-DCMAKE_CUDA_COMPILER=/sw/gpgpu/bin/x86_64_Linux_release/nvcc\
&& make -j\
2>&1 | tee /sw/gpgpu/libcudacxx/build/cmake_libcxx.log

# Configure libcu++ tests.
RUN set -o pipefail; cd /sw/gpgpu/libcudacxx/build\
&& cmake ..\
-DCMAKE_CXX_COMPILER=clang-11\
-DCMAKE_CUDA_COMPILER=/sw/gpgpu/bin/x86_64_Linux_release/nvcc\
-DLIBCUDACXX_ENABLE_LIBCUDACXX_TESTS=ON\
-DLIBCUDACXX_ENABLE_LIBCXX_TESTS=OFF\
-DLIBCUDACXX_TEST_STANDARD_VER=c++$COMPILER_CXX_DIALECT\
2>&1 | tee /sw/gpgpu/libcudacxx/build/cmake_libcudacxx.log

# Build tests if requested.
RUN set -o pipefail; cd /sw/gpgpu/libcudacxx\
&& LIBCUDACXX_COMPUTE_ARCHS=$LIBCUDACXX_COMPUTE_ARCHS\
LIBCUDACXX_SKIP_BASE_TESTS_BUILD=$LIBCUDACXX_SKIP_BASE_TESTS_BUILD\
/sw/gpgpu/libcudacxx/utils/nvidia/linux/perform_tests.bash\
--skip-tests-runs\
2>&1 | tee /sw/gpgpu/libcudacxx/build/build_lit_all.log

# Build tests for sm6x if requested.
RUN set -o pipefail; cd /sw/gpgpu/libcudacxx\
&& LIBCUDACXX_COMPUTE_ARCHS="60 61 62"\
LIBCUDACXX_SKIP_BASE_TESTS_BUILD=$LIBCUDACXX_SKIP_BASE_TESTS_BUILD\
/sw/gpgpu/libcudacxx/utils/nvidia/linux/perform_tests.bash\
--skip-tests-runs\
--skip-libcxx-tests\
2>&1 | tee /sw/gpgpu/libcudacxx/build/build_lit_sm6x.log

# Build tests for sm7x if requested.
RUN set -o pipefail; cd /sw/gpgpu/libcudacxx\
&& LIBCUDACXX_COMPUTE_ARCHS="70 72 75"\
LIBCUDACXX_SKIP_BASE_TESTS_BUILD=$LIBCUDACXX_SKIP_BASE_TESTS_BUILD\
/sw/gpgpu/libcudacxx/utils/nvidia/linux/perform_tests.bash\
--skip-tests-runs\
--skip-libcxx-tests\
2>&1 | tee /sw/gpgpu/libcudacxx/build/build_lit_sm7x.log

# Build tests for sm8x if requested.
RUN set -o pipefail; cd /sw/gpgpu/libcudacxx\
&& LIBCUDACXX_COMPUTE_ARCHS="80"\
LIBCUDACXX_SKIP_BASE_TESTS_BUILD=$LIBCUDACXX_SKIP_BASE_TESTS_BUILD\
/sw/gpgpu/libcudacxx/utils/nvidia/linux/perform_tests.bash\
--skip-tests-runs\
--skip-libcxx-tests\
2>&1 | tee /sw/gpgpu/libcudacxx/build/build_lit_sm8x.log

# Package the logs up, because `docker container cp` doesn't support wildcards,
# and I want to limit the number of places we have to make changes when we ship
# new architectures.
RUN cd /sw/gpgpu/libcudacxx/build && tar -cvf logs.tar *.log

WORKDIR /sw/gpgpu/libcudacxx
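
For reference, a minimal sketch of how this base image might be built. The Dockerfile path, image tag, and argument values below are illustrative assumptions, not taken from this PR; the build is assumed to run from the /sw/gpgpu source tree so the ADD paths above resolve.

# Hypothetical invocation; the Dockerfile location (libcudacxx/docker/base.Dockerfile) is assumed.
cd /sw/gpgpu
docker build \
  --build-arg COMPILER_CXX_DIALECT=11 \
  --build-arg LIBCUDACXX_COMPUTE_ARCHS="70 80" \
  --build-arg LIBCUDACXX_SKIP_BASE_TESTS_BUILD=1 \
  -t libcudacxx_base:host_x86_64_ubuntu_20.04__target_x86_64_ubuntu_20.04__clang_11_cxx_11 \
  -f libcudacxx/docker/base.Dockerfile \
  .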

@@ -0,0 +1,29 @@
# Dockerfile for libcudacxx:host_x86_64_ubuntu_20.04__target_x86_64_ubuntu_20.04__clang_11_cxx_11

FROM libcudacxx_base:host_x86_64_ubuntu_20.04__target_x86_64_ubuntu_20.04__clang_11_cxx_11

MAINTAINER Bryce Adelstein Lelbach <[email protected]>

###############################################################################
# BUILD: The following is invoked when the CUDA driver is attached.

# We use ADD here because it invalidates the cache for subsequent steps, which
# is what we want, as we need to rebuild if the sources have changed.

ADD nvidia-modprobe /usr/bin/
# Restore setuid.
RUN chmod +s /usr/bin/nvidia-modprobe

ADD libcuda.so* /usr/lib/x86_64-linux-gnu/
ADD libnvidia-ptxjitcompiler.so* /usr/lib/x86_64-linux-gnu/

###############################################################################
# CMD: The following is invoked when the image is run.

RUN useradd -ms /bin/bash libcudacxx && chown -R libcudacxx:libcudacxx /sw
USER libcudacxx

WORKDIR /sw/gpgpu/libcudacxx

CMD /sw/gpgpu/libcudacxx/utils/nvidia/linux/perform_tests.bash
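
As a rough sketch of how this runtime image might be used, assuming the host driver files live in the usual Ubuntu locations and the Dockerfile sits in the current directory (both assumptions, not stated in the PR):

# Stage the host driver components into the build context so the ADD lines above can find them.
cp /usr/bin/nvidia-modprobe .
cp /usr/lib/x86_64-linux-gnu/libcuda.so* .
cp /usr/lib/x86_64-linux-gnu/libnvidia-ptxjitcompiler.so* .
docker build -t libcudacxx:host_x86_64_ubuntu_20.04__target_x86_64_ubuntu_20.04__clang_11_cxx_11 .
# The container needs the GPU device nodes visible to actually run the tests.
docker run --device /dev/nvidiactl --device /dev/nvidia0 \
  libcudacxx:host_x86_64_ubuntu_20.04__target_x86_64_ubuntu_20.04__clang_11_cxx_11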

@@ -0,0 +1,12 @@
#! /bin/bash

HOST_ARCH=x86_64
HOST_OS_KIND=ubuntu
HOST_OS_VERSION=20.04
TARGET_ARCH=x86_64
TARGET_OS_KIND=ubuntu
TARGET_OS_VERSION=20.04
COMPILER_KIND=clang
COMPILER_VERSION=11
COMPILER_CXX_DIALECT=11
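
These variables appear to encode the image name used by the Dockerfiles above. A hypothetical sketch of how a driver script could compose that tag; the actual consumer of this file is not shown in the diff.

# Assumes this config file has been sourced.
TAG="host_${HOST_ARCH}_${HOST_OS_KIND}_${HOST_OS_VERSION}"
TAG+="__target_${TARGET_ARCH}_${TARGET_OS_KIND}_${TARGET_OS_VERSION}"
TAG+="__${COMPILER_KIND}_${COMPILER_VERSION}_cxx_${COMPILER_CXX_DIALECT}"
echo "libcudacxx_base:${TAG}"
# -> libcudacxx_base:host_x86_64_ubuntu_20.04__target_x86_64_ubuntu_20.04__clang_11_cxx_11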

@@ -0,0 +1,113 @@
# Dockerfile for libcudacxx_base:host_x86_64_ubuntu_20.04__target_x86_64_ubuntu_20.04__clang_11_cxx_14

FROM ubuntu:20.04

MAINTAINER Bryce Adelstein Lelbach <[email protected]>

ARG LIBCUDACXX_SKIP_BASE_TESTS_BUILD
ARG LIBCUDACXX_COMPUTE_ARCHS
ARG COMPILER_CXX_DIALECT

ENV COMPILER_CXX_DIALECT=$COMPILER_CXX_DIALECT

###############################################################################
# BUILD: The following is invoked when the image is built.

SHELL ["/usr/bin/env", "bash", "-c"]

RUN apt-get -y update\
&& DEBIAN_FRONTEND="noninteractive" TZ="US/Pacific"\
apt-get -y install clang-11 python3-pip \
&& python3 -m pip install lit\
&& mkdir -p /sw/gpgpu/libcudacxx/build\
&& mkdir -p /sw/gpgpu/libcudacxx/libcxx/build

# Update CMake to 3.18
ADD https://github.com/Kitware/CMake/releases/download/v3.18.5/cmake-3.18.5-Linux-x86_64.sh /tmp/cmake.sh
RUN sh /tmp/cmake.sh --skip-license --prefix=/usr

# For debugging.
#RUN apt-get -y install gdb strace vim

# We use ADD here because it invalidates the cache for subsequent steps, which
# is what we want, as we need to rebuild if the sources have changed.

# Copy NVCC and the CUDA runtime from the source tree.
ADD bin /sw/gpgpu/bin

# Copy the core CUDA headers from the source tree.
ADD cuda/import/*.h* /sw/gpgpu/cuda/import/
ADD cuda/common/*.h* /sw/gpgpu/cuda/common/
ADD cuda/tools/ /sw/gpgpu/cuda/tools/
ADD opencl/import/cl_rel/CL/*.h* /sw/gpgpu/opencl/import/cl_rel/CL/

# Copy libcu++ sources from the source tree.
ADD libcudacxx /sw/gpgpu/libcudacxx

# List out everything in /sw before the build.
RUN echo "Contents of /sw:" && cd /sw/ && find

# Build libc++ and configure libc++ tests.
RUN set -o pipefail; cd /sw/gpgpu/libcudacxx/libcxx/build\
&& cmake ../..\
-DLIBCUDACXX_ENABLE_STATIC_LIBRARY=OFF\
-DLIBCUDACXX_ENABLE_LIBCUDACXX_TESTS=OFF\
-DLIBCUDACXX_ENABLE_LIBCXX_TESTS=ON\
-DLIBCXX_TEST_STANDARD_VER=c++$COMPILER_CXX_DIALECT\
-DCMAKE_CXX_COMPILER=clang-11\
-DCMAKE_CUDA_COMPILER=/sw/gpgpu/bin/x86_64_Linux_release/nvcc\
&& make -j\
2>&1 | tee /sw/gpgpu/libcudacxx/build/cmake_libcxx.log

# Configure libcu++ tests.
RUN set -o pipefail; cd /sw/gpgpu/libcudacxx/build\
&& cmake ..\
-DCMAKE_CXX_COMPILER=clang-11\
-DCMAKE_CUDA_COMPILER=/sw/gpgpu/bin/x86_64_Linux_release/nvcc\
-DLIBCUDACXX_ENABLE_LIBCUDACXX_TESTS=ON\
-DLIBCUDACXX_ENABLE_LIBCXX_TESTS=OFF\
-DLIBCUDACXX_TEST_STANDARD_VER=c++$COMPILER_CXX_DIALECT\
2>&1 | tee /sw/gpgpu/libcudacxx/build/cmake_libcudacxx.log

# Build tests if requested.
RUN set -o pipefail; cd /sw/gpgpu/libcudacxx\
&& LIBCUDACXX_COMPUTE_ARCHS=$LIBCUDACXX_COMPUTE_ARCHS\
LIBCUDACXX_SKIP_BASE_TESTS_BUILD=$LIBCUDACXX_SKIP_BASE_TESTS_BUILD\
/sw/gpgpu/libcudacxx/utils/nvidia/linux/perform_tests.bash\
--skip-tests-runs\
2>&1 | tee /sw/gpgpu/libcudacxx/build/build_lit_all.log

# Build tests for sm6x if requested.
RUN set -o pipefail; cd /sw/gpgpu/libcudacxx\
&& LIBCUDACXX_COMPUTE_ARCHS="60 61 62"\
LIBCUDACXX_SKIP_BASE_TESTS_BUILD=$LIBCUDACXX_SKIP_BASE_TESTS_BUILD\
/sw/gpgpu/libcudacxx/utils/nvidia/linux/perform_tests.bash\
--skip-tests-runs\
--skip-libcxx-tests\
2>&1 | tee /sw/gpgpu/libcudacxx/build/build_lit_sm6x.log

# Build tests for sm7x if requested.
RUN set -o pipefail; cd /sw/gpgpu/libcudacxx\
&& LIBCUDACXX_COMPUTE_ARCHS="70 72 75"\
LIBCUDACXX_SKIP_BASE_TESTS_BUILD=$LIBCUDACXX_SKIP_BASE_TESTS_BUILD\
/sw/gpgpu/libcudacxx/utils/nvidia/linux/perform_tests.bash\
--skip-tests-runs\
--skip-libcxx-tests\
2>&1 | tee /sw/gpgpu/libcudacxx/build/build_lit_sm7x.log

# Build tests for sm8x if requested.
RUN set -o pipefail; cd /sw/gpgpu/libcudacxx\
&& LIBCUDACXX_COMPUTE_ARCHS="80"\
LIBCUDACXX_SKIP_BASE_TESTS_BUILD=$LIBCUDACXX_SKIP_BASE_TESTS_BUILD\
/sw/gpgpu/libcudacxx/utils/nvidia/linux/perform_tests.bash\
--skip-tests-runs\
--skip-libcxx-tests\
2>&1 | tee /sw/gpgpu/libcudacxx/build/build_lit_sm8x.log

# Package the logs up, because `docker container cp` doesn't support wildcards,
# and I want to limit the number of places we have to make changes when we ship
# new architectures.
RUN cd /sw/gpgpu/libcudacxx/build && tar -cvf logs.tar *.log

WORKDIR /sw/gpgpu/libcudacxx

@@ -0,0 +1,29 @@
# Dockerfile for libcudacxx:host_x86_64_ubuntu_20.04__target_x86_64_ubuntu_20.04__clang_11_cxx_14

FROM libcudacxx_base:host_x86_64_ubuntu_20.04__target_x86_64_ubuntu_20.04__clang_11_cxx_14

MAINTAINER Bryce Adelstein Lelbach <[email protected]>

###############################################################################
# BUILD: The following is invoked when the CUDA driver is attached.

# We use ADD here because it invalidates the cache for subsequent steps, which
# is what we want, as we need to rebuild if the sources have changed.

ADD nvidia-modprobe /usr/bin/
# Restore setuid.
RUN chmod +s /usr/bin/nvidia-modprobe

ADD libcuda.so* /usr/lib/x86_64-linux-gnu/
ADD libnvidia-ptxjitcompiler.so* /usr/lib/x86_64-linux-gnu/

###############################################################################
# CMD: The following is invoked when the image is run.

RUN useradd -ms /bin/bash libcudacxx && chown -R libcudacxx:libcudacxx /sw
USER libcudacxx

WORKDIR /sw/gpgpu/libcudacxx

CMD /sw/gpgpu/libcudacxx/utils/nvidia/linux/perform_tests.bash

@@ -0,0 +1,12 @@
#! /bin/bash

HOST_ARCH=x86_64
HOST_OS_KIND=ubuntu
HOST_OS_VERSION=20.04
TARGET_ARCH=x86_64
TARGET_OS_KIND=ubuntu
TARGET_OS_VERSION=20.04
COMPILER_KIND=clang
COMPILER_VERSION=11
COMPILER_CXX_DIALECT=14
