From a29e74ae0a9709b8ef639e1112318726ce0ee982 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cem=20G=C3=B6kmen?= <1408354+cgokmen@users.noreply.github.com> Date: Wed, 20 Nov 2024 14:37:24 -0800 Subject: [PATCH 1/5] New pip install & dockerfile setup with torch, cuda, curobo and ompl --- .github/workflows/examples-as-test.yml | 4 +- .github/workflows/profiling.yml | 2 +- .github/workflows/tests.yml | 2 +- docker/prod.Dockerfile | 53 +++++++++++++------------- setup.py | 6 ++- 5 files changed, 36 insertions(+), 31 deletions(-) diff --git a/.github/workflows/examples-as-test.yml b/.github/workflows/examples-as-test.yml index 7d595ae85..39d9cb51c 100644 --- a/.github/workflows/examples-as-test.yml +++ b/.github/workflows/examples-as-test.yml @@ -28,7 +28,7 @@ jobs: - name: Install working-directory: omnigibson-src - run: pip install -e .[dev] + run: pip install -e .[dev,primitives] - name: Generate example tests working-directory: omnigibson-src @@ -76,7 +76,7 @@ jobs: - name: Install working-directory: omnigibson-src - run: pip install -e .[dev] + run: pip install -e .[dev,primitives] - name: Run tests working-directory: omnigibson-src diff --git a/.github/workflows/profiling.yml b/.github/workflows/profiling.yml index a6ccba29f..d9a687c9c 100644 --- a/.github/workflows/profiling.yml +++ b/.github/workflows/profiling.yml @@ -34,7 +34,7 @@ jobs: - name: Install working-directory: omnigibson-src - run: pip install -e .[dev] + run: pip install -e .[dev,primitives] - name: Run performance benchmark run: bash scripts/profiling.sh diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 2c1f8eef0..ff1508e7f 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -51,7 +51,7 @@ jobs: - name: Install working-directory: omnigibson-src - run: pip install -e .[dev] + run: pip install -e .[dev,primitives] - name: Print env run: printenv diff --git a/docker/prod.Dockerfile b/docker/prod.Dockerfile index e30aaf5ee..1b6db8c87 100644 --- a/docker/prod.Dockerfile +++ b/docker/prod.Dockerfile @@ -23,6 +23,32 @@ ENV MAMBA_ROOT_PREFIX /micromamba RUN micromamba create -n omnigibson -c conda-forge python=3.10 RUN micromamba shell init --shell=bash +# Install evdev, which is a dependency of telemoma. It cannot be +# installed afterwards because it depends on some C compilation that +# fails if kernel headers ("sysroot") is present inside the conda env. +# The CUDA installation will add the kernel headers to the conda env. +# So we install evdev before installing CUDA. +RUN micromamba run -n omnigibson pip install evdev + +# Remove the ml-archive extension. It includes a version of torch +# that we don't want to use. +RUN rm -rf /isaac-sim/exts/omni.isaac.ml_archive + +# Install CUDA and torch +RUN micromamba run -n omnigibson micromamba install \ + pytorch torchvision pytorch-cuda=12.1 cuda=12.1.0 \ + -c pytorch -c nvidia -c conda-forge + +# Install curobo. This can normally be installed when OmniGibson is pip +# installed, but we need to install it beforehand here so that it doesn't +# have to happen on every time a CI action is run (otherwise it's just +# very slow) +# Here we also compile this such that it is compatible with GPU architectures +# Turing, Ampere, and Ada; which correspond to 20, 30, and 40 series GPUs. 
+# TORCH_CUDA_ARCH_LIST='7.5;8.0;8.6;8.7;8.9;7.5+PTX;8.0+PTX;8.6+PTX;8.7+PTX;8.9+PTX' +RUN TORCH_CUDA_ARCH_LIST='7.5+PTX' \ + micromamba run -n omnigibson pip install git+https://github.com/StanfordVL/curobo@06d8c79b660db60c2881e9319e60899cbde5c5b5#egg=nvidia_curobo --no-build-isolation + # Make sure isaac gets properly sourced every time omnigibson gets called ARG CONDA_ACT_FILE="/micromamba/envs/omnigibson/etc/conda/activate.d/env_vars.sh" RUN mkdir -p "/micromamba/envs/omnigibson/etc/conda/activate.d" @@ -33,31 +59,6 @@ RUN echo "source /isaac-sim/setup_conda_env.sh" >> $CONDA_ACT_FILE RUN echo "micromamba activate omnigibson" >> /root/.bashrc -# Prepare to build OMPL -ENV CXX="g++" -ENV MAKEFLAGS="-j `nproc`" -RUN micromamba run -n omnigibson micromamba install -c conda-forge boost && \ - micromamba run -n omnigibson pip install pyplusplus && \ - git clone https://github.com/ompl/ompl.git /ompl && \ - mkdir -p /ompl/build/Release && \ - sed -i "s/find_program(PYPY/# find_program(PYPY/g" /ompl/CMakeModules/Findpypy.cmake - -# Build and install OMPL -RUN micromamba run -n omnigibson /bin/bash --login -c 'source /isaac-sim/setup_conda_env.sh && (which python > /root/PYTHON_EXEC) && (echo $PYTHONPATH > /root/PYTHONPATH)' && \ - cd /ompl/build/Release && \ - micromamba run -n omnigibson cmake ../.. \ - -DCMAKE_INSTALL_PREFIX="$CONDA_PREFIX" \ - -DBOOST_ROOT="$CONDA_PREFIX" \ - -DPYTHON_EXEC=$(cat /root/PYTHON_EXEC) \ - -DPYTHONPATH=$(cat /root/PYTHONPATH) && \ - micromamba run -n omnigibson make -j 4 update_bindings && \ - micromamba run -n omnigibson make -j 4 && \ - cd py-bindings && \ - micromamba run -n omnigibson make install - -# Test OMPL -RUN micromamba run -n omnigibson python -c "from ompl import base" - # Copy over omnigibson source ADD . 
/omnigibson-src WORKDIR /omnigibson-src @@ -72,7 +73,7 @@ ENV DEV_MODE=${DEV_MODE} ARG WORKDIR_PATH=/omnigibson-src RUN if [ "$DEV_MODE" != "1" ]; then \ echo "OMNIGIBSON_NO_OMNIVERSE=1 python omnigibson/download_datasets.py" >> /root/.bashrc; \ - micromamba run -n omnigibson pip install -e .[dev]; \ + micromamba run -n omnigibson pip install -e .[dev,primitives]; \ else \ WORKDIR_PATH=/; \ cd / && rm -rf /omnigibson-src; \ diff --git a/setup.py b/setup.py index 9d7f8d331..45a13272b 100644 --- a/setup.py +++ b/setup.py @@ -62,7 +62,11 @@ "mkdocs-section-index", "mkdocs-literate-nav", "telemoma~=0.1.2", - ] + ], + "primitives": [ + "nvidia-curobo @ git+https://github.com/StanfordVL/curobo@06d8c79b660db60c2881e9319e60899cbde5c5b5", + "ompl @ https://storage.googleapis.com/gibson_scenes/ompl-1.6.0-cp310-cp310-manylinux_2_28_x86_64.whl", + ], }, tests_require=[], python_requires=">=3", From 1f6d74bc7743185dc27fd788e5e3d9879d41c4b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cem=20G=C3=B6kmen?= <1408354+cgokmen@users.noreply.github.com> Date: Wed, 20 Nov 2024 14:57:49 -0800 Subject: [PATCH 2/5] Be more conservative when removing prebundled torch --- docker/prod.Dockerfile | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/docker/prod.Dockerfile b/docker/prod.Dockerfile index 1b6db8c87..85776a7df 100644 --- a/docker/prod.Dockerfile +++ b/docker/prod.Dockerfile @@ -7,6 +7,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ && rm -rf /var/lib/apt/lists/* RUN rm -rf /isaac-sim/exts/omni.isaac.ml_archive/pip_prebundle/gym* +RUN rm -rf /isaac-sim/exts/omni.isaac.ml_archive/pip_prebundle/torch* RUN rm -rf /isaac-sim/kit/extscore/omni.kit.pip_archive/pip_prebundle/numpy* RUN /isaac-sim/python.sh -m pip install click~=8.1.3 @@ -30,10 +31,6 @@ RUN micromamba shell init --shell=bash # So we install evdev before installing CUDA. RUN micromamba run -n omnigibson pip install evdev -# Remove the ml-archive extension. It includes a version of torch -# that we don't want to use. -RUN rm -rf /isaac-sim/exts/omni.isaac.ml_archive - # Install CUDA and torch RUN micromamba run -n omnigibson micromamba install \ pytorch torchvision pytorch-cuda=12.1 cuda=12.1.0 \ From 2eddd246b084a4ceca47ced6858438e226261a28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cem=20G=C3=B6kmen?= <1408354+cgokmen@users.noreply.github.com> Date: Wed, 20 Nov 2024 15:20:03 -0800 Subject: [PATCH 3/5] Lower our cuda version --- docker/prod.Dockerfile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docker/prod.Dockerfile b/docker/prod.Dockerfile index 85776a7df..a549919c9 100644 --- a/docker/prod.Dockerfile +++ b/docker/prod.Dockerfile @@ -8,6 +8,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ RUN rm -rf /isaac-sim/exts/omni.isaac.ml_archive/pip_prebundle/gym* RUN rm -rf /isaac-sim/exts/omni.isaac.ml_archive/pip_prebundle/torch* +RUN rm -rf /isaac-sim/exts/omni.isaac.ml_archive/pip_prebundle/functorch* RUN rm -rf /isaac-sim/kit/extscore/omni.kit.pip_archive/pip_prebundle/numpy* RUN /isaac-sim/python.sh -m pip install click~=8.1.3 @@ -33,7 +34,7 @@ RUN micromamba run -n omnigibson pip install evdev # Install CUDA and torch RUN micromamba run -n omnigibson micromamba install \ - pytorch torchvision pytorch-cuda=12.1 cuda=12.1.0 \ + pytorch torchvision pytorch-cuda=11.8 cuda=11.8.0 \ -c pytorch -c nvidia -c conda-forge # Install curobo. 
This can normally be installed when OmniGibson is pip @@ -43,7 +44,7 @@ RUN micromamba run -n omnigibson micromamba install \ # Here we also compile this such that it is compatible with GPU architectures # Turing, Ampere, and Ada; which correspond to 20, 30, and 40 series GPUs. # TORCH_CUDA_ARCH_LIST='7.5;8.0;8.6;8.7;8.9;7.5+PTX;8.0+PTX;8.6+PTX;8.7+PTX;8.9+PTX' -RUN TORCH_CUDA_ARCH_LIST='7.5+PTX' \ +RUN TORCH_CUDA_ARCH_LIST='7.5,8.0;8.6;8.7;8.9+PTX' \ micromamba run -n omnigibson pip install git+https://github.com/StanfordVL/curobo@06d8c79b660db60c2881e9319e60899cbde5c5b5#egg=nvidia_curobo --no-build-isolation # Make sure isaac gets properly sourced every time omnigibson gets called From 7e91c3a16e0879c8446158fea1b64c3ccd42bb17 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cem=20G=C3=B6kmen?= <1408354+cgokmen@users.noreply.github.com> Date: Wed, 20 Nov 2024 16:35:25 -0800 Subject: [PATCH 4/5] Update prod.Dockerfile --- docker/prod.Dockerfile | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/docker/prod.Dockerfile b/docker/prod.Dockerfile index a549919c9..8bd49c78d 100644 --- a/docker/prod.Dockerfile +++ b/docker/prod.Dockerfile @@ -32,11 +32,17 @@ RUN micromamba shell init --shell=bash # So we install evdev before installing CUDA. RUN micromamba run -n omnigibson pip install evdev -# Install CUDA and torch +# Install torch RUN micromamba run -n omnigibson micromamba install \ - pytorch torchvision pytorch-cuda=11.8 cuda=11.8.0 \ + pytorch torchvision pytorch-cuda=11.8 \ -c pytorch -c nvidia -c conda-forge +# Install cuda for compiling curobo +RUN wget -O /cuda.run https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run && \ + sh /cuda.run --silent --toolkit && rm /cuda.run +ENV PATH=/usr/local/cuda-11.8/bin:$PATH +ENV LD_LIBRARY_PATH=/usr/local/cuda-11.8/lib64:$LD_LIBRARY_PATH + # Install curobo. This can normally be installed when OmniGibson is pip # installed, but we need to install it beforehand here so that it doesn't # have to happen on every time a CI action is run (otherwise it's just @@ -44,7 +50,7 @@ RUN micromamba run -n omnigibson micromamba install \ # Here we also compile this such that it is compatible with GPU architectures # Turing, Ampere, and Ada; which correspond to 20, 30, and 40 series GPUs. # TORCH_CUDA_ARCH_LIST='7.5;8.0;8.6;8.7;8.9;7.5+PTX;8.0+PTX;8.6+PTX;8.7+PTX;8.9+PTX' -RUN TORCH_CUDA_ARCH_LIST='7.5,8.0;8.6;8.7;8.9+PTX' \ +RUN TORCH_CUDA_ARCH_LIST='7.5;8.0;8.6+PTX' \ micromamba run -n omnigibson pip install git+https://github.com/StanfordVL/curobo@06d8c79b660db60c2881e9319e60899cbde5c5b5#egg=nvidia_curobo --no-build-isolation # Make sure isaac gets properly sourced every time omnigibson gets called From 457f678697df3367c4d53a85718afec821c1028f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cem=20G=C3=B6kmen?= <1408354+cgokmen@users.noreply.github.com> Date: Wed, 20 Nov 2024 16:41:23 -0800 Subject: [PATCH 5/5] Update prod.Dockerfile --- docker/prod.Dockerfile | 8 -------- 1 file changed, 8 deletions(-) diff --git a/docker/prod.Dockerfile b/docker/prod.Dockerfile index 8bd49c78d..0b9a99fc8 100644 --- a/docker/prod.Dockerfile +++ b/docker/prod.Dockerfile @@ -25,13 +25,6 @@ ENV MAMBA_ROOT_PREFIX /micromamba RUN micromamba create -n omnigibson -c conda-forge python=3.10 RUN micromamba shell init --shell=bash -# Install evdev, which is a dependency of telemoma. 
It cannot be -# installed afterwards because it depends on some C compilation that -# fails if kernel headers ("sysroot") is present inside the conda env. -# The CUDA installation will add the kernel headers to the conda env. -# So we install evdev before installing CUDA. -RUN micromamba run -n omnigibson pip install evdev - # Install torch RUN micromamba run -n omnigibson micromamba install \ pytorch torchvision pytorch-cuda=11.8 \ @@ -49,7 +42,6 @@ ENV LD_LIBRARY_PATH=/usr/local/cuda-11.8/lib64:$LD_LIBRARY_PATH # very slow) # Here we also compile this such that it is compatible with GPU architectures # Turing, Ampere, and Ada; which correspond to 20, 30, and 40 series GPUs. -# TORCH_CUDA_ARCH_LIST='7.5;8.0;8.6;8.7;8.9;7.5+PTX;8.0+PTX;8.6+PTX;8.7+PTX;8.9+PTX' RUN TORCH_CUDA_ARCH_LIST='7.5;8.0;8.6+PTX' \ micromamba run -n omnigibson pip install git+https://github.com/StanfordVL/curobo@06d8c79b660db60c2881e9319e60899cbde5c5b5#egg=nvidia_curobo --no-build-isolation
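
A minimal post-build sanity check, assuming the image was built from this Dockerfile and the environment is still named omnigibson: it mirrors the removed "Test OMPL" step and additionally checks that the pinned curobo fork (assumed to keep upstream's curobo module name) and the CUDA-enabled torch build import cleanly. This is a sketch of how one might verify the primitives dependencies, not a step defined by the patches above.

# Hypothetical smoke test run inside the built image (names/paths assumed as described above).
micromamba run -n omnigibson python -c "from ompl import base; import curobo; import torch; print(torch.__version__, torch.version.cuda, torch.cuda.is_available())"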