Showing 7 changed files with 172 additions and 0 deletions.
@@ -0,0 +1,11 @@
ARG SAGEMAKER_DISTRIBUTION_IMAGE
FROM $SAGEMAKER_DISTRIBUTION_IMAGE

ARG MAMBA_DOCKERFILE_ACTIVATE=1

# Execute the CUDA validation script:
# 1. Check if TensorFlow is installed with CUDA support for the GPU image
# 2. Check if PyTorch is installed with CUDA support for the GPU image
COPY --chown=$MAMBA_USER:$MAMBA_USER scripts/cuda_validation.py .
RUN chmod +x cuda_validation.py
RUN python3 cuda_validation.py
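For reference, the image under test is supplied through the SAGEMAKER_DISTRIBUTION_IMAGE build argument, so a typical local invocation would be something like "docker build --build-arg SAGEMAKER_DISTRIBUTION_IMAGE=<image> ." (an assumed invocation, not part of this commit); as the validation script below notes, its runtime CUDA check only passes when a GPU is actually available, e.g. via "docker run --gpus all <image id>" on a GPU host.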
@@ -0,0 +1,46 @@
# Verify TensorFlow is built with CUDA support
import tensorflow as tf

cuda_available = tf.test.is_built_with_cuda()
if not cuda_available:
    raise Exception("TensorFlow is installed without CUDA support for GPU image build.")
print("TensorFlow is built with CUDA support.")


# Verify PyTorch is installed as a CUDA build
import subprocess

# Run the micromamba list command and capture the output
result = subprocess.run(["micromamba", "list"], stdout=subprocess.PIPE, text=True)

# Split the output into lines
package_lines = result.stdout.strip().split("\n")

# Find the PyTorch entry
pytorch_entry = None
for line in package_lines:
    dependency_info = line.strip().split()
    if dependency_info and dependency_info[0] == "pytorch":
        pytorch_entry = line.split()
        break

# If PyTorch is installed, print its information
if pytorch_entry:
    package_name = pytorch_entry[0]
    package_version = pytorch_entry[1]
    package_build = pytorch_entry[2]
    print(f"PyTorch: {package_name} {package_version} {package_build}")
    # Raise an exception if the build string does not indicate CUDA
    if "cuda" not in package_build:
        raise Exception("PyTorch is installed without CUDA support for GPU image build.")

# Verify PyTorch can actually use CUDA at runtime
# This check only passes on a GPU instance, so it may fail in local tests
# To test manually on a GPU instance, run: "docker run --gpus all <image id>"
import torch

if not torch.cuda.is_available():
    raise Exception(
        "PyTorch is installed with CUDA support but CUDA is not usable in the current environment. "
        "Make sure to execute this test on a GPU instance."
    )
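The same properties can also be probed through the frameworks' own introspection APIs rather than by parsing micromamba output. A minimal sketch follows (not part of this commit, and assuming TensorFlow and PyTorch are importable in the active environment):

# Supplementary sketch: query CUDA support directly from the frameworks.
import tensorflow as tf
import torch

# TensorFlow: GPU devices visible to the runtime (empty list on a CPU-only host).
gpus = tf.config.list_physical_devices("GPU")
print(f"TensorFlow sees {len(gpus)} GPU device(s).")

# PyTorch: torch.version.cuda is None for CPU-only builds, a CUDA version string otherwise.
if torch.version.cuda is None:
    raise Exception("PyTorch build has no CUDA support.")
print(f"PyTorch was built against CUDA {torch.version.cuda}.")

# Runtime check, same requirement as torch.cuda.is_available() above: needs a real GPU.
if torch.cuda.is_available():
    print(f"CUDA device detected: {torch.cuda.get_device_name(0)}")

Both approaches distinguish a CPU-only build from a CUDA build without needing a GPU; only the final torch.cuda.is_available() / get_device_name check requires actual GPU hardware.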
@@ -0,0 +1,11 @@
ARG SAGEMAKER_DISTRIBUTION_IMAGE
FROM $SAGEMAKER_DISTRIBUTION_IMAGE

ARG MAMBA_DOCKERFILE_ACTIVATE=1

# Execute the CUDA validation script:
# 1. Check if TensorFlow is installed with CUDA support for the GPU image
# 2. Check if PyTorch is installed with CUDA support for the GPU image
COPY --chown=$MAMBA_USER:$MAMBA_USER scripts/cuda_validation.py .
RUN chmod +x cuda_validation.py
RUN python3 cuda_validation.py
@@ -0,0 +1,46 @@
# Verify TensorFlow is built with CUDA support
import tensorflow as tf

cuda_available = tf.test.is_built_with_cuda()
if not cuda_available:
    raise Exception("TensorFlow is installed without CUDA support for GPU image build.")
print("TensorFlow is built with CUDA support.")


# Verify PyTorch is installed as a CUDA build
import subprocess

# Run the micromamba list command and capture the output
result = subprocess.run(["micromamba", "list"], stdout=subprocess.PIPE, text=True)

# Split the output into lines
package_lines = result.stdout.strip().split("\n")

# Find the PyTorch entry
pytorch_entry = None
for line in package_lines:
    dependency_info = line.strip().split()
    if dependency_info and dependency_info[0] == "pytorch":
        pytorch_entry = line.split()
        break

# If PyTorch is installed, print its information
if pytorch_entry:
    package_name = pytorch_entry[0]
    package_version = pytorch_entry[1]
    package_build = pytorch_entry[2]
    print(f"PyTorch: {package_name} {package_version} {package_build}")
    # Raise an exception if the build string does not indicate CUDA
    if "cuda" not in package_build:
        raise Exception("PyTorch is installed without CUDA support for GPU image build.")

# Verify PyTorch can actually use CUDA at runtime
# This check only passes on a GPU instance, so it may fail in local tests
# To test manually on a GPU instance, run: "docker run --gpus all <image id>"
import torch

if not torch.cuda.is_available():
    raise Exception(
        "PyTorch is installed with CUDA support but CUDA is not usable in the current environment. "
        "Make sure to execute this test on a GPU instance."
    )
@@ -0,0 +1,11 @@
ARG SAGEMAKER_DISTRIBUTION_IMAGE
FROM $SAGEMAKER_DISTRIBUTION_IMAGE

ARG MAMBA_DOCKERFILE_ACTIVATE=1

# Execute the CUDA validation script:
# 1. Check if TensorFlow is installed with CUDA support for the GPU image
# 2. Check if PyTorch is installed with CUDA support for the GPU image
COPY --chown=$MAMBA_USER:$MAMBA_USER scripts/cuda_validation.py .
RUN chmod +x cuda_validation.py
RUN python3 cuda_validation.py
@@ -0,0 +1,46 @@
# Verify TensorFlow is built with CUDA support
import tensorflow as tf

cuda_available = tf.test.is_built_with_cuda()
if not cuda_available:
    raise Exception("TensorFlow is installed without CUDA support for GPU image build.")
print("TensorFlow is built with CUDA support.")


# Verify PyTorch is installed as a CUDA build
import subprocess

# Run the micromamba list command and capture the output
result = subprocess.run(["micromamba", "list"], stdout=subprocess.PIPE, text=True)

# Split the output into lines
package_lines = result.stdout.strip().split("\n")

# Find the PyTorch entry
pytorch_entry = None
for line in package_lines:
    dependency_info = line.strip().split()
    if dependency_info and dependency_info[0] == "pytorch":
        pytorch_entry = line.split()
        break

# If PyTorch is installed, print its information
if pytorch_entry:
    package_name = pytorch_entry[0]
    package_version = pytorch_entry[1]
    package_build = pytorch_entry[2]
    print(f"PyTorch: {package_name} {package_version} {package_build}")
    # Raise an exception if the build string does not indicate CUDA
    if "cuda" not in package_build:
        raise Exception("PyTorch is installed without CUDA support for GPU image build.")

# Verify PyTorch can actually use CUDA at runtime
# This check only passes on a GPU instance, so it may fail in local tests
# To test manually on a GPU instance, run: "docker run --gpus all <image id>"
import torch

if not torch.cuda.is_available():
    raise Exception(
        "PyTorch is installed with CUDA support but CUDA is not usable in the current environment. "
        "Make sure to execute this test on a GPU instance."
    )