diff --git a/.github/workflows/scripts/unix_test.sh b/.github/workflows/scripts/unix_test.sh index 93f5f4edbe2987..75995a8b43c249 100755 --- a/.github/workflows/scripts/unix_test.sh +++ b/.github/workflows/scripts/unix_test.sh @@ -22,11 +22,17 @@ python3 -m pip install dist/*.whl if [ -z "$GPU_TEST" ]; then python3 -m pip install -r requirements_test.txt python3 -m pip install "torch; python_version < '3.10'" + if [[ $PLATFORM == *"linux"* ]]; then + python3 -m pip install "paddlepaddle==0.0.0; python_version < '3.10'" -f https://www.paddlepaddle.org.cn/whl/linux/cpu-mkl/develop.html + fi else ## Only GPU machine uses system python. export PATH=$PATH:$HOME/.local/bin # pip will skip packages if already installed python3 -m pip install -r requirements_test.txt + if [[ $PLATFORM == *"linux"* ]]; then + python3 -m pip install "paddlepaddle-gpu==0.0.0.post112; python_version < '3.10'" -f https://www.paddlepaddle.org.cn/whl/linux/gpu/develop.html + fi fi ti diagnose ti changelog @@ -38,27 +44,35 @@ TI_LIB_DIR="$TI_PATH/_lib/runtime" ./build/taichi_cpp_tests if [ -z "$GPU_TEST" ]; then if [[ $PLATFORM == *"m1"* ]]; then # Split per arch to avoid flaky test - python3 tests/run_tests.py -vr2 -t4 -k "not torch" -a cpu + python3 tests/run_tests.py -vr2 -t4 -k "not torch and not paddle" -a cpu # Run metal and vulkan separately so that they don't use M1 chip simultaneously. 
- python3 tests/run_tests.py -vr2 -t4 -k "not torch" -a vulkan - python3 tests/run_tests.py -vr2 -t2 -k "not torch" -a metal + python3 tests/run_tests.py -vr2 -t4 -k "not torch and not paddle" -a vulkan + python3 tests/run_tests.py -vr2 -t2 -k "not torch and not paddle" -a metal python3 tests/run_tests.py -vr2 -t1 -k "torch" -a "$TI_WANTED_ARCHS" + # Paddle's paddle.fluid.core.Tensor._ptr() is only available on develop branch + # Paddle is not supported on Apple M1 and has no GPU build for macOS else - python3 tests/run_tests.py -vr2 -t4 -a "$TI_WANTED_ARCHS" + python3 tests/run_tests.py -vr2 -t4 -k "not paddle" -a "$TI_WANTED_ARCHS" + if [[ $PLATFORM == *"linux"* ]]; then + python3 tests/run_tests.py -vr2 -t4 -k "paddle" -a "$TI_WANTED_ARCHS" + fi fi else # Split per arch to increase parallelism for linux GPU tests if [[ $TI_WANTED_ARCHS == *"cuda"* ]]; then - python3 tests/run_tests.py -vr2 -t4 -k "not torch" -a cuda + python3 tests/run_tests.py -vr2 -t4 -k "not torch and not paddle" -a cuda fi if [[ $TI_WANTED_ARCHS == *"cpu"* ]]; then - python3 tests/run_tests.py -vr2 -t8 -k "not torch" -a cpu + python3 tests/run_tests.py -vr2 -t8 -k "not torch and not paddle" -a cpu fi if [[ $TI_WANTED_ARCHS == *"vulkan"* ]]; then - python3 tests/run_tests.py -vr2 -t8 -k "not torch" -a vulkan + python3 tests/run_tests.py -vr2 -t8 -k "not torch and not paddle" -a vulkan fi if [[ $TI_WANTED_ARCHS == *"opengl"* ]]; then - python3 tests/run_tests.py -vr2 -t4 -k "not torch" -a opengl + python3 tests/run_tests.py -vr2 -t4 -k "not torch and not paddle" -a opengl fi python3 tests/run_tests.py -vr2 -t1 -k "torch" -a "$TI_WANTED_ARCHS" + if [[ $PLATFORM == *"linux"* ]]; then + python3 tests/run_tests.py -vr2 -t1 -k "paddle" -a "$TI_WANTED_ARCHS" + fi fi diff --git a/.github/workflows/scripts/win_test.ps1 b/.github/workflows/scripts/win_test.ps1 index 40ab79826257d8..b173560e5b1aa1 100644 --- a/.github/workflows/scripts/win_test.ps1 +++ b/.github/workflows/scripts/win_test.ps1 @@ -9,20 +9,23 @@ pip install 
-r requirements_test.txt # TODO relax this when torch supports 3.10 if ("$env:TI_WANTED_ARCHS".Contains("cuda")) { pip install "torch==1.10.1+cu113; python_version < '3.10'" -f https://download.pytorch.org/whl/cu113/torch_stable.html + pip install paddlepaddle-gpu==0.0.0.post112 -f https://www.paddlepaddle.org.cn/whl/windows/gpu/develop.html } else { pip install "torch; python_version < '3.10'" + pip install paddlepaddle==0.0.0 -f https://www.paddlepaddle.org.cn/whl/windows/cpu-mkl-avx/develop.html } if ("$env:TI_WANTED_ARCHS".Contains("cuda")) { - python tests/run_tests.py -vr2 -t4 -k "not torch" -a cuda + python tests/run_tests.py -vr2 -t4 -k "not torch and not paddle" -a cuda if (-not $?) { exit 1 } } if ("$env:TI_WANTED_ARCHS".Contains("cpu")) { - python tests/run_tests.py -vr2 -t6 -k "not torch" -a cpu + python tests/run_tests.py -vr2 -t6 -k "not torch and not paddle" -a cpu if (-not $?) { exit 1 } } if ("$env:TI_WANTED_ARCHS".Contains("opengl")) { - python tests/run_tests.py -vr2 -t4 -k "not torch" -a opengl + python tests/run_tests.py -vr2 -t4 -k "not torch and not paddle" -a opengl if (-not $?) { exit 1 } } python tests/run_tests.py -vr2 -t2 -k "torch" -a "$env:TI_WANTED_ARCHS" +python tests/run_tests.py -vr2 -t2 -k "paddle" -a "$env:TI_WANTED_ARCHS" if (-not $?) { exit 1 } diff --git a/ci/scripts/ubuntu_build_test.sh b/ci/scripts/ubuntu_build_test.sh index ed5acc79c99a88..0d04e958237d4b 100755 --- a/ci/scripts/ubuntu_build_test.sh +++ b/ci/scripts/ubuntu_build_test.sh @@ -24,6 +24,8 @@ git checkout $SHA python3 -m pip install -r requirements_dev.txt -i http://repo.taichigraphics.com/repository/pypi/simple --trusted-host repo.taichigraphics.com # Update Torch version, otherwise cuda tests fail. See #2969. 
python3 -m pip install torch==1.9.0+cu111 -f https://download.pytorch.org/whl/torch_stable.html -i http://repo.taichigraphics.com/repository/pypi/simple --trusted-host repo.taichigraphics.com +# Paddle's paddle.fluid.core.Tensor._ptr() is only available on PaddlePaddle's develop branch +python3 -m pip install paddlepaddle-gpu==0.0.0.post112 -f https://www.paddlepaddle.org.cn/whl/linux/gpu/develop.html TAICHI_CMAKE_ARGS="-DTI_WITH_VULKAN:BOOL=ON -DTI_WITH_CUDA:BOOL=ON -DTI_WITH_OPENGL:BOOL=ON" python3 setup.py install # Add Docker specific ENV @@ -31,5 +33,5 @@ export TI_IN_DOCKER=true # Run tests ti diagnose -python tests/run_tests.py -vr2 -t2 -k "not ndarray and not torch" -python tests/run_tests.py -vr2 -t1 -k "ndarray or torch" +python tests/run_tests.py -vr2 -t2 -k "not ndarray and not torch and not paddle" +python tests/run_tests.py -vr2 -t1 -k "ndarray or torch or paddle" diff --git a/ci/scripts/ubuntu_build_test_cpu.sh b/ci/scripts/ubuntu_build_test_cpu.sh index feba31b80e874f..b675dbb13799fb 100755 --- a/ci/scripts/ubuntu_build_test_cpu.sh +++ b/ci/scripts/ubuntu_build_test_cpu.sh @@ -22,6 +22,8 @@ git clone --recursive https://github.com/taichi-dev/taichi --branch=master cd taichi git checkout $SHA python3 -m pip install -r requirements_dev.txt -i http://repo.taichigraphics.com/repository/pypi/simple --trusted-host repo.taichigraphics.com +# Paddle's paddle.fluid.core.Tensor._ptr() is only available on PaddlePaddle's develop branch +python3 -m pip install paddlepaddle==0.0.0 -f https://www.paddlepaddle.org.cn/whl/linux/cpu-mkl/develop.html TAICHI_CMAKE_ARGS="-DTI_WITH_VULKAN:BOOL=OFF -DTI_WITH_CUDA:BOOL=OFF -DTI_WITH_OPENGL:BOOL=OFF" python3 setup.py install # Add Docker specific ENV @@ -29,5 +31,5 @@ export TI_IN_DOCKER=true # Run tests ti diagnose -python tests/run_tests.py -vr2 -t2 -k "not ndarray and not torch" -python tests/run_tests.py -vr2 -t1 -k "ndarray or torch" +python tests/run_tests.py -vr2 -t2 -k "not ndarray and not torch and not paddle" 
+python tests/run_tests.py -vr2 -t1 -k "ndarray or torch or paddle" diff --git a/ci/windows/win_build_test.ps1 b/ci/windows/win_build_test.ps1 index eea924a966dce2..7d374b45510ea5 100644 --- a/ci/windows/win_build_test.ps1 +++ b/ci/windows/win_build_test.ps1 @@ -59,5 +59,5 @@ python setup.py develop WriteInfo("Build finished") WriteInfo("Testing Taichi") -python tests/run_tests.py -vr2 -t2 -k "not torch" -a cpu +python tests/run_tests.py -vr2 -t2 -k "not torch and not paddle" -a cpu WriteInfo("Test finished") diff --git a/python/taichi/lang/field.py b/python/taichi/lang/field.py index 466f9c70bc81fd..449cf4a6642699 100644 --- a/python/taichi/lang/field.py +++ b/python/taichi/lang/field.py @@ -298,9 +298,8 @@ def to_paddle(self, device=None): import paddle # pylint: disable=C0415 # pylint: disable=E1101 - arr = paddle.zeros(size=self.shape, - dtype=to_paddle_type(self.dtype), - device=device) + arr = paddle.zeros(shape=self.shape, + dtype=to_paddle_type(self.dtype)) from taichi._kernels import tensor_to_ext_arr # pylint: disable=C0415 tensor_to_ext_arr(self, arr) taichi.lang.runtime_ops.sync() diff --git a/python/taichi/lang/kernel_impl.py b/python/taichi/lang/kernel_impl.py index b42c6206633aff..0b820c11eac04a 100644 --- a/python/taichi/lang/kernel_impl.py +++ b/python/taichi/lang/kernel_impl.py @@ -676,7 +676,7 @@ def func__(*args): actual_argument_slot, int(tmp.data_ptr()), tmp.element_size() * tmp.nelement(), v.shape) else: - # For now, paddle.fluid.core.Tensor._ptr() is only available on PaddlePaddle's develop branch + # For now, paddle.fluid.core.Tensor._ptr() is only available on develop branch tmp, paddle_callbacks = self.get_paddle_callbacks( v, has_pp) callbacks += paddle_callbacks diff --git a/tests/python/test_get_external_tensor_shape.py b/tests/python/test_get_external_tensor_shape.py index a4f90c48e22ba7..c16b6d2e5d8eaa 100644 --- a/tests/python/test_get_external_tensor_shape.py +++ b/tests/python/test_get_external_tensor_shape.py @@ -83,7 +83,7 @@ 
def test_get_external_tensor_shape_access_paddle(size): def func(x: ti.types.ndarray(), index: ti.template()) -> ti.i32: return x.shape[index] - x_hat = paddle.ones(size, dtype=paddle.int32, device=paddle.CPUPlace()) + x_hat = paddle.ones([size], dtype=paddle.int32) for idx, y_ref in enumerate(size): y_hat = func(x_hat, idx) assert y_ref == y_hat, "Size of axis {} should equal {} and not {}.".format(