Support Scalar in Tensor Compute Library (#14)
* fill_any_like kernel refactor

* remove unused code from the full_like C++ API

* Support Scalar in Tensor Compute Library

* add Scalar support in dygraph and static graph mode

* keep the basic types for attrs, instead of using Scalar for all

* merge the code
zyfncg authored Oct 14, 2021
1 parent e30ca2a commit 073aef3
Showing 12 changed files with 235 additions and 44 deletions.
50 changes: 37 additions & 13 deletions paddle/fluid/framework/operator.cc
@@ -1959,27 +1959,51 @@ pt::KernelContext OperatorWithKernel::ConstructPtKernelContext(
     op_kernel_ctx.EmplaceBackOutputs(tmp_outputs);
   }
 
-  for (size_t i = 0; i < attr_pairs.size(); ++i) {
-    // TODO(chenweihang): support other attrs
-    // In principle, the attr required by the dynamic mode should be
-    // passed in from the Python side, and there is no need to look up
-    // from the default_map, but this does not work yet
-    switch (attr_pairs[i].second) {
-      case framework::proto::AttrType::INT:
+  for (size_t i = 0; i < attr_defs.size(); ++i) {
+    paddle::any attr_item;
+    if (attr_defs[i].type_index == std::type_index(typeid(pt::Scalar))) {
+      // TODO(chenweihang): support other attrs
+      // In principle, the attr required by the dynamic mode should be
+      // passed in from the Python side, and there is no need to look up
+      // from the default_map, but this does not work yet
+      switch (attr_pairs[i].second) {
+        case framework::proto::AttrType::INT:
+          op_kernel_ctx.EmplaceBackAttr(
+              pt::Scalar(Attr<int>(attr_pairs[i].first)));
+          break;
+        case framework::proto::AttrType::FLOAT:
+          op_kernel_ctx.EmplaceBackAttr(
+              pt::Scalar(Attr<float>(attr_pairs[i].first)));
+          break;
+        case framework::proto::AttrType::BOOLEAN:
+          op_kernel_ctx.EmplaceBackAttr(
+              pt::Scalar(Attr<bool>(attr_pairs[i].first)));
+          break;
+        default:
+          // TODO(chenweihang): support other attrs type
+          PADDLE_THROW(platform::errors::Unimplemented(
+              "unsupported cast op attribute `%s` when construct "
+              "KernelContext.",
+              attr_pairs[i].first));
+      }
+    } else {
+      // TODO(chenweihang): support other attrs
+      // In principle, the attr required by the dynamic mode should be
+      // passed in from the Python side, and there is no need to look up
+      // from the default_map, but this does not work yet
+      if (attr_defs[i].type_index == std::type_index(typeid(int))) {
         op_kernel_ctx.EmplaceBackAttr(Attr<int>(attr_pairs[i].first));
-        break;
-      case framework::proto::AttrType::FLOAT:
+      } else if (attr_defs[i].type_index == std::type_index(typeid(float))) {
         op_kernel_ctx.EmplaceBackAttr(Attr<float>(attr_pairs[i].first));
-        break;
-      case framework::proto::AttrType::BOOLEAN:
+      } else if (attr_defs[i].type_index == std::type_index(typeid(bool))) {
         op_kernel_ctx.EmplaceBackAttr(Attr<bool>(attr_pairs[i].first));
-        break;
-      default:
+      } else {
         // TODO(chenweihang): support other attrs type
         PADDLE_THROW(platform::errors::Unimplemented(
             "unsupported cast op attribute `%s` when construct "
             "KernelContext.",
             attr_pairs[i].first));
+      }
     }
   }
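Aside: the hunk above keys attribute dispatch on std::type_index rather than on the proto attribute enum alone. A minimal, self-contained sketch of that technique in standard C++ (AttrDef and the attribute names below are illustrative stand-ins, not Paddle types):

#include <iostream>
#include <string>
#include <typeindex>
#include <typeinfo>
#include <vector>

// Illustrative stand-in for a kernel's declared attribute slot.
struct AttrDef {
  std::string name;
  std::type_index type_index;
};

int main() {
  std::vector<AttrDef> attr_defs = {
      {"value", std::type_index(typeid(float))},
      {"num_rows", std::type_index(typeid(int))},
      {"keep_dim", std::type_index(typeid(bool))},
  };

  // Dispatch on the type each kernel slot declares, mirroring the
  // if/else-if chain in ConstructPtKernelContext above.
  for (const auto& def : attr_defs) {
    if (def.type_index == std::type_index(typeid(int))) {
      std::cout << def.name << ": int slot\n";
    } else if (def.type_index == std::type_index(typeid(float))) {
      std::cout << def.name << ": float slot\n";
    } else if (def.type_index == std::type_index(typeid(bool))) {
      std::cout << def.name << ": bool slot\n";
    } else {
      std::cout << def.name << ": unsupported type\n";
    }
  }
  return 0;
}

The comparison is exact-type matching, which is why the diff needs one explicit branch per supported attribute type.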

49 changes: 36 additions & 13 deletions paddle/fluid/imperative/prepared_operator.cc
@@ -365,30 +365,53 @@ static pt::KernelContext BuildDygraphKernelContext(
     op_kernel_ctx.EmplaceBackOutputs(tmp_outputs);
   }
 
-  for (size_t i = 0; i < attr_pairs.size(); ++i) {
-    // TODO(chenweihang): support other attrs
-    // In principle, the attr required by the dynamic mode should be
-    // passed in from the Python side, and there is no need to look up
-    // from the default_map, but this does not work yet
-    switch (attr_pairs[i].second) {
-      case framework::proto::AttrType::INT:
+  for (size_t i = 0; i < attr_defs.size(); ++i) {
+    if (attr_defs[i].type_index == std::type_index(typeid(pt::Scalar))) {
+      // TODO(chenweihang): support other attrs
+      // In principle, the attr required by the dynamic mode should be
+      // passed in from the Python side, and there is no need to look up
+      // from the default_map, but this does not work yet
+      switch (attr_pairs[i].second) {
+        case framework::proto::AttrType::INT:
+          op_kernel_ctx.EmplaceBackAttr(pt::Scalar(
+              GetAttr<int>(attrs, default_attrs, attr_pairs[i].first)));
+          break;
+        case framework::proto::AttrType::FLOAT:
+          op_kernel_ctx.EmplaceBackAttr(pt::Scalar(
+              GetAttr<float>(attrs, default_attrs, attr_pairs[i].first)));
+          break;
+        case framework::proto::AttrType::BOOLEAN:
+          op_kernel_ctx.EmplaceBackAttr(pt::Scalar(
+              GetAttr<bool>(attrs, default_attrs, attr_pairs[i].first)));
+          break;
+        default:
+          // TODO(chenweihang): support other attrs type
+          PADDLE_THROW(platform::errors::Unimplemented(
+              "unsupported cast op attribute `%s` when construct "
+              "KernelContext.",
+              attr_pairs[i].first));
+      }
+    } else {
+      // TODO(chenweihang): support other attrs
+      // In principle, the attr required by the dynamic mode should be
+      // passed in from the Python side, and there is no need to look up
+      // from the default_map, but this does not work yet
+      if (attr_defs[i].type_index == std::type_index(typeid(int))) {
         op_kernel_ctx.EmplaceBackAttr(
             GetAttr<int>(attrs, default_attrs, attr_pairs[i].first));
-        break;
-      case framework::proto::AttrType::FLOAT:
+      } else if (attr_defs[i].type_index == std::type_index(typeid(float))) {
         op_kernel_ctx.EmplaceBackAttr(
             GetAttr<float>(attrs, default_attrs, attr_pairs[i].first));
-        break;
-      case framework::proto::AttrType::BOOLEAN:
+      } else if (attr_defs[i].type_index == std::type_index(typeid(bool))) {
         op_kernel_ctx.EmplaceBackAttr(
             GetAttr<bool>(attrs, default_attrs, attr_pairs[i].first));
-        break;
-      default:
+      } else {
         // TODO(chenweihang): support other attrs type
         PADDLE_THROW(platform::errors::Unimplemented(
             "unsupported cast op attribute `%s` when construct "
             "KernelContext.",
             attr_pairs[i].first));
+      }
     }
   }
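GetAttr itself is not part of this diff. A plausible sketch, under stated assumptions, of the lookup order its call sites imply — the attrs passed in from the Python side first, then the op's default_attrs — written against plain std::map (the real helper works over Paddle's AttributeMap, so every name here is illustrative):

#include <map>
#include <stdexcept>
#include <string>

// Hypothetical: a simplified GetAttr showing only the fallback order.
template <typename T>
T GetAttr(const std::map<std::string, T>& attrs,
          const std::map<std::string, T>& default_attrs,
          const std::string& name) {
  auto it = attrs.find(name);
  if (it != attrs.end()) {
    return it->second;  // value passed in from the Python side wins
  }
  auto dit = default_attrs.find(name);
  if (dit != default_attrs.end()) {
    return dit->second;  // otherwise fall back to the op's default
  }
  throw std::runtime_error("attribute not found: " + name);
}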

1 change: 1 addition & 0 deletions paddle/tcmpt/api/include/core.h
@@ -20,4 +20,5 @@ limitations under the License. */
#include "paddle/tcmpt/core/kernel_context.h"
#include "paddle/tcmpt/core/kernel_factory.h"
#include "paddle/tcmpt/core/mkldnn_dense_tensor.h"
#include "paddle/tcmpt/core/scalar.h"
#include "paddle/tcmpt/core/selected_rows_tensor.h"
2 changes: 2 additions & 0 deletions paddle/tcmpt/core/kernel_utils.h
@@ -17,6 +17,7 @@
 #include "paddle/tcmpt/core/dense_tensor.h"
 #include "paddle/tcmpt/core/kernel_context.h"
 #include "paddle/tcmpt/core/kernel_def.h"
+#include "paddle/tcmpt/core/scalar.h"
 #include "paddle/tcmpt/core/selected_rows_tensor.h"

 // See Note [ Why still include the fluid headers? ]
@@ -162,6 +163,7 @@ struct KernelImpl<Return (*)(Args...), kernel_fn> {
 PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(int);
 PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(int64_t);
 PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(paddle::platform::float16);
+PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(const pt::Scalar&);
 
 /* Output Helpers */

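The PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE macro is defined elsewhere in kernel_utils.h; each instantiation teaches the compile-time argument walker to extract one attribute of that type from the kernel context. A heavily simplified sketch of the underlying recursion (the names and the double-based storage are illustrative, not Paddle's actual template machinery):

#include <iostream>
#include <vector>

// Illustrative context holding attributes by position.
struct Ctx {
  std::vector<double> attrs;  // simplified: everything stored as double
};

// Recursive helper: peel one expected argument type per step.
template <typename... Rest>
struct CallHelper;

template <typename Head, typename... Rest>
struct CallHelper<Head, Rest...> {
  template <typename Fn, typename... Prev>
  static void Call(const Ctx& ctx, Fn fn, Prev... prev) {
    // The index of the next attribute is the number of args peeled so far.
    Head value = static_cast<Head>(ctx.attrs[sizeof...(Prev)]);
    CallHelper<Rest...>::Call(ctx, fn, prev..., value);
  }
};

template <>
struct CallHelper<> {
  template <typename Fn, typename... Prev>
  static void Call(const Ctx&, Fn fn, Prev... prev) {
    fn(prev...);  // all arguments collected; invoke the kernel
  }
};

int main() {
  Ctx ctx{{1.0, 2.5}};
  CallHelper<int, float>::Call(
      ctx, [](int a, float b) { std::cout << a << " " << b << "\n"; });
  return 0;
}

Each PT_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE line in the real header plays the role of one such specialization, fixed to a concrete attribute type; this commit adds const pt::Scalar& to the set.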
63 changes: 63 additions & 0 deletions paddle/tcmpt/core/scalar.h
@@ -0,0 +1,63 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <stdexcept>

namespace pt {

class Scalar {
public:
// Constructors allow implicit conversion from basic scalar types
Scalar(float val) : tag(Tag::HAS_F) { data_.f = val; } // NOLINT

Scalar(double val) : tag(Tag::HAS_D) { data_.d = val; } // NOLINT

Scalar(int32_t val) : tag(Tag::HAS_I32) { data_.i32 = val; } // NOLINT

Scalar(int64_t val) : tag(Tag::HAS_I64) { data_.i64 = val; } // NOLINT

Scalar(bool val) : tag(Tag::HAS_B) { data_.b = val; } // NOLINT

template <typename T>
inline T to() const {
switch (tag) {
case Tag::HAS_F:
return static_cast<T>(data_.f);
case Tag::HAS_D:
return static_cast<T>(data_.d);
case Tag::HAS_I32:
return static_cast<T>(data_.i32);
case Tag::HAS_I64:
return static_cast<T>(data_.i64);
case Tag::HAS_B:
return static_cast<T>(data_.b);
default:
throw std::runtime_error("Invalid Scalar type.");
}
}

private:
enum class Tag { HAS_F, HAS_D, HAS_I32, HAS_I64, HAS_B };
Tag tag;

union data {
float f;
double d;
int32_t i32;
int64_t i64;
bool b;
} data_;
};

} // namespace pt
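Scalar is a small tagged union: Tag records which union member is active, and to<T>() switches on it before casting, so a kernel instantiated for any T can consume a value supplied as float, double, int32_t, int64_t, or bool. A minimal usage sketch, assuming only the header above (the main() harness is illustrative, not part of the Paddle sources):

#include <cstdint>
#include <iostream>

#include "paddle/tcmpt/core/scalar.h"

int main() {
  pt::Scalar a = 3;     // implicit from int, tag HAS_I32
  pt::Scalar b = 2.5f;  // implicit from float, tag HAS_F
  pt::Scalar c = true;  // implicit from bool, tag HAS_B

  // to<T>() casts whatever member is active to the requested type, so
  // one Scalar argument can feed a kernel instantiated for any dtype.
  std::cout << a.to<float>() << " "    // prints 3
            << b.to<int64_t>() << " "  // prints 2 (truncated)
            << c.to<double>() << "\n"; // prints 1
  return 0;
}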
8 changes: 2 additions & 6 deletions paddle/tcmpt/cpu/creation.cc
@@ -22,13 +22,9 @@ namespace pt {
 template <typename T>
 void FillAnyLike(const CPUContext& dev_ctx,
                  const DenseTensor& x,
-                 float val,
+                 const Scalar& val,
                  DenseTensor* out) {
-  PADDLE_ENFORCE_EQ(
-      std::isnan(val),
-      false,
-      paddle::platform::errors::InvalidArgument("The filled value is NaN."));
-  eigen::fill<CPUContext, T>(dev_ctx, out, val);
+  eigen::fill<CPUContext, T>(dev_ctx, out, val.to<T>());
 }
 
 } // namespace pt
3 changes: 2 additions & 1 deletion paddle/tcmpt/cpu/creation.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
#pragma once

#include "paddle/tcmpt/core/dense_tensor.h"
#include "paddle/tcmpt/core/scalar.h"

#include "paddle/fluid/platform/device_context.h"

Expand All @@ -25,7 +26,7 @@ using CPUContext = paddle::platform::CPUDeviceContext;
template <typename T>
void FillAnyLike(const CPUContext& dev_ctx,
const DenseTensor& x,
float val,
const Scalar& val,
DenseTensor* out);

} // namespace pt
8 changes: 2 additions & 6 deletions paddle/tcmpt/cuda/creation.cu
@@ -22,13 +22,9 @@ namespace pt {
 template <typename T>
 void FillAnyLike(const CUDAContext& dev_ctx,
                  const DenseTensor& x,
-                 float val,
+                 const Scalar& val,
                  DenseTensor* out) {
-  PADDLE_ENFORCE_EQ(
-      std::isnan(val),
-      false,
-      paddle::platform::errors::InvalidArgument("The filled value is NaN."));
-  eigen::fill<CUDAContext, T>(dev_ctx, out, val);
+  eigen::fill<CUDAContext, T>(dev_ctx, out, val.to<T>());
 }
 
 } // namespace pt
3 changes: 2 additions & 1 deletion paddle/tcmpt/cuda/creation.h
@@ -18,6 +18,7 @@
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 
 #include "paddle/tcmpt/core/dense_tensor.h"
+#include "paddle/tcmpt/core/scalar.h"
 
 #include "paddle/fluid/platform/device_context.h"
 
@@ -28,7 +29,7 @@ using CUDAContext = paddle::platform::CUDADeviceContext;
 template <typename T>
 void FillAnyLike(const CUDAContext& dev_ctx,
                  const DenseTensor& x,
-                 float val,
+                 const Scalar& val,
                  DenseTensor* out);
 
 } // namespace pt
10 changes: 9 additions & 1 deletion paddle/tcmpt/hapi/include/creation.h
@@ -14,12 +14,20 @@
 
 #pragma once
 
+#include "paddle/tcmpt/core/dtype.h"
+#include "paddle/tcmpt/core/scalar.h"
 #include "paddle/tcmpt/hapi/include/tensor.h"
 
 namespace paddle {
 namespace experimental {
 
-Tensor full_like(const Tensor& x, float value);
+Tensor full_like(const Tensor& x,
+                 const pt::Scalar& value,
+                 pt::DataType dtype = pt::DataType::kUndef);
+
+Tensor ones_like(const Tensor& x, pt::DataType dtype = pt::DataType::kUndef);
+
+Tensor zeros_like(const Tensor& x, pt::DataType dtype = pt::DataType::kUndef);
 
 } // namespace experimental
 } // namespace paddle
14 changes: 13 additions & 1 deletion paddle/tcmpt/hapi/lib/creation.cc
@@ -25,7 +25,7 @@ limitations under the License. */
 namespace paddle {
 namespace experimental {
 
-Tensor full_like(const Tensor& x, float value) {
+Tensor full_like(const Tensor& x, const pt::Scalar& value, pt::DataType dtype) {
   // 1. Get kernel signature and kernel
   auto kernel_signature = ParseKernelNameAndKeyByArgs("fill_any_like", x);
   VLOG(1) << kernel_signature.first;
@@ -52,6 +52,10 @@ Tensor full_like(const Tensor& x, float value) {
   // 5. Prepare outputs
   Tensor out;
   auto out_def = kernel.args_def().output_defs()[0];
+  // InferDataType
+  if (dtype != pt::DataType::kUndef) {
+    out_def.SetDataType(dtype);
+  }
   auto dense_out = std::make_shared<pt::DenseTensor>(
       pt::TensorMeta(out_dims, out_def.backend, out_def.dtype, out_def.layout),
       pt::TensorStatus());
@@ -64,5 +68,13 @@ Tensor full_like(const Tensor& x, float value) {
   return out;
 }
 
+Tensor ones_like(const Tensor& x, pt::DataType dtype) {
+  return full_like(x, 1, dtype);
+}
+
+Tensor zeros_like(const Tensor& x, pt::DataType dtype) {
+  return full_like(x, 0, dtype);
+}
+
 } // namespace experimental
 } // namespace paddle
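Taken together, the new surface lets callers fill a tensor with any scalar and optionally override the output dtype, with ones_like and zeros_like reduced to one-line wrappers. A hedged usage sketch (assumes a Tensor produced elsewhere by the framework; DataType enumerators other than kUndef are assumptions based on the naming style, not confirmed by this diff):

// Usage sketch of the new C++ API; not a complete runnable program,
// since constructing a paddle::experimental::Tensor needs the framework.
#include "paddle/tcmpt/hapi/include/creation.h"

void Demo(const paddle::experimental::Tensor& x) {
  namespace exp = paddle::experimental;
  // Fill with an int, a float, or a bool: all pass through pt::Scalar.
  exp::Tensor a = exp::full_like(x, 3);  // dtype kUndef: keep x's dtype
  exp::Tensor b = exp::full_like(x, 2.5f, pt::DataType::kFloat32);  // assumed enumerator
  exp::Tensor ones = exp::ones_like(x);  // same as full_like(x, 1)
  exp::Tensor zeros = exp::zeros_like(x, pt::DataType::kFloat64);   // assumed enumerator
}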

1 comment on commit 073aef3

@paddle-bot-old (bot) commented on 073aef3, Oct 14, 2021:
🕵️ CI failures summary

🔍 PR: #14 Commit ID: 073aef3 contains failed CI.

🔹 Failed: PR-CI-OP-benchmark

test_failed
2021-10-14 16:39:02 + '[' -z matmul,matmul,matmul.json,True ']'
2021-10-14 16:39:02 + '[' 8 -ne 0 ']'
2021-10-14 16:39:02 + LOG '[INFO] See https://github.com/PaddlePaddle/Paddle/wiki/PR-CI-OP-benchmark-Manual for details.'
2021-10-14 16:39:02 + echo '[tools/test_ci_op_benchmark.sh:275] [INFO] See https://github.com/PaddlePaddle/Paddle/wiki/PR-CI-OP-benchmark-Manual for details.'
2021-10-14 16:39:02 [tools/test_ci_op_benchmark.sh:275] [INFO] See https://github.com/PaddlePaddle/Paddle/wiki/PR-CI-OP-benchmark-Manual for details.
2021-10-14 16:39:02 + LOG '[INFO] Or you can apply for one RD (Avin0323(Recommend), Xreki, luotao1) approval to pass this PR.'
2021-10-14 16:39:02 + echo '[tools/test_ci_op_benchmark.sh:276] [INFO] Or you can apply for one RD (Avin0323(Recommend), Xreki, luotao1) approval to pass this PR.'
2021-10-14 16:39:02 [tools/test_ci_op_benchmark.sh:276] [INFO] Or you can apply for one RD (Avin0323(Recommend), Xreki, luotao1) approval to pass this PR.
2021-10-14 16:39:02 + exit 8
2021-10-14 16:39:02 + EXCODE=8
2021-10-14 16:39:02 + echo 'EXCODE: 8'
2021-10-14 16:39:02 EXCODE: 8
2021-10-14 16:39:02 + echo 'ipipe_log_param_EXCODE: 8'
2021-10-14 16:39:02 ipipe_log_param_EXCODE: 8
2021-10-14 16:39:02 + '[' 8 -eq 0 ']'
2021-10-14 16:39:02 + set +x
2021-10-14 16:39:02 Sorry, some tests failed.
2021-10-14 16:39:02 + exit 8
2021-10-14 16:39:02 {build code state=8}

🔹 Failed: PR-CI-Kunlun

http_proxy_failed
2021-10-14 16:50:16 Note: checking out '9f75c5aa851cd877fb0d93ccc31b8567a6706546'.
2021-10-14 16:50:16 You are in 'detached HEAD' state. You can look around, make experimental
2021-10-14 16:50:16 changes and commit them, and you can discard any commits you make in this
2021-10-14 16:50:16 state without impacting any branches by performing another checkout.
2021-10-14 16:50:16 If you want to create a new branch to retain commits you create, you may
2021-10-14 16:50:16 do so (now or later) by using -b with the checkout command again. Example:
2021-10-14 16:50:16 git checkout -b
2021-10-14 16:50:16 HEAD is now at 9f75c5a... Merge pull request #2337 from sergiocampama/deprecation
2021-10-14 16:50:16 Submodule 'third_party/benchmark' (https://github.com/google/benchmark.git) registered for path 'third_party/benchmark'
2021-10-14 16:50:16 Cloning into 'third_party/benchmark'...
2021-10-14 16:50:19 fatal: unable to access 'https://github.com/google/benchmark.git/': Failed to connect to 172.19.57.45 port 3128: Connection timed out
2021-10-14 16:50:19 fatal: clone of 'https://github.com/google/benchmark.git' into submodule path 'third_party/benchmark' failed
2021-10-14 16:50:19 CMake Error at /paddle/build/third_party/protobuf/tmp/extern_protobuf-gitclone.cmake:52 (message):
2021-10-14 16:50:19 Failed to update submodules in:
2021-10-14 16:50:19 '/paddle/build/third_party/protobuf/src/extern_protobuf'
2021-10-14 16:50:19 CMakeFiles/extern_protobuf.dir/build.make:90: recipe for target 'third_party/protobuf/src/extern_protobuf-stamp/extern_protobuf-download' failed
2021-10-14 16:50:19 make[2]: *** [third_party/protobuf/src/extern_protobuf-stamp/extern_protobuf-download] Error 1
2021-10-14 16:50:19 CMakeFiles/Makefile2:4065: recipe for target 'CMakeFiles/extern_protobuf.dir/all' failed
2021-10-14 16:50:19 make[1]: *** [CMakeFiles/extern_protobuf.dir/all] Error 2
2021-10-14 16:50:19 Makefile:140: recipe for target 'all' failed
2021-10-14 16:50:19 make: *** [all] Error 2

🔹 Failed: PR-CI-APPROVAL

approve_failed
2021-10-14 16:50:50 Saving to: "bk.txt"
2021-10-14 16:50:50 0K 100% 2.80M=0s
2021-10-14 16:50:50 2021-10-14 16:50:50 (2.80 MB/s) - "bk.txt" saved [5/5]
2021-10-14 16:50:58 ****************
2021-10-14 16:50:58 0. You must have one RD (lanxianghit (Recommend), phlrain or luotao1) approval for changing the FLAGS, which manages the environment variables.
2021-10-14 16:50:58 1. You must have Dianhai approval for change 20+ files or add than 1000+ lines of content.
2021-10-14 16:50:58 2. You must have one RD (XiaoguangHu01,chenwhql,zhiqiu,Xreki,luotao1) approval for paddle/fluid/framework/operator.h, which manages the underlying code for fluid.
2021-10-14 16:50:58 3. You must have one RD (zhiqiu (Recommend) , phlrain) approval for the changes of paddle/fluid/pybind/op_function_generator.cc, which manages the logic of automatic generating op functions for dygraph.
2021-10-14 16:50:58 4. You must have one RD (XiaoguangHu01,chenwhql,zhiqiu,Xreki,luotao1) approval for the usage of const_cast.
2021-10-14 16:50:58 5. You must have one RD (Avin0323(Recommend) or zhouwei25 or wanghuancoder or luotao1) approval for modifying unity_build_rule.cmake which the rules of Unity Build.
2021-10-14 16:50:58 There are 6 approved errors.
2021-10-14 16:50:58 ****************
2021-10-14 16:50:58 + EXCODE=6
2021-10-14 16:50:58 + echo 'EXCODE: 6'
2021-10-14 16:50:58 EXCODE: 6
2021-10-14 16:50:58 + echo 'ipipe_log_param_EXCODE: 6'
2021-10-14 16:50:58 ipipe_log_param_EXCODE: 6
2021-10-14 16:50:58 + exit 6

🔹 Failed: PR-CI-Kunlun-Inference-Directed-cyclic-graph

Unknown Failed
2021-10-14 17:17:35 /opt/compiler/gcc-8.2/lib/gcc/x86_64-pc-linux-gnu/8.2.0/../../../../x86_64-pc-linux-gnu/bin/ld: ../../../../third_party/lite/src/extern_lite-build/inference_lite_lib/cxx/lib/libpaddle_full_api_shared.so: undefined reference to `baidu::xpu::api::search_aligned_mat_mul(baidu::xpu::api::Context*, bool, bool, int, int, int, int, float, float const*, int, float const*, int, float*, int)'
2021-10-14 17:17:35 /opt/compiler/gcc-8.2/lib/gcc/x86_64-pc-linux-gnu/8.2.0/../../../../x86_64-pc-linux-gnu/bin/ld: ../../../../third_party/lite/src/extern_lite-build/inference_lite_lib/cxx/lib/libpaddle_full_api_shared.so: undefined reference to `baidu::xpu::api::search_noaligned_mat_mul(baidu::xpu::api::Context*, bool, bool, int, int const*, int, int, float, float const*, float const*, float*)'
2021-10-14 17:17:35 /opt/compiler/gcc-8.2/lib/gcc/x86_64-pc-linux-gnu/8.2.0/../../../../x86_64-pc-linux-gnu/bin/ld: ../../../../third_party/lite/src/extern_lite-build/inference_lite_lib/cxx/lib/libpaddle_full_api_shared.so: undefined reference to `int baidu::xpu::api::reduce_mean<float>(baidu::xpu::api::Context*, float const*, float*, std::vector<int, std::allocator<int> > const&, std::vector<int, std::allocator<int> > const&)'
2021-10-14 17:17:35 /opt/compiler/gcc-8.2/lib/gcc/x86_64-pc-linux-gnu/8.2.0/../../../../x86_64-pc-linux-gnu/bin/ld: ../../../../third_party/lite/src/extern_lite-build/inference_lite_lib/cxx/lib/libpaddle_full_api_shared.so: undefined reference to `int baidu::xpu::api::cast_v2<int, long>(baidu::xpu::api::Context*, int const*, long*, int)'
2021-10-14 17:17:35 /opt/compiler/gcc-8.2/lib/gcc/x86_64-pc-linux-gnu/8.2.0/../../../../x86_64-pc-linux-gnu/bin/ld: ../../../../third_party/lite/src/extern_lite-build/inference_lite_lib/cxx/lib/libpaddle_full_api_shared.so: undefined reference to `int baidu::xpu::api::transformer_encoder<float16, short, short>(baidu::xpu::api::Context*, float16 const*, std::vector<short const*, std::allocator<short const*> > const&, float16*, std::vector<float const*, std::allocator<float const*> > const&, std::vector<float const*, std::allocator<float const*> > const&, std::vector<float const*, std::allocator<float const*> > const&, std::vector<float const*, std::allocator<float const*> > const&, std::vector<float const*, std::allocator<float const*> > const&, baidu::xpu::api::QKVAttnParam const&, float const*)'
2021-10-14 17:17:35 collect2: error: ld returned 1 exit status
2021-10-14 17:17:35 make64[2]: *** [paddle/fluid/inference/lite/test_lite_tensor_utils] Error 1
2021-10-14 17:17:35 make64[1]: *** [paddle/fluid/inference/lite/CMakeFiles/test_lite_tensor_utils.dir/all] Error 2
2021-10-14 17:17:35 make64[1]: *** Waiting for unfinished jobs....
2021-10-14 17:17:35 [ 45%] Building CXX object paddle/fluid/framework/CMakeFiles/executor.dir/downpour_worker_opt.cc.o
2021-10-14 17:17:35 [ 45%] Building CXX object paddle/fluid/framework/CMakeFiles/executor.dir/pull_dense_worker.cc.o
2021-10-14 17:17:36 [ 45%] Building CXX object paddle/fluid/framework/CMakeFiles/executor.dir/section_worker.cc.o
2021-10-14 17:17:36 [ 45%] Building CXX object paddle/fluid/framework/CMakeFiles/executor.dir/device_worker_factory.cc.o
2021-10-14 17:17:36 [ 45%] Building CXX object paddle/fluid/framework/CMakeFiles/executor.dir/data_set.cc.o
2021-10-14 17:17:38 [ 45%] Linking CXX static library liblite_engine_op.a
2021-10-14 17:17:38 [ 45%] Built target lite_engine_op
2021-10-14 17:17:52 [ 45%] Linking CXX static library libexecutor.a
2021-10-14 17:17:52 [ 45%] Built target executor
2021-10-14 17:17:52 make64: *** [all] Error 2
