fix cinn_instruction_run_op_test when FLAGS_use_system_allocator=True #47731

Merged (1 commit) on Nov 8, 2022
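Summary of the diff below: cinn_instruction_run_op_test.cc is restructured from one monolithic TEST into a googletest fixture (TestCinnInstructionRunOp) with SetUp/Compile/RunAndCheck/TearDown stages and separate CPU and GPU cases; cinn_launch_op_test.cc renames its output variable so the test no longer compares a tensor against itself; and test_helper.h pre-allocates each input tensor on the target place and fixes an "excepted"/"expected" typo.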
paddle/fluid/operators/cinn/cinn_instruction_run_op_test.cc (66 additions, 73 deletions)

@@ -39,92 +39,85 @@ namespace paddle::operators {

 using framework::paddle2cinn::CinnCompiler;

-TEST(CinnInstructionOpTest, TestWithElementwiseAdd) {
-  paddle::framework::InitDevices();
-  platform::SetNumThreads(1);
-  // cache test graph into CinnCompiler
-  const std::string& test_op_out_name = "cinn_instruction_run_op_out";
-  const std::string& add_op_out_name = "add_op_out";
-  auto compilation_key = CinnCompiler::GetInstance()->AddGraph(
-      CreateOnlyElementwiseAddGraph("x", "y", test_op_out_name));
-
-  // create necessary ops
-  auto cinn_instruction_run_op = paddle::framework::OpRegistry::CreateOp(
-      "cinn_instruction_run",
-      {{"X", {"x", "y"}}},
-      {{"Out", {test_op_out_name}}},
-      {{"cached_index", 0}, {"instruction_index", 0}});
-
-  auto cinn_launch_op = paddle::framework::OpRegistry::CreateOp(
-      "cinn_launch",
-      {{"X", {"x", "y"}}},
-      {{"Out", {test_op_out_name}}},
-      {{"compilation_key", compilation_key}});
-
-  // check case: a compiled object not cached before cinn_launch_op run,
-  // so a cinn_instruction_run_op will throw an error
-  framework::Scope scope;
-  platform::CPUPlace place;
-  InitVariablesWithRandomValue<float>({"x", "y"}, {10, 20}, place, &scope);
-  scope.Var(test_op_out_name)->GetMutable<LoDTensor>();
-  ASSERT_THROW(cinn_instruction_run_op->Run(scope, place),
-               paddle::platform::EnforceNotMet);
-  // run cinn_launch_op firstly to launch the compilation
-  // of the above graph and cache two compiled results
-  // of both type float and int
-  cinn_launch_op->Run(scope, place);
-  scope.EraseVars({"x", "y", test_op_out_name});
-  scope.Var(test_op_out_name)->GetMutable<LoDTensor>();
-  InitVariablesWithRandomValue<int>({"x", "y"}, {30, 40}, place, &scope);
-  cinn_launch_op->Run(scope, place);
-
-  // Run ops and check the computation results
-  auto run_and_check_fn = [&](const platform::Place& place) {
-    framework::Scope scope;
-    scope.Var(test_op_out_name)->GetMutable<LoDTensor>();
-    scope.Var(add_op_out_name)->GetMutable<LoDTensor>();
-    auto elementwise_add_op =
+class TestCinnInstructionRunOp : public ::testing::Test {
+ public:
+  const char* test_op_out_name = "test_op_out";
+  const char* add_op_out_name = "add_op_out";
+  std::unique_ptr<framework::OperatorBase> cinn_launch_op;
+  std::unique_ptr<framework::OperatorBase> cinn_instruction_run_op;
+  std::unique_ptr<framework::OperatorBase> elementwise_add_op;
+
+  void SetUp() override {
+    auto compilation_key = CinnCompiler::GetInstance()->AddGraph(
+        CreateOnlyElementwiseAddGraph("x", "y", test_op_out_name));
+
+    // create necessary ops
+    cinn_launch_op = paddle::framework::OpRegistry::CreateOp(
+        "cinn_launch",
+        {{"X", {"x", "y"}}},
+        {{"Out", {test_op_out_name}}},
+        {{"compilation_key", compilation_key}});
+    cinn_instruction_run_op = paddle::framework::OpRegistry::CreateOp(
+        "cinn_instruction_run",
+        {{"X", {"x", "y"}}},
+        {{"Out", {test_op_out_name}}},
+        {{"cached_index", 0}, {"instruction_index", 0}});
+    elementwise_add_op =
         paddle::framework::OpRegistry::CreateOp("elementwise_add",
                                                 {{"X", {"x"}}, {"Y", {"y"}}},
                                                 {{"Out", {add_op_out_name}}},
                                                 {{}});
+  }
+
+  void Compile(const platform::Place& place) {
+    // check case: a compiled object not cached before cinn_launch_op run,
+    // so a cinn_instruction_run_op will throw an error
+    framework::Scope scope;
+    InitVariablesWithRandomValue<float>({"x", "y"}, {10, 20}, place, &scope);
+    scope.Var(test_op_out_name)->GetMutable<LoDTensor>();
+    ASSERT_THROW(cinn_instruction_run_op->Run(scope, place),
+                 paddle::platform::EnforceNotMet);
+
+    // run cinn_launch_op firstly to launch the compilation
+    // of the above graph and cache two compiled results
+    // of both type float and int
+    cinn_launch_op->Run(scope, place);
+    scope.EraseVars({"x", "y", test_op_out_name});
+    scope.Var(test_op_out_name)->GetMutable<LoDTensor>();
+    InitVariablesWithRandomValue<int>({"x", "y"}, {30, 40}, place, &scope);
+    cinn_launch_op->Run(scope, place);
+  }
+
-    // 1. check on type float
+  void RunAndCheck(const platform::Place& place) {
+    // Run ops and check the computation results
+    framework::Scope scope;
     InitVariablesWithRandomValue<float>({"x", "y"}, {10, 20}, place, &scope);
     cinn_instruction_run_op->SetAttr("cached_index", 0);
     cinn_instruction_run_op->Run(scope, place);
+    scope.Var(test_op_out_name)->GetMutable<LoDTensor>();
+    scope.Var(add_op_out_name)->GetMutable<LoDTensor>();
     elementwise_add_op->Run(scope, place);
+    cinn_launch_op->Run(scope, place);
     CompareOpResult<float>(scope.GetVar(test_op_out_name),
                            scope.GetVar(add_op_out_name));
+  }

-    // 2. check on type int to indicate cinn_instruction_run op
-    // can mutable data according compiled result
-    scope.EraseVars({"x", "y", test_op_out_name, add_op_out_name});
-    scope.Var(test_op_out_name)->GetMutable<LoDTensor>();
-    scope.Var(add_op_out_name)->GetMutable<LoDTensor>();
+  void TearDown() override { CinnCompiler::GetInstance()->Clear(); }
+};

-    InitVariablesWithRandomValue<int>({"x", "y"}, {30, 40}, place, &scope);
-    cinn_instruction_run_op->SetAttr("cached_index", 1);
-    cinn_instruction_run_op->Run(scope, place);
-    // need reconstruct elementwise_add_op to choose a new kernel with type int
-    elementwise_add_op =
-        paddle::framework::OpRegistry::CreateOp("elementwise_add",
-                                                {{"X", {"x"}}, {"Y", {"y"}}},
-                                                {{"Out", {add_op_out_name}}},
-                                                {{}});
-    elementwise_add_op->Run(scope, place);
-    CompareOpResult<int>(scope.GetVar(test_op_out_name),
-                         scope.GetVar(add_op_out_name));
-  };
+TEST_F(TestCinnInstructionRunOp, CPU) {
+  platform::CPUPlace place;
+  Compile(place);
+  RunAndCheck(place);
+  // the second run on the same place is to check the cache logic
+  RunAndCheck(place);
+}

-  // CPU
-  run_and_check_fn(platform::CPUPlace());
-  run_and_check_fn(platform::CPUPlace());
 #ifdef PADDLE_WITH_CUDA
-  // GPU
-  run_and_check_fn(platform::CUDAPlace());
-  run_and_check_fn(platform::CUDAPlace());
-#endif
+TEST_F(TestCinnInstructionRunOp, GPU) {
+  platform::CUDAPlace place;
+  Compile(place);
+  RunAndCheck(place);
+  RunAndCheck(place);
 }
+#endif

 }  // namespace paddle::operators
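Note on the restructuring: TearDown() clears the CinnCompiler singleton after every test case, so each TEST_F starts from an empty compilation cache and Compile() repopulates it (one float and one int compiled result, per the comments above). This leans on the standard googletest fixture lifecycle, sketched here independently of Paddle:

    #include "gtest/gtest.h"

    class LifecycleDemo : public ::testing::Test {
     protected:
      // runs before each TEST_F body, on a freshly constructed fixture object
      void SetUp() override {}
      // runs after each TEST_F body
      void TearDown() override {}
    };

    TEST_F(LifecycleDemo, CaseA) { /* gets its own fixture instance */ }
    TEST_F(LifecycleDemo, CaseB) { /* and so does this one */ }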
paddle/fluid/operators/cinn/cinn_launch_op_test.cc (1 addition, 1 deletion)

@@ -49,7 +49,7 @@ using framework::paddle2cinn::CinnCompiler;

 class TestCinnLaunchOp : public ::testing::Test {
  public:
-  const char* test_op_out_name = "add_op_out";
+  const char* test_op_out_name = "test_op_out";
   const char* add_op_out_name = "add_op_out";
   std::unique_ptr<framework::OperatorBase> cinn_launch_op;
   std::unique_ptr<framework::OperatorBase> elementwise_add_op;
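This one-line rename is load-bearing: previously test_op_out_name and add_op_out_name both resolved to the scope variable "add_op_out", so the cinn_launch op and the reference elementwise_add op wrote to the same tensor, and the final comparison checked a buffer against itself and could never fail. Schematically, as in the test body:

    // before the rename, both arguments were the same Variable*
    CompareOpResult<float>(scope.GetVar(test_op_out_name),   // "add_op_out"
                           scope.GetVar(add_op_out_name));   // "add_op_out"

After the rename the two ops write to distinct variables and the comparison is meaningful.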
paddle/fluid/operators/cinn/test_helper.h (3 additions, 2 deletions)

@@ -102,6 +102,7 @@ void InitVariablesWithRandomValue(const std::vector<std::string>& var_names,
   tmp_tensor.mutable_data<DataType>(common_ddim, platform::CPUPlace());
   for (const auto& var_name : var_names) {
     auto* tensor = scope->Var(var_name)->GetMutable<LoDTensor>();
+    tensor->mutable_data<DataType>(common_ddim, place);
     for (auto i = 0; i < tensor->numel(); ++i) {
       tmp_data[i] = static_cast<DataType>(dist(engine));
     }

@@ -121,9 +122,9 @@ void CompareOpResult(Variable* test_out, Variable* expected_out) {
   ASSERT_TRUE(expected_tensor.IsInitialized());
   ASSERT_EQ(test_tensor.dims(), expected_tensor.dims());
   const auto* test_data = test_tensor.data<DataType>();
-  const auto* excepted_data = expected_tensor.data<DataType>();
+  const auto* expected_data = expected_tensor.data<DataType>();
   for (auto i = 0; i < expected_tensor.numel(); ++i) {
-    EXPECT_EQ(test_data[i], excepted_data[i]);
+    EXPECT_EQ(test_data[i], expected_data[i]);
   }
 }

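The added tensor->mutable_data(common_ddim, place) line is the substantive fix. The first hunk is truncated before the loop's tail, but from the visible context the helper stages random values in a CPU tensor and then copies them to each scope variable on the target place. A plausible reconstruction of the helper after this change; the remaining signature parameters, the random-engine setup, and the TensorCopySync tail are assumptions, not shown in the diff:

    template <typename DataType>
    void InitVariablesWithRandomValue(const std::vector<std::string>& var_names,
                                      const framework::DDim& common_ddim,
                                      const platform::Place& place,
                                      framework::Scope* scope) {
      std::random_device seed;
      std::default_random_engine engine(seed());
      std::uniform_real_distribution<float> dist(0.f, 100.f);

      framework::LoDTensor tmp_tensor;
      auto* tmp_data =
          tmp_tensor.mutable_data<DataType>(common_ddim, platform::CPUPlace());
      for (const auto& var_name : var_names) {
        auto* tensor = scope->Var(var_name)->GetMutable<LoDTensor>();
        // the fix: give the destination a real allocation with the right dims
        // on `place` before anything reads from or writes into it
        tensor->mutable_data<DataType>(common_ddim, place);
        for (auto i = 0; i < tensor->numel(); ++i) {
          tmp_data[i] = static_cast<DataType>(dist(engine));
        }
        framework::TensorCopySync(tmp_tensor, place, tensor);  // assumed tail
      }
    }

The page does not show why the missing allocation only surfaced with FLAGS_use_system_allocator=True; what the diff itself establishes is only that each destination tensor now gets its allocation on the target place up front.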