add reduce ops (PaddlePaddle#167)
gglin001 authored Sep 17, 2021
1 parent d59f5d7 commit 564aab9
Showing 5 changed files with 302 additions and 22 deletions.
1 change: 1 addition & 0 deletions paddle/fluid/framework/ipu/CMakeLists.txt
@@ -9,6 +9,7 @@ set(POPART_CANONICALIZATION_SRC
   "popart_canonicalization/nn_ops.cc"
   "popart_canonicalization/tensor_ops.cc"
   "popart_canonicalization/other_ops.cc"
+  "popart_canonicalization/reduce_ops.cc"
 )
 
 cc_library(ipu_device SRCS device.cc DEPS enforce popart)
58 changes: 53 additions & 5 deletions paddle/fluid/framework/ipu/ipu_compiler.cc
@@ -113,10 +113,8 @@ void Compiler::LowerBody(const ir::Graph* graph) {
     auto op_type = op_desc->Type();
     VLOG(10) << "node->type: " << op_type;
 
-    auto itr = name_function_.find(op_type);
-    if (itr != name_function_.end()) {
-      itr->second(node->Op());
-    } else if (op_type == "popart_constant") {
+    // TODO(alleng) abstract duplicate code
+    if (op_type == "popart_constant") {
       auto dims =
           BOOST_GET_CONST(std::vector<int64_t>, op_desc->GetAttr("dims"));
       auto dtype_ = BOOST_GET_CONST(int, op_desc->GetAttr("dtype"));
@@ -163,6 +161,50 @@ void Compiler::LowerBody(const ir::Graph* graph) {
           builder_->aiOnnxOpset11().reducemean(inputs, axes, keepdims);
       SetIpuIndexStage(result, op_desc);
       InsertTensors(GetOpOutputs(op_desc), result);
+    } else if (op_type == "popart_reducemin") {
+      auto inputs = GetOpInputs(op_desc);
+      auto axes = nonstd::optional<std::vector<int64_t>>();
+      if (op_desc->HasAttr("axes")) {
+        axes = BOOST_GET_CONST(std::vector<int64_t>, op_desc->GetAttr("axes"));
+      }
+      auto keepdims = BOOST_GET_CONST(int64_t, op_desc->GetAttr("keepdims"));
+      popart::TensorId result =
+          builder_->aiOnnxOpset11().reducemin(inputs, axes, keepdims);
+      SetIpuIndexStage(result, op_desc);
+      InsertTensors(GetOpOutputs(op_desc), result);
+    } else if (op_type == "popart_reducemax") {
+      auto inputs = GetOpInputs(op_desc);
+      auto axes = nonstd::optional<std::vector<int64_t>>();
+      if (op_desc->HasAttr("axes")) {
+        axes = BOOST_GET_CONST(std::vector<int64_t>, op_desc->GetAttr("axes"));
+      }
+      auto keepdims = BOOST_GET_CONST(int64_t, op_desc->GetAttr("keepdims"));
+      popart::TensorId result =
+          builder_->aiOnnxOpset11().reducemax(inputs, axes, keepdims);
+      SetIpuIndexStage(result, op_desc);
+      InsertTensors(GetOpOutputs(op_desc), result);
+    } else if (op_type == "popart_reducesum") {
+      auto inputs = GetOpInputs(op_desc);
+      auto axes = nonstd::optional<std::vector<int64_t>>();
+      if (op_desc->HasAttr("axes")) {
+        axes = BOOST_GET_CONST(std::vector<int64_t>, op_desc->GetAttr("axes"));
+      }
+      auto keepdims = BOOST_GET_CONST(int64_t, op_desc->GetAttr("keepdims"));
+      popart::TensorId result =
+          builder_->aiOnnxOpset11().reducesum(inputs, axes, keepdims);
+      SetIpuIndexStage(result, op_desc);
+      InsertTensors(GetOpOutputs(op_desc), result);
+    } else if (op_type == "popart_reduceprod") {
+      auto inputs = GetOpInputs(op_desc);
+      auto axes = nonstd::optional<std::vector<int64_t>>();
+      if (op_desc->HasAttr("axes")) {
+        axes = BOOST_GET_CONST(std::vector<int64_t>, op_desc->GetAttr("axes"));
+      }
+      auto keepdims = BOOST_GET_CONST(int64_t, op_desc->GetAttr("keepdims"));
+      popart::TensorId result =
+          builder_->aiOnnxOpset11().reduceprod(inputs, axes, keepdims);
+      SetIpuIndexStage(result, op_desc);
+      InsertTensors(GetOpOutputs(op_desc), result);
     } else if (op_type == "popart_batchnormalization") {
       auto inputs = GetOpInputs(op_desc);
       auto outputs = GetOpOutputs(op_desc);
@@ -181,7 +223,13 @@ void Compiler::LowerBody(const ir::Graph* graph) {
       SetIpuIndexStage(result, op_desc);
       InsertTensors(GetOpOutputs(op_desc), result);
     } else {
-      PADDLE_THROW(platform::errors::NotFound("%s is not registered", op_type));
+      auto itr = name_function_.find(op_type);
+      if (itr != name_function_.end()) {
+        itr->second(node->Op());
+      } else {
+        PADDLE_THROW(
+            platform::errors::NotFound("%s is not registered", op_type));
+      }
     }
   }
   VLOG(10) << "leave Compiler::LowerBody";
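
The five popart_reduce* branches added above are identical except for the aiOnnxOpset11 call they make, which is what the TODO(alleng) comment flags. One possible follow-up, sketched against the helpers already used in this file; the LowerReduceOp name and the std::function signature are hypothetical and not part of this commit:

// Hypothetical helper that collapses the duplicated reduce branches.
// builder_, GetOpInputs, GetOpOutputs, SetIpuIndexStage and InsertTensors
// are the same members used in Compiler::LowerBody above.
using ReduceBuilderFn = std::function<popart::TensorId(
    const std::vector<popart::TensorId>&,
    const nonstd::optional<std::vector<int64_t>>&, int64_t)>;

void Compiler::LowerReduceOp(OpDesc* op_desc, const ReduceBuilderFn& build) {
  auto inputs = GetOpInputs(op_desc);
  auto axes = nonstd::optional<std::vector<int64_t>>();
  if (op_desc->HasAttr("axes")) {
    axes = BOOST_GET_CONST(std::vector<int64_t>, op_desc->GetAttr("axes"));
  }
  auto keepdims = BOOST_GET_CONST(int64_t, op_desc->GetAttr("keepdims"));
  popart::TensorId result = build(inputs, axes, keepdims);
  SetIpuIndexStage(result, op_desc);
  InsertTensors(GetOpOutputs(op_desc), result);
}

Each else-if branch in LowerBody would then shrink to a single call such as LowerReduceOp(op_desc, [&](const auto& in, const auto& ax, int64_t kd) { return builder_->aiOnnxOpset11().reducemin(in, ax, kd); });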
17 changes: 0 additions & 17 deletions paddle/fluid/framework/ipu/popart_canonicalization/math_ops.cc
@@ -21,22 +21,6 @@ namespace framework {
 namespace ipu {
 namespace {
 
-Node *reduce_mean_handler(Graph *graph, Node *node) {
-  auto *op = node->Op();
-  auto attrs = AttributeMap{};
-  auto reduce_all = BOOST_GET_CONST(bool, op->GetAttr("reduce_all"));
-  if (!reduce_all) {
-    auto axes_ = BOOST_GET_CONST(std::vector<int>, op->GetAttr("dim"));
-    auto axes = std::vector<int64_t>{axes_.begin(), axes_.end()};
-    attrs.emplace("axes", axes);
-  }
-  auto keepdims_ = BOOST_GET_CONST(bool, op->GetAttr("keep_dim"));
-  auto keepdims = int64_t{keepdims_};
-  attrs.emplace("keepdims", keepdims);
-  return CreateBaseOp(graph, node, "popart_reducemean", node->inputs,
-                      node->outputs, attrs);
-}
-
 Node *mean_handler(Graph *graph, Node *node) {
   return CreateBaseOp(graph, node, "popart_reducemean",
                       {GetInputVarNode("X", node)},
@@ -260,7 +244,6 @@ Node *cross_entropy2_handler(Graph *graph, Node *node) {
   }
 }
 
-REGISTER_HANDLER(reduce_mean, reduce_mean_handler);
 REGISTER_HANDLER(mean, mean_handler);
 REGISTER_HANDLER(pow, pow_handler);
 REGISTER_HANDLER(mul, mul_handler);
68 changes: 68 additions & 0 deletions paddle/fluid/framework/ipu/popart_canonicalization/reduce_ops.cc
@@ -0,0 +1,68 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/ipu/popart_canonicalization/canonicalization_utils.h"
#include "paddle/fluid/framework/ipu/popart_canonicalization/op_builder.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace framework {
namespace ipu {
namespace {

// Shared canonicalizer for the reduce_* ops: translates Paddle's
// (reduce_all, dim, keep_dim) attributes into the (axes, keepdims)
// attributes expected by the popart_reduce* ops. When reduce_all is set,
// no "axes" attribute is emitted, so the lowered op reduces over all
// dimensions.
Node *reduce_op_handler(Graph *graph, Node *node, const std::string &op_name) {
  auto *op = node->Op();
  auto attrs = AttributeMap{};
  auto reduce_all = BOOST_GET_CONST(bool, op->GetAttr("reduce_all"));
  if (!reduce_all) {
    auto axes_ = BOOST_GET_CONST(std::vector<int>, op->GetAttr("dim"));
    auto axes = std::vector<int64_t>{axes_.begin(), axes_.end()};
    attrs.emplace("axes", axes);
  }
  auto keepdims_ = BOOST_GET_CONST(bool, op->GetAttr("keep_dim"));
  auto keepdims = int64_t{keepdims_};
  attrs.emplace("keepdims", keepdims);
  return CreateBaseOp(graph, node, op_name, node->inputs, node->outputs,
                      attrs);
}

Node *reduce_mean_handler(Graph *graph, Node *node) {
  return reduce_op_handler(graph, node, "popart_reducemean");
}

Node *reduce_min_handler(Graph *graph, Node *node) {
  return reduce_op_handler(graph, node, "popart_reducemin");
}

Node *reduce_sum_handler(Graph *graph, Node *node) {
  return reduce_op_handler(graph, node, "popart_reducesum");
}

Node *reduce_max_handler(Graph *graph, Node *node) {
  return reduce_op_handler(graph, node, "popart_reducemax");
}

Node *reduce_prod_handler(Graph *graph, Node *node) {
  return reduce_op_handler(graph, node, "popart_reduceprod");
}

REGISTER_HANDLER(reduce_mean, reduce_mean_handler);
REGISTER_HANDLER(reduce_min, reduce_min_handler);
REGISTER_HANDLER(reduce_sum, reduce_sum_handler);
REGISTER_HANDLER(reduce_max, reduce_max_handler);
REGISTER_HANDLER(reduce_prod, reduce_prod_handler);

} // namespace
} // namespace ipu
} // namespace framework
} // namespace paddle
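
Read together with the compiler changes above, the two halves compose per op: the canonicalization pass rewrites, for example, a Paddle reduce_min with reduce_all=false, dim=[1], keep_dim=False into a popart_reducemin node carrying axes={1} and keepdims=0, and Compiler::LowerBody then emits the ONNX op. An illustrative sketch of that final step, with values inlined; this snippet follows the diff's identifiers but is not code from the commit:

// Sketch of the lowering Compiler::LowerBody performs for the canonicalized
// popart_reducemin node described above.
auto axes = nonstd::optional<std::vector<int64_t>>(std::vector<int64_t>{1});
popart::TensorId result = builder_->aiOnnxOpset11().reducemin(
    GetOpInputs(op_desc), axes, /*keepdims=*/0);
SetIpuIndexStage(result, op_desc);
InsertTensors(GetOpOutputs(op_desc), result);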
180 changes: 180 additions & 0 deletions python/paddle/fluid/tests/unittests/ipu/test_reduce_x_op_ipu.py
@@ -0,0 +1,180 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import paddle.optimizer
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, np_dtype_to_fluid_str

paddle.enable_static()


@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestMean(IPUOpTest):
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.init_op()

    def init_op(self):
        self.op = paddle.fluid.layers.reduce_mean

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed.values()]
        self.feed_list = list(self.feed.keys())
        self.feed_dtype = [
            np_dtype_to_fluid_str(x.dtype) for x in self.feed.values()
        ]

    def _test_base(self, run_ipu=True):
        scope = fluid.core.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        SEED = self.SEED
        main_prog.random_seed = SEED
        startup_prog.random_seed = SEED

        with fluid.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype='float32')
                out = self.op(x, **self.attrs)

            fetch_list = [out.name]

            if run_ipu:
                place = paddle.IPUPlace()
            else:
                place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if run_ipu:
                feed_list = self.feed_list
                ipu_strategy = compiler.get_ipu_strategy()
                ipu_strategy.is_training = self.is_training
                program = compiler.IpuCompiler(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            result = exe.run(program, feed=self.feed, fetch_list=fetch_list)
            return result[0]

    def run_test_base(self):
        # Run the same program on IPU and on CPU, then check that the two
        # results agree within the configured tolerance.
        res0 = self._test_base(True)
        res1 = self._test_base(False)

        self.assertTrue(
            np.allclose(
                res0.flatten(), res1.flatten(), atol=self.atol))

    def set_feed0(self):
        self.feed = {}
        self.feed["in_0"] = np.random.uniform(size=[2, 4]).astype(np.float32)
        self.set_feed_attr()

    def set_feed1(self):
        self.feed = {}
        self.feed["in_0"] = np.random.uniform(size=[2, 2, 2]).astype(np.float32)
        self.set_feed_attr()

    def set_attr0(self):
        self.attrs = {}
        self.attrs['dim'] = None
        self.attrs['keep_dim'] = False

    def test_case0(self):
        self.set_feed0()
        self.set_attr0()
        self.run_test_base()

    def test_case1(self):
        self.set_feed0()
        self.set_attr0()
        self.attrs['dim'] = 0
        self.run_test_base()

    def test_case2(self):
        self.set_feed0()
        self.set_attr0()
        self.attrs['dim'] = -1
        self.run_test_base()

    def test_case3(self):
        self.set_feed0()
        self.set_attr0()
        self.attrs['dim'] = 1
        self.run_test_base()

    def test_case4(self):
        self.set_feed0()
        self.attrs = {}
        self.attrs['dim'] = 1
        self.attrs['keep_dim'] = True
        self.run_test_base()

    def test_case5(self):
        self.set_feed1()
        self.attrs = {}
        self.attrs['dim'] = [1, 2]
        self.attrs['keep_dim'] = False
        self.run_test_base()

    def test_case6(self):
        self.set_feed1()
        self.attrs = {}
        self.attrs['dim'] = [0, 1]
        self.attrs['keep_dim'] = False
        self.run_test_base()

    def test_case7(self):
        self.set_feed1()
        self.attrs = {}
        self.attrs['dim'] = [0, 1]
        self.attrs['keep_dim'] = True
        self.run_test_base()


# The remaining reduce ops reuse TestMean's feeds and test cases, overriding
# only the layer under test.
class TestMax(TestMean):
    def init_op(self):
        self.op = paddle.fluid.layers.reduce_max


class TestMin(TestMean):
    def init_op(self):
        self.op = paddle.fluid.layers.reduce_min


class TestProd(TestMean):
    def init_op(self):
        self.op = paddle.fluid.layers.reduce_prod


class TestSum(TestMean):
    def init_op(self):
        self.op = paddle.fluid.layers.reduce_sum


if __name__ == "__main__":
    unittest.main()
