Merge pull request #18 from mvafin/pdpd/assert_rename
Rename MY_ASSERT
nosovmik authored Apr 20, 2021
2 parents 3da2b6a + dc28c3b commit 45ae3ce
Showing 8 changed files with 16 additions and 16 deletions.
@@ -21,7 +21,7 @@
namespace ngraph {
namespace frontend {

-inline void MY_ASSERT(bool ex, const std::string& msg = "Unspecified error.") {
+inline void PDPD_ASSERT(bool ex, const std::string& msg = "Unspecified error.") {
if (!ex) throw std::runtime_error(msg);
}

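For quick reference, a minimal standalone sketch of the renamed helper's behavior (the definition is exactly the one in the hunk above):

    #include <iostream>
    #include <stdexcept>
    #include <string>

    inline void PDPD_ASSERT(bool ex, const std::string& msg = "Unspecified error.") {
        if (!ex) throw std::runtime_error(msg);
    }

    int main() {
        PDPD_ASSERT(2 + 2 == 4);                  // true condition: no effect
        try {
            PDPD_ASSERT(false, "Demo failure.");  // false condition: throws
        } catch (const std::runtime_error& e) {
            std::cout << e.what() << std::endl;   // prints "Demo failure."
        }
        return 0;
    }
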
4 changes: 2 additions & 2 deletions ngraph/frontend/paddlepaddle/src/frontend.cpp
@@ -51,7 +51,7 @@ std::shared_ptr<ngraph::Node> make_ng_node(std::map<std::string, Output<Node>>&
const auto& op = op_place->getDesc();
std::cout << "Making node: " << op->type() << std::endl;

-MY_ASSERT(CREATORS_MAP.find(op->type()) != CREATORS_MAP.end(), "No creator found");
+PDPD_ASSERT(CREATORS_MAP.find(op->type()) != CREATORS_MAP.end(), "No creator found");
std::map<std::string, OutputVector> named_inputs;
const auto& input_ports = op_place->getInputPorts();
for (const auto& name_to_ports : input_ports) {
@@ -82,7 +82,7 @@ std::shared_ptr<Constant> FrontEndPDPD::read_tensor(const std::shared_ptr<Tensor
{
const auto& var_desc = tensor_place->getDesc();
std::cout << "Reading tensor " << var_desc->name() << std::endl;
-MY_ASSERT(var_desc->type().type() == paddle::framework::proto::VarType::LOD_TENSOR);
+PDPD_ASSERT(var_desc->type().type() == paddle::framework::proto::VarType::LOD_TENSOR);
const auto& tensor = var_desc->type().lod_tensor().tensor();
const auto& tensor_length = std::accumulate(
tensor.dims().cbegin(), tensor.dims().cend(), 1, std::multiplies<int64_t>());
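read_tensor computes the flat tensor length as the product of the dims; a self-contained sketch of that arithmetic with hypothetical dims:

    #include <cstdint>
    #include <functional>
    #include <numeric>
    #include <vector>

    int main() {
        std::vector<int64_t> dims{2, 3, 4};  // hypothetical tensor dims
        // Fold with multiplication: 1 * 2 * 3 * 4 == 24 elements.
        auto length = std::accumulate(dims.cbegin(), dims.cend(),
                                      int64_t{1}, std::multiplies<int64_t>());
        return length == 24 ? 0 : 1;
    }

One caveat worth noting: std::accumulate deduces its accumulator type from the initial value, so the plain int literal 1 in the hunk above accumulates in int; an int64_t initial value, as in this sketch, avoids overflow on large tensors.
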
2 changes: 1 addition & 1 deletion ngraph/frontend/paddlepaddle/src/node_context.hpp
@@ -48,7 +48,7 @@ class NodeContext
/// Returns exactly one input with a given name; throws if there are no inputs or more than one input
Output<Node> get_ng_input (const std::string& name) const
{
-MY_ASSERT(name_map.at(name).size() == 1);
+PDPD_ASSERT(name_map.at(name).size() == 1);
return name_map.at(name).at(0);
}

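The doc comment spells out get_ng_input's contract; a standalone illustration of that contract, with int standing in for Output<Node> and a plain std::map standing in for NodeContext's internal name_map (both stand-ins are hypothetical):

    #include <map>
    #include <stdexcept>
    #include <string>
    #include <vector>

    inline void PDPD_ASSERT(bool ex, const std::string& msg = "Unspecified error.") {
        if (!ex) throw std::runtime_error(msg);
    }

    // .at() throws std::out_of_range for an unknown name; the assert
    // additionally enforces that exactly one input is mapped to it.
    int get_single_input(const std::map<std::string, std::vector<int>>& name_map,
                         const std::string& name) {
        PDPD_ASSERT(name_map.at(name).size() == 1,
                    "Expected exactly one input for '" + name + "'.");
        return name_map.at(name).at(0);
    }
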
4 changes: 2 additions & 2 deletions ngraph/frontend/paddlepaddle/src/op/elementwise_ops.cpp
@@ -30,8 +30,8 @@ OutputVector elementwise_ops (const NodeContext& node) {

auto axis = node.get_attribute<int>("axis");

-MY_ASSERT(x.get_partial_shape().rank().is_static(), "elementwise_ops: X rank must be static!");
-MY_ASSERT(y.get_partial_shape().rank().is_static(), "elementwise_ops: Y rank must be static!");
+PDPD_ASSERT(x.get_partial_shape().rank().is_static(), "elementwise_ops: X rank must be static!");
+PDPD_ASSERT(y.get_partial_shape().rank().is_static(), "elementwise_ops: Y rank must be static!");
int64_t x_rank = x.get_partial_shape().rank().get_length();
int64_t y_rank = y.get_partial_shape().rank().get_length();

4 changes: 2 additions & 2 deletions ngraph/frontend/paddlepaddle/src/op/mul.cpp
@@ -26,9 +26,9 @@ namespace op {
OutputVector mul (const NodeContext& node) {
auto x = node.get_ng_input("X");
auto y = node.get_ng_input("Y");
-MY_ASSERT(x.get_partial_shape().rank().is_static(), "matmul: X rank must be static!");
+PDPD_ASSERT(x.get_partial_shape().rank().is_static(), "matmul: X rank must be static!");
int64_t x_rank = x.get_partial_shape().rank().get_length();
-MY_ASSERT(y.get_partial_shape().rank().is_static() &&
+PDPD_ASSERT(y.get_partial_shape().rank().is_static() &&
y.get_partial_shape().rank().get_length() == 2, "matmul: Y rank must be static, and 2!");
if (x_rank > 2) {
auto shape = std::make_shared<ngraph::opset6::ShapeOf>(x);
2 changes: 1 addition & 1 deletion ngraph/frontend/paddlepaddle/src/op/softmax.cpp
@@ -27,7 +27,7 @@ namespace op {
auto axis = node.get_attribute<int32_t>("axis");
if (axis < 0)
{
-MY_ASSERT(data.get_partial_shape().rank().is_static(), "Softmax rank must be static");
+PDPD_ASSERT(data.get_partial_shape().rank().is_static(), "Softmax rank must be static");
auto data_rank = data.get_partial_shape().rank().get_length();
axis = data_rank + axis;
}
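The branch above normalizes a negative axis against the data rank in the usual Python style; a tiny worked sketch:

    #include <cassert>
    #include <cstdint>

    // As in the softmax hunk: a negative axis counts from the back.
    int32_t normalize_axis(int32_t axis, int64_t data_rank) {
        return axis < 0 ? static_cast<int32_t>(data_rank) + axis : axis;
    }

    int main() {
        assert(normalize_axis(-1, 4) == 3);  // last dim of a rank-4 tensor
        assert(normalize_axis(2, 4) == 2);   // non-negative axes pass through
        return 0;
    }
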
2 changes: 1 addition & 1 deletion ngraph/frontend/paddlepaddle/src/op/transpose2.cpp
@@ -31,7 +31,7 @@ OutputVector transpose2 (const NodeContext& node) {
std::cout << perm.size() << std::endl;
std::cout << data.get_partial_shape().rank() << ":" << rank << std::endl;

-MY_ASSERT(perm.size() == rank, "transpose2: axis size must equal data rank!");
+PDPD_ASSERT(perm.size() == rank, "transpose2: axis size must equal data rank!");

auto input_order = ngraph::opset6::Constant::create(ngraph::element::i64, {rank}, perm);
return {std::make_shared<ngraph::opset6::Transpose>(data, input_order)};
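A hypothetical usage sketch of the transpose2 pattern, assuming the nGraph opset6 API used throughout this diff: permuting a 2x3 parameter with perm {1, 0} yields a 3x2 result.

    #include <memory>
    #include <vector>
    #include <ngraph/opsets/opset6.hpp>

    std::shared_ptr<ngraph::Node> transpose_example() {
        auto data = std::make_shared<ngraph::opset6::Parameter>(
            ngraph::element::f32, ngraph::Shape{2, 3});
        std::vector<int64_t> perm{1, 0};  // perm.size() matches the data rank
        auto input_order = ngraph::opset6::Constant::create(
            ngraph::element::i64, {perm.size()}, perm);
        // Resulting output shape: {3, 2}
        return std::make_shared<ngraph::opset6::Transpose>(data, input_order);
    }
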
12 changes: 6 additions & 6 deletions ngraph/frontend/paddlepaddle/src/place.cpp
@@ -74,37 +74,37 @@ std::vector<Place::Ptr> TensorPlacePDPD::getConsumingPorts() const {
if (const auto& locked = consuming_port.lock()) {
consuming_ports.push_back(locked);
} else {
MY_ASSERT(false, "Consuming Port has expired.");
PDPD_ASSERT(false, "Consuming Port has expired.");
}
}
return consuming_ports;
}

Place::Ptr TensorPlacePDPD::getProducingPort() const {
-MY_ASSERT(m_producing_ports.size() == 1, "Only one producing port is supported.");
+PDPD_ASSERT(m_producing_ports.size() == 1, "Only one producing port is supported.");
if (const auto& producing_port = m_producing_ports[0].lock()) {
return producing_port;
}
MY_ASSERT(false, "Producing Port has expired.");
PDPD_ASSERT(false, "Producing Port has expired.");
}

std::shared_ptr<TensorPlacePDPD> InPortPlacePDPD::getSourceTensorPDPD() const {
if (const auto& tensor = m_source_tensor.lock()) {
return tensor;
}
MY_ASSERT(false, "Source Tensor has expired.");
PDPD_ASSERT(false, "Source Tensor has expired.");
}

std::shared_ptr<OpPlacePDPD> InPortPlacePDPD::getOp() {
if (const auto& op = m_op.lock()) {
return op;
}
MY_ASSERT(false, "Operation has expired.");
PDPD_ASSERT(false, "Operation has expired.");
}

std::shared_ptr<TensorPlacePDPD> OutPortPlacePDPD::getTargetTensorPDPD() const {
if (const auto& target_tensor = m_target_tensor.lock()) {
return target_tensor;
}
MY_ASSERT(false, "Target Tensor has expired.");
PDPD_ASSERT(false, "Target Tensor has expired.");
}
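Every accessor in place.cpp follows the same lock-or-assert idiom on a std::weak_ptr; a minimal generic sketch of that idiom, reusing the PDPD_ASSERT helper defined earlier in the diff:

    #include <memory>
    #include <stdexcept>
    #include <string>

    inline void PDPD_ASSERT(bool ex, const std::string& msg = "Unspecified error.") {
        if (!ex) throw std::runtime_error(msg);
    }

    // A weak_ptr either yields a live shared_ptr or the assert throws.
    template <typename T>
    std::shared_ptr<T> lock_or_throw(const std::weak_ptr<T>& weak,
                                     const std::string& what) {
        if (auto strong = weak.lock()) {
            return strong;
        }
        PDPD_ASSERT(false, what + " has expired.");
        return nullptr;  // unreachable: the assert above always throws
    }

Note that the accessors above simply fall off the end after PDPD_ASSERT(false, ...); that is safe at runtime because the assert always throws, but most compilers will warn about the missing return on that path.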
