Enable Conditional Compilation for nGraph evaluate methods #3666

Merged: 9 commits, Dec 21, 2020
2 changes: 1 addition & 1 deletion ngraph/core/CMakeLists.txt
@@ -58,7 +58,7 @@ set_target_properties(ngraph PROPERTIES
C_VISIBILITY_PRESET hidden
VISIBILITY_INLINES_HIDDEN ON)

target_link_libraries(ngraph PRIVATE openvino::itt ngraph::builder ngraph::reference)
target_link_libraries(ngraph PRIVATE openvino::conditional_compilation openvino::itt ngraph::builder ngraph::reference)

find_package(Graphviz QUIET)
if (GRAPHVIZ_FOUND)
4 changes: 4 additions & 0 deletions ngraph/core/include/ngraph/op/broadcast.hpp
@@ -82,6 +82,10 @@ namespace ngraph
std::pair<bool, AxisSet> get_broadcast_axes() const override;
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const override;

private:
bool broadcast_evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
} // namespace v3

4 changes: 4 additions & 0 deletions ngraph/core/include/ngraph/op/depth_to_space.hpp
@@ -79,6 +79,10 @@ namespace ngraph
std::size_t m_blocksize;
DepthToSpaceMode m_mode;
DepthToSpaceMode mode_from_string(const std::string& mode) const;

private:
bool evaluate_depth_to_space(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
}
using v0::DepthToSpace;
3 changes: 3 additions & 0 deletions ngraph/core/include/ngraph/op/gather.hpp
@@ -57,6 +57,9 @@ namespace ngraph
static const int PARAMS;
static const int INDICES;
static const int AXIS;

bool evaluate_gather(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
} // namespace v1
} // namespace op
2 changes: 2 additions & 0 deletions ngraph/core/include/ngraph/op/interpolate.hpp
@@ -234,6 +234,8 @@ namespace ngraph
std::vector<int64_t> get_axes() const;

private:
bool evaluate_interpolate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
InterpolateAttrs m_attrs;

/// \brief Corrects pads_begin and pads_end attributes.
2 changes: 2 additions & 0 deletions ngraph/core/include/ngraph/op/max_pool.hpp
@@ -98,6 +98,8 @@ namespace ngraph
bool update_auto_padding(const PartialShape& in_shape,
Shape& new_pads_end,
Shape& new_pads_begin) const;
bool evaluate_maxpool(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
} // namespace v1
} // namespace op
2 changes: 2 additions & 0 deletions ngraph/core/include/ngraph/op/pad.hpp
@@ -89,6 +89,8 @@ namespace ngraph

private:
PadMode m_pad_mode;
bool evaluate_pad(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
}
}
2 changes: 2 additions & 0 deletions ngraph/core/include/ngraph/op/reshape.hpp
@@ -71,6 +71,8 @@ namespace ngraph

protected:
bool m_special_zero;
bool evaluate_reshape(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
}
}
4 changes: 4 additions & 0 deletions ngraph/core/include/ngraph/op/reverse.hpp
@@ -73,6 +73,10 @@ namespace ngraph
/// Alternatively it can contain a boolean mask that indicates which axes should be
/// reversed.
Mode m_mode;

private:
bool evaluate_reverse(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
}
}
4 changes: 4 additions & 0 deletions ngraph/core/include/ngraph/op/scatter_elements_update.hpp
@@ -52,6 +52,10 @@ namespace ngraph
clone_with_new_inputs(const OutputVector& inputs) const override;
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const override;

private:
bool evaluate_scatter_element_update(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
}
using v3::ScatterElementsUpdate;
4 changes: 4 additions & 0 deletions ngraph/core/include/ngraph/op/scatter_update.hpp
@@ -53,6 +53,10 @@ namespace ngraph

bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const override;

private:
bool evaluate_scatter_update(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
}
}
2 changes: 2 additions & 0 deletions ngraph/core/include/ngraph/op/shuffle_channels.hpp
@@ -69,6 +69,8 @@ namespace ngraph
/// \param data_shape - Shape of the original input data tensor
/// \return A 4D tensor to be used to reshape the input data before shuffling it
Shape get_pre_shuffle_shape(const Shape& data_shape) const;
bool evaluate_shuffle_channels(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;

int64_t m_axis;
int64_t m_group;
4 changes: 4 additions & 0 deletions ngraph/core/include/ngraph/op/space_to_batch.hpp
@@ -63,6 +63,10 @@ namespace ngraph

bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const override;

private:
bool evaluate_space_to_batch(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
}
using v1::SpaceToBatch;
4 changes: 4 additions & 0 deletions ngraph/core/include/ngraph/op/space_to_depth.hpp
@@ -76,6 +76,10 @@ namespace ngraph
protected:
std::size_t m_blocksize;
SpaceToDepthMode m_mode;

private:
bool evaluate_space_to_depth(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
}
using v0::SpaceToDepth;
4 changes: 4 additions & 0 deletions ngraph/core/include/ngraph/op/tile.hpp
@@ -47,6 +47,10 @@ namespace ngraph

bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const override;

private:
bool evaluate_tile(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
}
}
4 changes: 4 additions & 0 deletions ngraph/core/include/ngraph/op/topk.hpp
@@ -115,6 +115,10 @@ namespace ngraph
const PartialShape input_partial_shape,
const int64_t k) const;
void set_axis(const Rank input_rank, const int64_t axis);

private:
bool evaluate_topk(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
} // namespace v1

4 changes: 4 additions & 0 deletions ngraph/core/include/ngraph/op/variadic_split.hpp
@@ -56,6 +56,10 @@ namespace ngraph
size_t get_default_output_index() const override { return no_default_index(); }
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const override;

private:
bool evaluate_variadic_split(const HostTensorVector& outputs,
const HostTensorVector& inputs) const;
};
} // namespace v1

30 changes: 29 additions & 1 deletion ngraph/core/src/itt.hpp
@@ -21,6 +21,8 @@

#pragma once

#include <openvino/cc/factory.h>
#include <openvino/cc/selective_build.h>
#include <openvino/itt.hpp>

namespace ngraph
@@ -31,7 +33,33 @@ namespace ngraph
{
OV_ITT_DOMAIN(nGraph);
OV_ITT_DOMAIN(nGraphPass_LT);
OV_ITT_DOMAIN(nGraphOp, "nGraph::Op");
OV_ITT_DOMAIN(ngraph_op, "nGraph::Op");
}
}
OV_CC_DOMAINS(ngraph_op);
}

#if defined(SELECTIVE_BUILD) || defined(SELECTIVE_BUILD_ANALYZER)
#define NGRAPH_OP_SCOPE(region, ...) OV_SCOPE(ngraph_op, region, __VA_ARGS__)
#else
#define NGRAPH_OP_SCOPE(region, ...) \
OV_ITT_SCOPED_TASK(itt::domains::ngraph_op, #region); \
__VA_ARGS__
#endif

#define NGRAPH_TYPE_CASE(region, a, ...) \
case element::Type_t::a: \
{ \
OV_SCOPE( \
ngraph_op, OV_CC_CAT3(region, _, a), rc = evaluate<element::Type_t::a>(__VA_ARGS__)); \
} \
break;

#define NGRAPH_COPY_TENSOR(region, a, ...) \
case element::Type_t::a: \
{ \
OV_SCOPE(ngraph_op, \
OV_CC_CAT3(region, _, a), \
rc = copy_tensor<element::Type_t::a>(__VA_ARGS__)); \
} \
break;
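
The macros above carry the whole mechanism of this PR: when SELECTIVE_BUILD or SELECTIVE_BUILD_ANALYZER is defined, each named region is routed through OV_SCOPE so the analyzer can record it and a selective build can compile it out; otherwise the macro falls back to an ITT task plus the original statement. The stand-alone sketch below mimics only that calling pattern with simplified stand-in macros (MY_OP_SCOPE and MY_SELECTIVE_BUILD are illustrative names, not the real OV_SCOPE / NGRAPH_OP_SCOPE from <openvino/cc/selective_build.h>); the real macros additionally emit ITT instrumentation and disable only regions absent from the collected statistics.

```cpp
// Minimal stand-alone sketch of the conditional-compilation calling pattern.
// Assumption: simplified stand-ins, not the real OpenVINO macros.
#include <iostream>

#if defined(MY_SELECTIVE_BUILD)
// "Selective build": the wrapped statement is compiled out entirely.
#define MY_OP_SCOPE(region, ...) do { } while (0)
#else
// Regular build: just execute the wrapped statement.
#define MY_OP_SCOPE(region, ...) do { __VA_ARGS__; } while (0)
#endif

// Stand-in for a typed evaluate kernel such as absop::evaluate_abs.
static bool evaluate_abs_f32(float in, float& out)
{
    out = in < 0.0f ? -in : in;
    return true;
}

int main()
{
    bool rc = false;
    float out = 0.0f;
    // Same shape as NGRAPH_OP_SCOPE(v0_Abs_evaluate, rc = absop::evaluate_abs(...))
    // in the op sources below: the first argument names the region, the second is
    // the statement that may be compiled out.
    MY_OP_SCOPE(v0_Abs_evaluate, rc = evaluate_abs_f32(-3.5f, out));
    std::cout << "rc=" << rc << " out=" << out << "\n"; // rc=1 out=3.5 in a regular build
    return 0;
}
```

In the diff that follows, every evaluate body gets such a region name (for example v0_Abs_evaluate), and NGRAPH_TYPE_CASE builds a per-type region name via OV_CC_CAT3 (evaluate_abs + f32 becomes evaluate_abs_f32), which is what the conditional-compilation tooling keys on.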
31 changes: 13 additions & 18 deletions ngraph/core/src/op/abs.cpp
@@ -57,22 +57,14 @@ namespace absop

switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, out, count);
break;
TYPE_CASE(i32)(arg0, out, count);
break;
TYPE_CASE(i64)(arg0, out, count);
break;
TYPE_CASE(u32)(arg0, out, count);
break;
TYPE_CASE(u64)(arg0, out, count);
break;
TYPE_CASE(f16)(arg0, out, count);
break;
TYPE_CASE(f32)(arg0, out, count);
break;
TYPE_CASE(bf16)(arg0, out, count);
break;
NGRAPH_TYPE_CASE(evaluate_abs, boolean, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_abs, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_abs, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_abs, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_abs, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_abs, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_abs, f32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_abs, bf16, arg0, out, count);
default: rc = false; break;
}
return rc;
@@ -81,6 +73,9 @@ namespace absop

bool op::Abs::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Abs::evaluate");
return absop::evaluate_abs(inputs[0], outputs[0], shape_size(get_output_shape(0)));
bool rc = false;
NGRAPH_OP_SCOPE(
v0_Abs_evaluate,
rc = absop::evaluate_abs(inputs[0], outputs[0], shape_size(get_output_shape(0))));
return rc;
}
28 changes: 12 additions & 16 deletions ngraph/core/src/op/acos.cpp
@@ -66,20 +66,13 @@ namespace acosop

switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, out, count);
break;
TYPE_CASE(i32)(arg0, out, count);
break;
TYPE_CASE(i64)(arg0, out, count);
break;
TYPE_CASE(u32)(arg0, out, count);
break;
TYPE_CASE(u64)(arg0, out, count);
break;
TYPE_CASE(f16)(arg0, out, count);
break;
TYPE_CASE(f32)(arg0, out, count);
break;
NGRAPH_TYPE_CASE(evaluate_acos, boolean, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_acos, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_acos, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_acos, u32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_acos, u64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_acos, f16, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_acos, f32, arg0, out, count);
default: rc = false; break;
}
return rc;
@@ -88,6 +81,9 @@ namespace acosop

bool op::Acos::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::Acos::evaluate");
return acosop::evaluate_acos(inputs[0], outputs[0], shape_size(get_output_shape(0)));
bool rc = false;
NGRAPH_OP_SCOPE(
v0_Acos_evaluate,
rc = acosop::evaluate_acos(inputs[0], outputs[0], shape_size(get_output_shape(0))));
return rc;
}
23 changes: 9 additions & 14 deletions ngraph/core/src/op/acosh.cpp
@@ -56,18 +56,12 @@ namespace acoshop
out->set_unary(arg0);
switch (arg0->get_element_type())
{
TYPE_CASE(i32)(arg0, out);
break;
TYPE_CASE(i64)(arg0, out);
break;
TYPE_CASE(u32)(arg0, out);
break;
TYPE_CASE(u64)(arg0, out);
break;
TYPE_CASE(f16)(arg0, out);
break;
TYPE_CASE(f32)(arg0, out);
break;
NGRAPH_TYPE_CASE(evaluate_acosh, i32, arg0, out);
NGRAPH_TYPE_CASE(evaluate_acosh, i64, arg0, out);
NGRAPH_TYPE_CASE(evaluate_acosh, u32, arg0, out);
NGRAPH_TYPE_CASE(evaluate_acosh, u64, arg0, out);
NGRAPH_TYPE_CASE(evaluate_acosh, f16, arg0, out);
NGRAPH_TYPE_CASE(evaluate_acosh, f32, arg0, out);
default: rc = false; break;
}
return rc;
@@ -76,6 +70,7 @@ namespace acoshop

bool op::v3::Acosh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v3::Acosh::evaluate");
return acoshop::evaluate_acosh(inputs[0], outputs[0]);
bool rc = false;
NGRAPH_OP_SCOPE(v3_Acosh_evaluate, rc = acoshop::evaluate_acosh(inputs[0], outputs[0]));
return rc;
}
41 changes: 16 additions & 25 deletions ngraph/core/src/op/add.cpp
@@ -50,28 +50,17 @@ namespace add
out->set_broadcast(broadcast_spec, arg0, arg1);
switch (arg0->get_element_type())
{
TYPE_CASE(i8)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i16)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u8)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u16)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(bf16)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f16)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f32)(arg0, arg1, out, broadcast_spec);
break;
NGRAPH_TYPE_CASE(evaluate_add, i8, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, i16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, i32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, i64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, u8, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, u16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, u32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, u64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, bf16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, f16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_add, f32, arg0, arg1, out, broadcast_spec);
default: rc = false; break;
}
return rc;
@@ -104,6 +93,8 @@ shared_ptr<Node> op::v1::Add::clone_with_new_inputs(const OutputVector& new_args

bool op::v1::Add::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Add::evaluate");
return add::evaluate_add(inputs[0], inputs[1], outputs[0], get_autob());
}
bool rc = false;
NGRAPH_OP_SCOPE(v1_Add_evaluate,
rc = add::evaluate_add(inputs[0], inputs[1], outputs[0], get_autob()));
return rc;
}