Cleanup IE and ngraph
    1. InferenceEngine::SizeVector
    2. InferenceEngine::parallel_for
    3. all ngraph namespace usages except ngraph::op
riverlijunjie committed Nov 1, 2023
1 parent 51cf5fa commit f5fd042
Showing 228 changed files with 1,158 additions and 1,159 deletions.
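The nearly one-to-one addition/deletion counts reflect mechanical API renames rather than behavioral changes. As a hedged illustration of the three items in the commit message (these lines are not from the diff below; the actual replacement for SizeVector varies by file between ov::Shape, the plugin's VectorDims alias, and plain std::vector<size_t>, and node, process_row, and dump are hypothetical stand-ins):

-#include "ie_parallel.hpp"
+#include "openvino/core/parallel.hpp"

-InferenceEngine::SizeVector dims = node->get_input_shape(0);  // legacy alias of std::vector<size_t>
+ov::Shape dims = node->get_input_shape(0);                    // assumed 2.0 equivalent

-InferenceEngine::parallel_for(dims[0], [&](size_t i) { process_row(i); });
+ov::parallel_for(dims[0], [&](size_t i) { process_row(i); });

-void dump(const std::shared_ptr<ngraph::Node>& n);
+void dump(const std::shared_ptr<ov::Node>& n);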
1 change: 0 additions & 1 deletion src/plugins/intel_cpu/src/compiled_model.cpp
@@ -56,7 +56,6 @@ CompiledModel::CompiledModel(const std::shared_ptr<ov::Model>& model,
m_name{model->get_name()},
m_loaded_from_cache(loaded_from_cache) {
-bool isFloatModel = !ov::op::util::has_op_with_type<ngraph::op::FakeQuantize>(m_model);

m_mutex = std::make_shared<std::mutex>();
const auto& core = m_plugin->get_core();
if (!core)
4 changes: 2 additions & 2 deletions src/plugins/intel_cpu/src/emitters/x64/cpu_generator.cpp
@@ -31,7 +31,7 @@ using namespace std;
[this](const ov::snippets::lowered::ExpressionPtr& expr) -> std::shared_ptr<snippets::Emitter> { \
return std::make_shared<e_type>(h.get(), isa, expr); \
}, \
-[](const std::shared_ptr<ngraph::Node>& n) -> std::set<std::vector<element::Type>> { \
+[](const std::shared_ptr<ov::Node>& n) -> std::set<std::vector<element::Type>> { \
return e_type::get_supported_precisions(n); \
} \
};
@@ -40,7 +40,7 @@ using namespace std;
[this](const ov::snippets::lowered::ExpressionPtr& expr) -> std::shared_ptr<snippets::Emitter> { \
return std::make_shared<e_type>(h.get(), isa, expr->get_node()); \
}, \
-[](const std::shared_ptr<ngraph::Node>& n) -> std::set<std::vector<element::Type>> { \
+[](const std::shared_ptr<ov::Node>& n) -> std::set<std::vector<element::Type>> { \
return e_type::get_supported_precisions(n); \
} \
};
@@ -17,7 +17,7 @@ using namespace Xbyak;
namespace ov {
namespace intel_cpu {

-jit_convert_emitter::jit_convert_emitter(jit_generator *host, cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& node, Precision exec_prc)
+jit_convert_emitter::jit_convert_emitter(jit_generator *host, cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& node, Precision exec_prc)
: jit_emitter(host, host_isa, exec_prc) {
input_type = node->get_input_element_type(0);
output_type = node->get_output_element_type(0);
@@ -58,7 +58,7 @@ void jit_convert_emitter::float2bfloat(const std::vector<size_t> &in_vec_idxs, c
}

jit_convert_truncation_emitter::jit_convert_truncation_emitter(jit_generator *host, cpu_isa_t host_isa,
-const std::shared_ptr<ngraph::Node>& node, Precision exec_prc)
+const std::shared_ptr<ov::Node>& node, Precision exec_prc)
: jit_convert_emitter(host, host_isa, node, exec_prc) {
prepare_table();
}
@@ -193,7 +193,7 @@ void jit_convert_truncation_emitter::dword2int8(const std::vector<size_t> &in_ve
}

jit_convert_saturation_emitter::jit_convert_saturation_emitter(jit_generator *host, cpu_isa_t host_isa,
-const std::shared_ptr<ngraph::Node>& node, Precision exec_prc)
+const std::shared_ptr<ov::Node>& node, Precision exec_prc)
: jit_convert_emitter(host, host_isa, node, exec_prc) {
}

@@ -14,7 +14,7 @@ namespace intel_cpu {
class jit_convert_emitter : public jit_emitter {
public:
jit_convert_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa,
-const std::shared_ptr<ngraph::Node>& n, InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
+const std::shared_ptr<ov::Node>& n, InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);

size_t get_inputs_num() const override;

@@ -47,7 +47,7 @@ class jit_convert_emitter : public jit_emitter {
class jit_convert_truncation_emitter : public jit_convert_emitter {
public:
jit_convert_truncation_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa,
-const std::shared_ptr<ngraph::Node>& n, InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
+const std::shared_ptr<ov::Node>& n, InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);

private:
void emit_impl(const std::vector<size_t>& in, const std::vector<size_t>& out) const override;
@@ -68,7 +68,7 @@ class jit_convert_truncation_emitter : public jit_convert_emitter {
class jit_convert_saturation_emitter : public jit_convert_emitter {
public:
jit_convert_saturation_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa,
-const std::shared_ptr<ngraph::Node>& n, InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
+const std::shared_ptr<ov::Node>& n, InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);

private:
void emit_impl(const std::vector<size_t>& in, const std::vector<size_t>& out) const override;
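The two subclasses retyped above implement the two integer narrowing policies: truncation keeps the low bits of the source value (modular wraparound), while saturation clamps it to the target type's range. A self-contained scalar sketch of the difference on int32 -> int8 (illustration only; the emitters above do this with SIMD registers, and both helper names are invented):

#include <algorithm>
#include <cstdint>
#include <cstdio>

int8_t narrow_truncate(int32_t v) {
    return static_cast<int8_t>(v & 0xFF);  // keep low byte: 300 -> 44
}

int8_t narrow_saturate(int32_t v) {
    // clamp into [-128, 127]: 300 -> 127
    return static_cast<int8_t>(std::min<int32_t>(127, std::max<int32_t>(-128, v)));
}

int main() {
    std::printf("%d %d\n", narrow_truncate(300), narrow_saturate(300));  // prints: 44 127
    return 0;
}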
4 changes: 2 additions & 2 deletions src/plugins/intel_cpu/src/emitters/x64/jit_dnnl_emitters.cpp
@@ -13,11 +13,11 @@ using namespace Xbyak;
namespace ov {
namespace intel_cpu {

-std::set<std::vector<element::Type>> jit_dnnl_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_dnnl_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
return {{element::f32}};
}

-jit_dnnl_emitter::jit_dnnl_emitter(jit_generator *host, cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& node, InferenceEngine::Precision exec_prc)
+jit_dnnl_emitter::jit_dnnl_emitter(jit_generator *host, cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& node, InferenceEngine::Precision exec_prc)
: jit_emitter(host, host_isa, exec_prc) {

kind = dnnl_eltwise_tanh;
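A note on the signature retyped in this file: get_supported_precisions returns a set of alternatives, each vector carrying one element type per input, so the {{element::f32}} above advertises a single combination with one f32 input. A minimal sketch of how a caller could test a requested combination (the supports helper is invented for illustration):

#include <set>
#include <vector>
#include "openvino/core/type/element_type.hpp"

using PrecisionCombo = std::vector<ov::element::Type>;

// True when the emitter advertises exactly the requested input tuple.
bool supports(const std::set<PrecisionCombo>& advertised, const PrecisionCombo& requested) {
    return advertised.count(requested) != 0;
}

// e.g. supports(jit_dnnl_emitter::get_supported_precisions(node), {ov::element::f32})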
4 changes: 2 additions & 2 deletions src/plugins/intel_cpu/src/emitters/x64/jit_dnnl_emitters.hpp
@@ -20,13 +20,13 @@ class jit_dnnl_emitter : public jit_emitter {

void emit_impl(const std::vector<size_t> &in_idxs, const std::vector<size_t> &out_idxs) const override {};

-static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);

protected:
jit_dnnl_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa,
dnnl_alg_kind_t algKind, float inpAlpha, float inpBeta,
InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
-jit_dnnl_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+jit_dnnl_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
void set_injector();

30 changes: 15 additions & 15 deletions src/plugins/intel_cpu/src/emitters/x64/jit_dnnl_ext_emitters.hpp
@@ -13,7 +13,7 @@ namespace intel_cpu {

class jit_relu_emitter : public jit_dnnl_emitter {
public:
-jit_relu_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+jit_relu_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
: jit_dnnl_emitter(host, host_isa, n, exec_prc) {
kind = dnnl_eltwise_relu;
@@ -26,7 +26,7 @@ class jit_relu_emitter : public jit_dnnl_emitter {

class jit_sigmoid_emitter : public jit_dnnl_emitter {
public:
-jit_sigmoid_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+jit_sigmoid_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
: jit_dnnl_emitter(host, host_isa, n, exec_prc) {
kind = dnnl_eltwise_logistic;
@@ -39,7 +39,7 @@ class jit_sigmoid_emitter : public jit_dnnl_emitter {

class jit_tanh_emitter : public jit_dnnl_emitter {
public:
-jit_tanh_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+jit_tanh_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
: jit_dnnl_emitter(host, host_isa, n, exec_prc) {
kind = dnnl_eltwise_tanh;
@@ -52,11 +52,11 @@ class jit_tanh_emitter : public jit_dnnl_emitter {

class jit_elu_emitter : public jit_dnnl_emitter {
public:
-jit_elu_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+jit_elu_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
: jit_dnnl_emitter(host, host_isa, n, exec_prc) {
kind = dnnl_eltwise_elu;
-alpha = ngraph::as_type_ptr<ov::op::v0::Elu>(n)->get_alpha();
+alpha = ov::as_type_ptr<ov::op::v0::Elu>(n)->get_alpha();
beta = 0.f;

set_injector();
@@ -65,7 +65,7 @@ class jit_elu_emitter : public jit_dnnl_emitter {

class jit_exp_emitter : public jit_dnnl_emitter {
public:
-jit_exp_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+jit_exp_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
: jit_dnnl_emitter(host, host_isa, n, exec_prc) {
kind = dnnl_eltwise_exp;
@@ -78,7 +78,7 @@ class jit_exp_emitter : public jit_dnnl_emitter {

class jit_abs_emitter : public jit_dnnl_emitter {
public:
-jit_abs_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+jit_abs_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
: jit_dnnl_emitter(host, host_isa, n, exec_prc) {
kind = dnnl_eltwise_abs;
@@ -91,11 +91,11 @@ class jit_abs_emitter : public jit_dnnl_emitter {

class jit_clamp_emitter : public jit_dnnl_emitter {
public:
-jit_clamp_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+jit_clamp_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
: jit_dnnl_emitter(host, host_isa, n, exec_prc) {
kind = dnnl_eltwise_clip;
-auto op = ngraph::as_type_ptr<ov::op::v0::Clamp>(n);
+auto op = ov::as_type_ptr<ov::op::v0::Clamp>(n);
alpha = op->get_min();
beta = op->get_max();

@@ -105,11 +105,11 @@ class jit_clamp_emitter : public jit_dnnl_emitter {

class jit_swish_emitter : public jit_dnnl_emitter {
public:
-jit_swish_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+jit_swish_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
: jit_dnnl_emitter(host, host_isa, n, exec_prc) {
kind = dnnl_eltwise_swish;
-auto op = ngraph::as_type_ptr<ov::intel_cpu::SwishNode>(n);
+auto op = ov::as_type_ptr<ov::intel_cpu::SwishNode>(n);
alpha = op->get_alpha();
beta = 0.f;

@@ -119,7 +119,7 @@ class jit_swish_emitter : public jit_dnnl_emitter {

class jit_hswish_emitter : public jit_dnnl_emitter {
public:
-jit_hswish_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+jit_hswish_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
: jit_dnnl_emitter(host, host_isa, n, exec_prc) {
// since v3.0 oneDNN has flexible version of hardswish, ov still uses the one with hardcoded alpha and beta
@@ -133,7 +133,7 @@ class jit_hswish_emitter : public jit_dnnl_emitter {

class jit_gelu_v0_emitter : public jit_dnnl_emitter {
public:
-jit_gelu_v0_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+jit_gelu_v0_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
: jit_dnnl_emitter(host, host_isa, n, exec_prc) {
kind = dnnl_eltwise_gelu_erf;
@@ -144,7 +144,7 @@ class jit_gelu_v0_emitter : public jit_dnnl_emitter {

class jit_gelu_v7_emitter : public jit_dnnl_emitter {
public:
-jit_gelu_v7_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+jit_gelu_v7_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
: jit_dnnl_emitter(host, host_isa, n, exec_prc) {
auto gelu = getNgraphOpAs<ngraph::op::v7::Gelu>(n);
@@ -167,7 +167,7 @@ class jit_round_emitter : public jit_dnnl_emitter {
jit_round_emitter(
dnnl::impl::cpu::x64::jit_generator *host,
dnnl::impl::cpu::x64::cpu_isa_t host_isa,
-const std::shared_ptr<ngraph::Node>& n,
+const std::shared_ptr<ov::Node>& n,
InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32) : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
const auto round = getNgraphOpAs<ngraph::op::v5::Round>(n);
const auto mode = round->get_mode();
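The ngraph::as_type_ptr -> ov::as_type_ptr swaps in this file are drop-in: both perform OpenVINO's RTTI-based downcast and return nullptr when the node is not of the requested type. A minimal sketch of that attribute-extraction pattern with an explicit guard added (clamp_min_or and its fallback are invented; the emitters above omit the check because the node has already been matched to the op type):

#include <memory>
#include "openvino/core/type.hpp"
#include "openvino/op/clamp.hpp"

float clamp_min_or(const std::shared_ptr<ov::Node>& n, float fallback) {
    // ov::as_type_ptr yields nullptr on a type mismatch instead of throwing.
    if (auto clamp = ov::as_type_ptr<ov::op::v0::Clamp>(n)) {
        return static_cast<float>(clamp->get_min());  // maps to 'alpha' in the dnnl clip kind
    }
    return fallback;
}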
