diff --git a/src/core/src/runtime/itensor.cpp b/src/core/src/runtime/itensor.cpp
index 59d6d283ac3526..6d966566c65610 100644
--- a/src/core/src/runtime/itensor.cpp
+++ b/src/core/src/runtime/itensor.cpp
@@ -28,7 +28,6 @@ bool ITensor::is_continuous() const {
     if (get_element_type().bitwidth() < 8)
         // OpenVINO doesn't support strides for lp types
        return true;
-    const auto& shape = get_shape();
     const auto& type = get_element_type();
     std::vector<size_t> strides(shape.size());
diff --git a/src/plugins/intel_cpu/src/node.cpp b/src/plugins/intel_cpu/src/node.cpp
index bfbbc1cee8f2e7..1ca901f4b07117 100644
--- a/src/plugins/intel_cpu/src/node.cpp
+++ b/src/plugins/intel_cpu/src/node.cpp
@@ -1206,27 +1206,6 @@ bool Node::isFusedWith(Type fusedNodeType) const {
     return false;
 }
 
-InferenceEngine::Layout Node::getWeightsLayoutByDims(SizeVector dims, bool isGrouped) {
-    switch (dims.size()) {
-        case 0:
-            return InferenceEngine::Layout::SCALAR;
-        case 1:
-            return InferenceEngine::Layout::C;
-        case 2:
-            return InferenceEngine::Layout::NC;
-        case 3:
-            return InferenceEngine::Layout::CHW;
-        case 4:
-            return InferenceEngine::Layout::OIHW;
-        case 5:
-            return isGrouped ? InferenceEngine::Layout::GOIHW : InferenceEngine::Layout::OIDHW;
-        case 6:
-            return isGrouped ?
InferenceEngine::Layout::GOIDHW : InferenceEngine::Layout::BLOCKED;
-        default:
-            return InferenceEngine::Layout::BLOCKED;
-    }
-}
-
 dnnl::memory::format_tag Node::getWeightsFormatTagByDims(const SizeVector& dims) const {
     switch (dims.size()) {
         case 1:
diff --git a/src/plugins/intel_cpu/src/node.h b/src/plugins/intel_cpu/src/node.h
index c2b575052759c3..25250284d6831a 100644
--- a/src/plugins/intel_cpu/src/node.h
+++ b/src/plugins/intel_cpu/src/node.h
@@ -630,7 +630,6 @@ class Node {
     virtual std::vector<dnnl::memory::format_tag> getAvailableFormatsForDims(const Shape& dims) const;
-    InferenceEngine::Layout getWeightsLayoutByDims(InferenceEngine::SizeVector dims, bool isGrouped);
     dnnl::memory::format_tag getWeightsFormatTagByDims(const InferenceEngine::SizeVector& dims) const;
 
     /**
diff --git a/src/plugins/intel_cpu/src/nodes/adaptive_pooling.cpp b/src/plugins/intel_cpu/src/nodes/adaptive_pooling.cpp
index 0feec97c849245..e3ac5168fef529 100644
--- a/src/plugins/intel_cpu/src/nodes/adaptive_pooling.cpp
+++ b/src/plugins/intel_cpu/src/nodes/adaptive_pooling.cpp
@@ -105,10 +105,6 @@ void AdaptivePooling::initSupportedPrimitiveDescriptors() {
     // we supports only fp32 currently
     precision = Precision::FP32;
 
-    InferenceEngine::LayerConfig config;
-    config.inConfs.resize(2);
-    config.outConfs.resize((algorithm == Algorithm::AdaptivePoolingAvg ?
1 : 2));
-
     std::vector<LayoutType> dataFormats{ LayoutType::ncsp };
     const auto &inDims = getInputShapeAtPort(0).getDims();
     if (inDims[1] != Shape::UNDEFINED_DIM && inDims[1] != 1) {
diff --git a/src/plugins/intel_cpu/src/nodes/proposal.cpp b/src/plugins/intel_cpu/src/nodes/proposal.cpp
index c0c77977461e68..92c29f6ae562c2 100644
--- a/src/plugins/intel_cpu/src/nodes/proposal.cpp
+++ b/src/plugins/intel_cpu/src/nodes/proposal.cpp
@@ -191,7 +191,7 @@ void Proposal::execute(dnnl::stream strm) {
         InferenceEngine::Extensions::Cpu::XARCH::proposal_exec(probabilitiesData, anchorsData, inProbDims,
             {imgHeight, imgWidth, scaleHeight, scaleWidth}, anchors.data(), roi_indices.data(),
             outRoiData, outProbData, conf);
-    } catch (const InferenceEngine::Exception& e) {
+    } catch (const ov::Exception& e) {
         std::string errorMsg = e.what();
         IE_THROW() << errorMsg;
     }
diff --git a/src/plugins/intel_cpu/src/plugin.cpp b/src/plugins/intel_cpu/src/plugin.cpp
index 530c7fad0bcdb3..966411d9507a7d 100644
--- a/src/plugins/intel_cpu/src/plugin.cpp
+++ b/src/plugins/intel_cpu/src/plugin.cpp
@@ -853,7 +853,7 @@ ov::SupportedOpsMap Engine::query_model(const std::shared_ptr<const ov::Model>&
         std::unique_ptr<Node> ptr;
         try {
             ptr.reset(Node::factory().create(op, context));
-        } catch (const ov::Exception&) {
+        } catch (const ov::Exception&) {
            return false;
        }
        return true;