Skip to content

Commit

Permalink
Some code cleanup.
Browse files — browse the repository at this point in the history
  • Loading branch information
maxnick authored and mandrono committed Jul 14, 2021
1 parent 46e8c85 commit d85a983
Show file tree
Hide file tree
Showing 8 changed files with 11 additions and 369 deletions.
1 change: 0 additions & 1 deletion inference-engine/src/mkldnn_plugin/cpu_shape.h
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,6 @@ class Shape {
}

bool operator == (const Shape& rhs) const {
    // TODO [DS]: Shouldn't we check dims as well?
    // Shapes are considered equal when both bound vectors match.
    const bool minEqual = (minDims == rhs.minDims);
    const bool maxEqual = (maxDims == rhs.maxDims);
    return minEqual && maxEqual;
}

Expand Down
1 change: 1 addition & 0 deletions inference-engine/src/mkldnn_plugin/mkldnn_edge.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -143,6 +143,7 @@ void MKLDNNEdge::allocate(const void* mem_ptr) {
if (!inputDesc.isDefined() || !outputDesc.isDefined() || !inputDesc.isCompatible(outputDesc))
IE_THROW() << "Cannot allocate memory. Nodes have primitive descriptors with different formats.";

//TODO [DS]: code cleanup
// if (!MKLDNNExtensionUtils::initTensorsAreEqual(outputDesc, inputDesc) ||
// (inputDesc.getDims().size() > 0 && inputDesc.getDims()[0] != 1 &&
// (inputDesc.getPrecision() != outputDesc.getPrecision() ||
Expand Down
71 changes: 0 additions & 71 deletions inference-engine/src/mkldnn_plugin/mkldnn_extension_utils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -122,77 +122,6 @@ bool MKLDNNExtensionUtils::initTensorsAreEqual(const InferenceEngine::TensorDesc
in1Block.getOffsetPadding() != uninitNum && in2Block.getOffsetPadding() != uninitNum);
}

// Build a plain (non-blocked) partial descriptor: the outer order is the
// identity permutation 0..rank-1 and there is no inner blocking.
PartialBlkDesc PartialBlkDesc::makePlain(const std::vector<size_t> &dims) {
    PartialBlkDesc desc;
    const size_t rank = dims.size();
    desc.outer_order.reserve(rank);
    for (size_t d = 0; d < rank; ++d)
        desc.outer_order.push_back(d);
    return desc;
}

// Build a channel-blocked partial descriptor: plain outer order plus a single
// inner block of `block_size` elements over dim index 1 (the channel dim).
PartialBlkDesc PartialBlkDesc::makeCBlocked(const std::vector<size_t> &dims, size_t block_size) {
    PartialBlkDesc desc;
    const size_t rank = dims.size();
    desc.outer_order.reserve(rank);
    for (size_t d = 0; d < rank; ++d)
        desc.outer_order.push_back(d);
    desc.inner_blk_size = {block_size};
    desc.inner_blk_idxes = {1};
    return desc;
}

// Build a tail-of-channels (NHWC-like) partial descriptor: start from the
// plain layout and move the channel dim (index 1) to the end of the order.
PartialBlkDesc PartialBlkDesc::makeTailC(const InferenceEngine::SizeVector &dims) {
    PartialBlkDesc desc = makePlain(dims);
    if (dims.size() > 2) {
        auto &order = desc.outer_order;
        const auto channel = order[1];
        order.erase(order.begin() + 1);
        order.push_back(channel);
    }
    return desc;
}

// Derive the layout-family descriptor from a full BlockedMemoryDesc.
// The first `rank` entries of the blocked order describe the outer dims;
// whatever follows describes the inner blocking (indices and block sizes).
PartialBlkDesc PartialBlkDesc::extractFrom(const BlockedMemoryDesc &desc) {
    const auto &dims = desc.getShape().getStaticDims();
    const auto &blk_dims = desc.getBlockDims();
    const auto &blk_order = desc.getOrder();
    const size_t rank = dims.size();

    PartialBlkDesc partial;
    partial.outer_order.assign(blk_order.begin(), blk_order.begin() + rank);
    partial.inner_blk_idxes.assign(blk_order.begin() + rank, blk_order.end());
    partial.inner_blk_size.assign(blk_dims.begin() + rank, blk_dims.end());
    return partial;
}

// Check whether applying this blocking to `dims` would require zero padding,
// i.e. some blocked dim is not divisible by its (possibly repeated) block size.
// Returns true if auto-extension (padding) would occur, false otherwise.
bool PartialBlkDesc::isAutoExtendedWith(const std::vector<size_t> &dims) const {
    // Mutate a copy: the same dim index may be blocked more than once, so each
    // successful division must be visible to the subsequent divisibility check.
    auto tmp_dims = dims;
    // size_t index avoids the signed/unsigned comparison of the original `int i`.
    for (size_t i = 0; i < inner_blk_size.size(); i++) {
        const auto idx = inner_blk_idxes[i];
        const auto blk = inner_blk_size[i];
        if (tmp_dims[idx] % blk != 0)
            return true;
        tmp_dims[idx] /= blk;
    }
    return false;
}

// Equality of layout families: all three descriptor components must match.
bool PartialBlkDesc::operator == (const PartialBlkDesc& it) const {
    return inner_blk_idxes == it.inner_blk_idxes &&
           inner_blk_size == it.inner_blk_size &&
           outer_order == it.outer_order;
}

// Lexicographical compare of content, in the order:
// inner_blk_idxes, then inner_blk_size, then outer_order.
bool PartialBlkDesc::operator < (const PartialBlkDesc& it) const {
    if (inner_blk_idxes != it.inner_blk_idxes)
        return inner_blk_idxes < it.inner_blk_idxes;
    if (inner_blk_size != it.inner_blk_size)
        return inner_blk_size < it.inner_blk_size;
    return outer_order < it.outer_order;
}

// TODO [DS]: Move into InsertReorder();
std::string MKLDNNExtensionUtils::getReorderArgs(const MemoryDesc &parentDesc, const MemoryDesc &childDesc) {
std::string inArgs, outArgs;
if (parentDesc.getPrecision() != childDesc.getPrecision()) {
Expand Down
66 changes: 6 additions & 60 deletions inference-engine/src/mkldnn_plugin/mkldnn_extension_utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -17,73 +17,19 @@

namespace MKLDNNPlugin {


/**
 * Partial tensor descriptor
 *
 * Represents a class (family) of layouts, e.g. Plain, TailC, CBlocked and others.
 *
 * Two tensors are in the same layout family if they have the same PartialBlkDesc.
 *
 * Any tensor has the same PartialBlkDesc as its subview tensor.
 *
 * PartialBlkDesc plus Dims allows reconstructing the real tensorDesc (dense representation).
 */
class PartialBlkDesc {
public:
/**
 * Check if this partial blocking desc will lead to additional zero padding
 * for a real tensor with the provided dims
 *
 * Example: dims [2, 3, 8, 8] with blocking by 16 for the second dim will lead
 * to effective dims [2, 16, 8, 8] with zeroing of all values
 * [:, 3:16, :, :]
 *
 * @param dims to check on zero auto padding
 * @return true if the provided dims will use auto padding, otherwise false.
 */
bool isAutoExtendedWith(const std::vector<size_t> &dims) const;

/**
 * Construct PartialBlkDesc from the provided BlockedMemoryDesc
 *
 * PartialBlkDesc has less expressive power, so some information from the desc is dropped;
 * different desc objects may map to an equal PartialBlkDesc.
 *
 * @param desc to extract PartialBlkDesc information about the kind of layout
 * @return PartialBlkDesc object corresponding to the layout described in desc
 */
static PartialBlkDesc extractFrom(const BlockedMemoryDesc &desc);

/** Construct a plain (non-blocked) PartialBlkDesc based on dims information */
static PartialBlkDesc makePlain(const std::vector<size_t> &dims);

/** Construct a channel-blocked PartialBlkDesc based on dims information */
static PartialBlkDesc makeCBlocked(const std::vector<size_t> &dims, size_t block_size);

/** Construct a tail-of-channels (channels-last) PartialBlkDesc based on dims information */
static PartialBlkDesc makeTailC(const InferenceEngine::SizeVector &dims);

/** Compare operators. Allow using it as a key for std::map */
bool operator == (const PartialBlkDesc& it) const;
bool operator < (const PartialBlkDesc& it) const;

private:
PartialBlkDesc() = default;
// Permutation of the outer (logical) dims, e.g. {0, 2, 3, 1} for tail-C
std::vector<size_t> outer_order;
// Block sizes, parallel to inner_blk_idxes
std::vector<size_t> inner_blk_size;
// Dim indices the blocks in inner_blk_size apply to
std::vector<size_t> inner_blk_idxes;
};

/**
 * Static helpers for converting between InferenceEngine and mkldnn
 * precision/descriptor representations.
 */
class MKLDNNExtensionUtils {
public:
    static uint8_t sizeOfDataType(mkldnn::memory::data_type dataType);
    static mkldnn::memory::data_type IEPrecisionToDataType(const InferenceEngine::Precision& prec);
    static InferenceEngine::Precision DataTypeToIEPrecision(mkldnn::memory::data_type dataType);
    static std::string getReorderArgs(const MemoryDesc &parentDesc, const MemoryDesc &childDesc);

    // TODO: move to common utils
    static InferenceEngine::Precision getMaxPrecision(std::vector<InferenceEngine::Precision> precisions);

    // TODO [DS]: remove
    // Declared exactly once: the original block redeclared these two members
    // (ill-formed C++ — member functions may not be redeclared in their class).
    static InferenceEngine::TensorDesc getUninitTensorDesc(const InferenceEngine::TensorDesc& desc);
    static bool initTensorsAreEqual(const InferenceEngine::TensorDesc &desc1, const InferenceEngine::TensorDesc &desc2);
};

} // namespace MKLDNNPlugin
Loading

0 comments on commit d85a983

Please sign in to comment.