
Reorder data algorithm moved to a separate static method.
maxnick committed Jan 19, 2021
1 parent 4ae9737 commit dcdff34
Showing 2 changed files with 54 additions and 39 deletions.
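
In effect, the reorder-with-conversion-fallback logic that previously lived only inside the instance method SetData(const MKLDNNMemory&, bool) is now also exposed as a static helper, which both SetData overloads delegate to. A minimal call-site sketch (src, dst, and ftz are placeholder names; only the API visible in this diff is assumed):

    dst.SetData(src, ftz);                // existing public path, unchanged for callers; now delegates internally
    MKLDNNMemory::reorderData(src, dst);  // new static helper: reorder/convert without SetData's ftz post-processing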
92 changes: 53 additions & 39 deletions inference-engine/src/mkldnn_plugin/mkldnn_memory.cpp
@@ -100,63 +100,77 @@ void MKLDNNMemory::Create(const mkldnn::memory::desc& desc, const void *data, bo
    }
}

void MKLDNNMemory::reorderData(const MKLDNNMemory &input, const MKLDNNMemory &output) {
    if (input.GetPrimitiveDescriptor() == output.GetPrimitiveDescriptor()) {
        auto srcPtr = static_cast<uint8_t*>(input.GetData()) + input.GetDescriptor().data.layout_desc.blocking.offset_padding *
                      MKLDNNExtensionUtils::sizeOfDataType(input.GetDataType());
        auto dstPtr = static_cast<uint8_t*>(output.GetData()) + output.GetDescriptor().data.layout_desc.blocking.offset_padding *
                      MKLDNNExtensionUtils::sizeOfDataType(output.GetDataType());

        auto size = input.GetSize();
        cpu_memcpy(dstPtr, srcPtr, size);
    } else {
        std::unique_ptr<mkldnn::reorder> pReorder;
        // Declared at this scope so the converted data stays alive until the reorder below is submitted.
        std::vector<uint8_t> tmpBuff;

        try {
            pReorder = std::unique_ptr<mkldnn::reorder>(new mkldnn::reorder(input.GetPrimitive(), output.GetPrimitive()));
        }
        catch (const mkldnn::error& err) {
            if (mkldnn_unimplemented == err.status && output.GetDataType() != input.GetDataType()) {
                // We probably could not create the reorder because no primitive supports this precision conversion,
                // so let's try to convert the data first using cpu_convert.
                tmpBuff.resize(input.GetSize());
                auto data = static_cast<const uint8_t*>(input.GetData()) + input.GetDescriptor().data.layout_desc.blocking.offset_padding *
                            MKLDNNExtensionUtils::sizeOfDataType(input.GetDataType());

                cpu_convert(data, tmpBuff.data(), MKLDNNExtensionUtils::DataTypeToIEPrecision(input.GetDataType()),
                            MKLDNNExtensionUtils::DataTypeToIEPrecision(output.GetDataType()), input.GetElementsCount());

                // Wrap the converted buffer (already in the output precision) so the reorder only has to change the layout.
                MKLDNNMemory src(output.eng);
                src.Create(input.GetDims(), output.GetDataType(), input.GetFormat(), tmpBuff.data());

                pReorder = std::unique_ptr<mkldnn::reorder>(new mkldnn::reorder(src.GetPrimitive(), output.GetPrimitive()));
            } else {
                throw;
            }
        }
        if (pReorder) {
            mkldnn::stream(stream::kind::eager).submit({*pReorder});
        } else {
            THROW_IE_EXCEPTION << "Could not make mkldnn reorder.";
        }
    }
}

void MKLDNNMemory::SetData(memory::data_type dataType, memory::format format, const void* data, size_t size, bool ftz) const {
    if (GetDataType() == dataType && static_cast<mkldnn_memory_format_t>(format) == GetDescriptor().data.format) {
        uint8_t itemSize = MKLDNNExtensionUtils::sizeOfDataType(mkldnn::memory::data_type(dataType));
        uint8_t* dataPtr = static_cast<uint8_t*>(GetData());
        // We cannot support strides for i/o blobs because it affects performance.
        dataPtr += itemSize * prim->get_primitive_desc().desc().data.layout_desc.blocking.offset_padding;
        cpu_memcpy(dataPtr, data, size);

        if (ftz
            && dataType == mkldnn::memory::f32
            && GetFormat() != mkldnn::memory::wino_fmt
            && GetDataType() != mkldnn::memory::bf16) {
            // Internal blobs do not have strides yet.
            auto *memData = static_cast<float *>(GetData());
            memData += prim->get_primitive_desc().desc().data.layout_desc.blocking.offset_padding;
            setSubnormalsToZero(memData, GetSize() / sizeof(float));
        }
    } else {
        auto memData = this->GetDescriptor().data;
        std::vector<ptrdiff_t> dims(memData.dims, memData.dims + memData.ndims);

        MKLDNNMemory src(this->eng);
        src.Create(dims, dataType, format, data);

        this->SetData(src, ftz);
        reorderData(src, *this);
    }
    if (ftz
        && dataType == mkldnn::memory::f32
        && GetFormat() != mkldnn::memory::wino_fmt
        && GetDataType() != mkldnn::memory::bf16) {
        // Internal blobs do not have strides yet.
        auto *memData = static_cast<float *>(GetData());
        memData += prim->get_primitive_desc().desc().data.layout_desc.blocking.offset_padding;
        setSubnormalsToZero(memData, GetSize() / sizeof(float));
    }
}

void MKLDNNMemory::SetData(const MKLDNNMemory& memory, bool ftz) const {
    std::unique_ptr<mkldnn::reorder> pReorder;

    try {
        pReorder = std::unique_ptr<mkldnn::reorder>(new mkldnn::reorder(memory.GetPrimitive(), this->GetPrimitive()));
    }
    catch (const mkldnn::error& err) {
        if (mkldnn_unimplemented == err.status && this->GetDataType() != memory.GetDataType()) {
            // We probably could not create the reorder because no primitive supports this precision conversion,
            // so let's try to convert the data first using cpu_convert.
            std::vector<uint8_t> tmpBuff(memory.GetSize());
            auto data = static_cast<const uint8_t*>(memory.GetData()) + memory.GetDescriptor().data.layout_desc.blocking.offset_padding;

            cpu_convert(data, tmpBuff.data(), MKLDNNExtensionUtils::DataTypeToIEPrecision(memory.GetDataType()),
                        MKLDNNExtensionUtils::DataTypeToIEPrecision(this->GetDataType()), memory.GetElementsCount());

            MKLDNNMemory src(this->eng);
            src.Create(memory.GetDims(), memory.GetDataType(), memory.GetFormat(), data);

            pReorder = std::unique_ptr<mkldnn::reorder>(new mkldnn::reorder(src.GetPrimitive(), this->GetPrimitive()));
        } else {
            throw;
        }
    }
    if (pReorder) {
        mkldnn::stream(stream::kind::eager).submit({*pReorder});
    } else {
        THROW_IE_EXCEPTION << "Could not make mkldnn reorder.";
    }
    reorderData(memory, *this);

    if (ftz
        && memory.GetDataType() == mkldnn::memory::f32
1 change: 1 addition & 0 deletions inference-engine/src/mkldnn_plugin/mkldnn_memory.h
@@ -119,6 +119,7 @@ class MKLDNNMemory {
    static std::string formatToString(mkldnn::memory::format fmt);

    static void CreateBlockingDesc(mkldnn::memory::desc& desc);
    static void reorderData(const MKLDNNMemory& input, const MKLDNNMemory& output);

private:
    std::shared_ptr<mkldnn::memory> prim;
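
For orientation, below is a usage sketch of the newly exported static method. It is only an illustration: the engine construction, shapes, formats, include path, and the assumption that Create() allocates storage when no external pointer is supplied are not taken from this commit.

#include <vector>

#include "mkldnn_memory.h"  // header path assumed from this repository layout

// Hypothetical example: copy an f32 NCHW blob into a bf16 blob of the same shape.
// When MKLDNN offers no direct f32 -> bf16 reorder, reorderData() falls back to
// cpu_convert before building the reorder primitive, as implemented in the diff above.
static void exampleReorder() {
    mkldnn::engine eng(mkldnn::engine::cpu, 0);

    std::vector<float> rawData(1 * 3 * 224 * 224, 0.0f);

    MKLDNNMemory src(eng);
    src.Create({1, 3, 224, 224}, mkldnn::memory::f32, mkldnn::memory::nchw, rawData.data());

    MKLDNNMemory dst(eng);
    dst.Create({1, 3, 224, 224}, mkldnn::memory::bf16, mkldnn::memory::nchw);  // assumes data defaults to nullptr and Create() allocates

    MKLDNNMemory::reorderData(src, dst);
}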
