[CPU] StringMemory for the output tensor in the InferRequest #21746

Merged
18 changes: 18 additions & 0 deletions src/plugins/intel_cpu/src/cpu_memory.cpp
@@ -60,6 +60,9 @@ Memory::Memory(const dnnl::engine& eng, MemoryDescPtr desc, const void* data, bo
       m_pMemDesc(desc),
       m_mgrHandle(std::make_shared<DnnlMemoryMngr>(make_unique<MemoryMngrWithReuse>()), this),
       dnnlMemHandle(this) {
+    if (desc->getPrecision() == element::string) {
+        OPENVINO_THROW("[CPU] Memory object cannot be created for string data.");
+    }
     create(m_pMemDesc, data, pads_zeroing);
 }

@@ -68,6 +71,9 @@ Memory::Memory(const dnnl::engine& eng, const MemoryDesc& desc, const void* data

 Memory::Memory(const dnnl::engine& eng, MemoryDescPtr desc, MemoryMngrPtr mngr) :
     m_eng(eng), m_pMemDesc(desc), m_mgrHandle(mngr, this), dnnlMemHandle(this) {
+    if (desc->getPrecision() == element::string) {
+        OPENVINO_THROW("[CPU] Memory object can't be created for string data.");
+    }
     bool memAllocated = m_mgrHandle->getRawPtr();

     create(desc, nullptr, !memAllocated);
@@ -105,6 +111,9 @@ void Memory::create(MemoryDescPtr desc, const void* data, bool pads_zeroing) {
 }

 void Memory::load(const IMemory& src, bool ftz) const {
+    if (src.getDesc().getPrecision() == element::string) {
+        OPENVINO_THROW("[CPU] Memory object cannot load string data.");
+    }
     transferData(src, *this, ftz);
 }

@@ -115,6 +124,9 @@ void Memory::nullify() {
 }

 void Memory::redefineDesc(MemoryDescPtr desc) {
+    if (desc->getPrecision() == element::string) {
+        OPENVINO_THROW("[CPU] Memory object cannot accept a descriptor with a string type.");
+    }
     if (!desc->hasDefinedMaxSize()) {
         OPENVINO_THROW("Can not reset descriptor, memory upper bound is unknown.");
     }
@@ -445,6 +457,9 @@ void DnnlMemoryMngr::notifyUpdate() {

 StaticMemory::StaticMemory(const dnnl::engine& eng, MemoryDescPtr desc, const void* data, bool pads_zeroing) :
     m_eng(eng), m_pMemDesc(desc) {
+    if (desc->getPrecision() == element::string) {
+        OPENVINO_THROW("[CPU] StaticMemory object cannot be created for string data.");
+    }
     if (!m_pMemDesc->isDefined()) {
         OPENVINO_THROW("Can not create StaticMemory object. The memory desc is undefined");
     }
@@ -511,6 +526,9 @@ void StaticMemory::redefineDesc(MemoryDescPtr desc) {
 }

 void StaticMemory::load(const IMemory& src, bool ftz) const {
+    if (src.getDesc().getPrecision() == element::string) {
+        OPENVINO_THROW("[CPU] StaticMemory cannot load string data.");
+    }
     transferData(src, *this, ftz);
 }

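All six guards above follow the same pattern: `Memory` and `StaticMemory` move and resize data with byte-level operations, which is undefined behavior for `std::string` elements, so string tensors are rejected at construction and load time and must go through `StringMemory` instead. A standalone sketch (not OpenVINO code) of why byte-wise handling breaks string elements:

```cpp
// Standalone sketch (not OpenVINO code): std::string is not trivially
// copyable, so the byte-level copy a raw memory manager performs would
// duplicate internal heap pointers instead of the character data.
#include <string>
#include <type_traits>

static_assert(!std::is_trivially_copyable<std::string>::value,
              "byte-wise copy of std::string is undefined behavior");

int main() {
    std::string src[2] = {"hello", "world"};
    std::string dst[2];
    for (int i = 0; i < 2; ++i) {
        dst[i] = src[i];  // correct: invokes std::string copy assignment
    }
    // std::memcpy(dst, src, sizeof(src));  // what a byte-level transfer
    // would effectively do -- undefined behavior for std::string
    return 0;
}
```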
57 changes: 36 additions & 21 deletions src/plugins/intel_cpu/src/infer_request.cpp
@@ -175,14 +175,15 @@ std::vector<ov::ProfilingInfo> SyncInferRequest::get_profiling_info() const {
 }

 static inline void change_edge_ptr(const EdgePtr& edge, ov::SoPtr<ov::ITensor>& tensor) {
-    auto& mem = edge->getMemory();
+    auto mem = edge->getMemoryPtr();
+    OPENVINO_ASSERT(mem != nullptr, "Edge with name '", edge->name(), "' doesn't have allocated memory object.");

     if (tensor->get_element_type() == element::string) {
-        auto memMngr = dynamic_cast<const StringMemory &>(mem).getStringMemoryMngrPtr();
+        auto memMngr = dynamic_cast<StringMemory *>(mem.get())->getStringMemoryMngrPtr();
         OPENVINO_ASSERT(memMngr);
-        memMngr->setExtBuff(tensor->data<OvString>(), tensor->get_size());
+        memMngr->setExtBuff(tensor->data<StringMemory::OvString>(), tensor->get_size());
     } else {
-        auto memMngr = mem.getMemoryMngr();
+        auto memMngr = mem->getMemoryMngr();
         OPENVINO_ASSERT(memMngr);
         memMngr->setExtBuff(tensor->data(), tensor->get_byte_size());
     }
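`change_edge_ptr` is what makes zero-copy output work: it points the graph's output edge at the user-provided tensor buffer, dispatching on precision because a string tensor's buffer is an array of `std::string` rather than raw bytes. A hedged usage sketch from the application side, using the public OpenVINO 2.0 API (the model file name is a placeholder, not from this PR):

```cpp
#include <openvino/openvino.hpp>
#include <string>

int main() {
    ov::Core core;
    // "string_model.xml" is a placeholder for any model with a string output.
    auto compiled = core.compile_model("string_model.xml", "CPU");
    auto request = compiled.create_infer_request();

    // Pre-allocated string output tensor; with this PR the CPU plugin can
    // redirect the output edge to this buffer instead of copying.
    ov::Tensor output(ov::element::string, ov::Shape{2});
    request.set_output_tensor(0, output);

    request.infer();

    // Results are written in place as std::string elements.
    std::string* strings = output.data<std::string>();
    (void)strings;
    return 0;
}
```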
@@ -546,26 +547,40 @@ void SyncInferRequest::init_tensor(const std::string& name) {

     if (!tensor) {
         ov::Shape tensor_shape;
+        const auto model_prec = port.get_element_type();
         if (isDynamic) {
-            const auto model_prec = port.get_element_type();
-            const auto graph_prec =
-                output->second->getParentEdgesAtPort(0)[0]->getMemory().getDesc().getPrecision();
-            OutputControlBlock control_block{model_prec, Shape{shape}};
-
-            DEBUG_LOG(name,
-                      ", tensor ",
-                      control_block.tensor(),
-                      ", memmngr ",
-                      control_block.tensor()->get_memory()->getMemoryMngr(),
-                      "memory object ",
-                      control_block.tensor()->get_memory().get());
-
-            tensor = control_block.tensor();
-            if (model_prec == graph_prec)
-                m_outputControlBlocks.emplace(std::make_pair(name, std::move(control_block)));
+            if (model_prec == element::string) {
[Contributor review comment on the line above: "It looks like we need to encapsulate this logic into the OutputControlBlock ctor to have unified code here. Anyway, since the current solution solves the issue, we can refactor in a follow-up PR." A hedged sketch of that refactor appears after the diff below.]
+                VectorDims memDims;
+                auto c_shape = Shape{shape};
+                for (auto&& dim : c_shape.getDims()) {
+                    memDims.push_back(dim != Shape::UNDEFINED_DIM ? dim : 0);
+                }
+
+                dnnl::engine eng(dnnl::engine::kind::cpu, 0);
+                CpuBlockedMemoryDescPtr desc = std::make_shared<CpuBlockedMemoryDesc>(model_prec, Shape{memDims});
+                auto memory = std::make_shared<StringMemory>(eng, desc);
+
+                tensor = std::make_shared<Tensor>(memory);
+            } else {
+                const auto graph_prec =
+                    output->second->getParentEdgesAtPort(0)[0]->getMemory().getDesc().getPrecision();
+                OutputControlBlock control_block{model_prec, Shape{shape}};
+
+                DEBUG_LOG(name,
+                          ", tensor ",
+                          control_block.tensor(),
+                          ", memmngr ",
+                          control_block.tensor()->get_memory()->getMemoryMngr(),
+                          "memory object ",
+                          control_block.tensor()->get_memory().get());
+
+                tensor = control_block.tensor();
+                if (model_prec == graph_prec)
+                    m_outputControlBlocks.emplace(std::make_pair(name, std::move(control_block)));
+            }
         } else {
             tensor_shape = shape.to_shape();
-            tensor = ov::make_tensor(port.get_element_type(), tensor_shape);
+            tensor = ov::make_tensor(model_prec, tensor_shape);
         }
         ov::ISyncInferRequest::set_tensor(port, tensor);
     } else {
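The reviewer comment above suggests folding both branches into the `OutputControlBlock` constructor so `init_tensor` keeps a single path. A hedged sketch of what that follow-up refactor could look like; the constructor overload, the `Memory` fallback, and the `m_tensor` member are assumptions for illustration, not code from this PR:

```cpp
// Hypothetical unified ctor (sketch only; the actual refactor was deferred
// to a follow-up PR). It lowers undefined dims to zero and selects the
// memory implementation by precision.
SyncInferRequest::OutputControlBlock::OutputControlBlock(const ov::element::Type& precision,
                                                         const Shape& shape) {
    dnnl::engine eng(dnnl::engine::kind::cpu, 0);

    VectorDims memDims;
    for (auto&& dim : shape.getDims()) {
        memDims.push_back(dim != Shape::UNDEFINED_DIM ? dim : 0);
    }
    auto desc = std::make_shared<CpuBlockedMemoryDesc>(precision, Shape{memDims});

    MemoryPtr memory;
    if (precision == element::string) {
        memory = std::make_shared<StringMemory>(eng, desc);  // string-aware manager
    } else {
        memory = std::make_shared<Memory>(eng, desc);        // raw-byte manager
    }
    m_tensor = std::make_shared<Tensor>(memory);
}
```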