diff --git a/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp b/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp
index e710f4576d7f76..4b8e8fa4f501b1 100644
--- a/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp
+++ b/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp
@@ -116,7 +116,8 @@ ZeroInferRequest::ZeroInferRequest(const std::shared_ptr&
             const std::string shapeBufferName = SHAPE_TENSOR_PREFIX + inputName;
             const IONodeDescriptor& shapeDescriptor = _metadata.shapes.at(inputName);
 
-            check_level_zero_attributes_match(shapeDescriptor, executorInputDescriptors.at(shapeBufferName),
+            check_level_zero_attributes_match(shapeDescriptor,
+                                              executorInputDescriptors.at(shapeBufferName),
                                               shapeBufferName);
             allocate_tensor(inputName, shapeDescriptor, !STATE_TENSOR, allocator, SHAPE_TENSOR);
         }
@@ -138,7 +139,8 @@ ZeroInferRequest::ZeroInferRequest(const std::shared_ptr&
             const std::string shapeBufferName = SHAPE_TENSOR_PREFIX + outputName;
             const IONodeDescriptor& shapeDescriptor = _metadata.shapes.at(outputName);
 
-            check_level_zero_attributes_match(shapeDescriptor, executorOutputDescriptors.at(shapeBufferName),
+            check_level_zero_attributes_match(shapeDescriptor,
+                                              executorOutputDescriptors.at(shapeBufferName),
                                               shapeBufferName);
             allocate_tensor(outputName, shapeDescriptor, !STATE_TENSOR, allocator, SHAPE_TENSOR);
         }