From 6c1eebca46353ee646404769c92330efd94a8c69 Mon Sep 17 00:00:00 2001 From: Alexandre Eichenberger Date: Tue, 27 Sep 2022 21:19:47 +0000 Subject: [PATCH 1/3] removed the memory leaks directly in execution session Signed-off-by: Alexandre Eichenberger --- src/Runtime/ExecutionSession.cpp | 15 +++++++++++++++ src/Runtime/OMTensor.inc | 3 ++- src/Runtime/OMTensorList.inc | 17 ++++++++++++++++- test/numerical/TestLoop.cpp | 8 ++++---- 4 files changed, 37 insertions(+), 6 deletions(-) diff --git a/src/Runtime/ExecutionSession.cpp b/src/Runtime/ExecutionSession.cpp index 6478b6b35d..9e4112d458 100644 --- a/src/Runtime/ExecutionSession.cpp +++ b/src/Runtime/ExecutionSession.cpp @@ -26,6 +26,8 @@ #include "ExecutionSession.hpp" +void omTensorListDestroyWithoutDestroyingTensors(OMTensorList *list); + namespace onnx_mlir { const std::string ExecutionSession::_queryEntryPointsName = "omQueryEntryPoints"; @@ -85,6 +87,13 @@ std::vector ExecutionSession::run( auto *wrappedInput = omTensorListCreate(&omts[0], (int64_t)omts.size()); auto *wrappedOutput = _entryPointFunc(wrappedInput); + + // We created a wrapper for the input list, but the input list does not really + // own the tensor in the list, as they are coming as OMTensorUniquePtr. So we + // need to simply deallocate the list structure without touching the + // OMTensors. + omTensorListDestroyWithoutDestroyingTensors(wrappedInput); + if (!wrappedOutput) throw std::runtime_error(reportErrnoError()); std::vector outs; @@ -93,6 +102,12 @@ std::vector ExecutionSession::run( outs.emplace_back(OMTensorUniquePtr( omTensorListGetOmtByIndex(wrappedOutput, i), omTensorDestroy)); } + + // We created a wrapper for the output list, but the output list does not + // really own the tensor in the list, as they are returned in a vector of + // OMTensorUniquePtr. So we need to simply deallocate the list structure + // without touching the OMTensors. + omTensorListDestroyWithoutDestroyingTensors(wrappedOutput); errno = 0; // No errors. 
return outs; } diff --git a/src/Runtime/OMTensor.inc b/src/Runtime/OMTensor.inc index 62a019b095..b8005294d3 100644 --- a/src/Runtime/OMTensor.inc +++ b/src/Runtime/OMTensor.inc @@ -284,8 +284,9 @@ OMTensor *omTensorCreateEmpty( void omTensorDestroy(OMTensor *tensor) { if (!tensor) return; - if (tensor->_owning) + if (tensor->_owning) { free(tensor->_allocatedPtr); + } free(tensor->_shape); free(tensor->_strides); free(tensor); diff --git a/src/Runtime/OMTensorList.inc b/src/Runtime/OMTensorList.inc index 4ede95e637..9c951d062c 100644 --- a/src/Runtime/OMTensorList.inc +++ b/src/Runtime/OMTensorList.inc @@ -103,8 +103,23 @@ void omTensorListDestroy(OMTensorList *list) { return; for (int64_t i = 0; i < list->_size; i++) omTensorDestroy(list->_omts[i]); - if (list->_owning) + if (list->_owning) { free(list->_omts); + } + free(list); +} + +/* OMTensorList destroyer which does not destroy the tensors. Currently an + * unpublished call that is mainly used to handle the unique tensor scheme used + * in Execution Session. + */ +void omTensorListDestroyWithoutDestroyingTensors(OMTensorList *list) { + if (!list) + return; + // Omit destruction of the OMTensors. 
+ if (list->_owning) { + free(list->_omts); + } free(list); } diff --git a/test/numerical/TestLoop.cpp b/test/numerical/TestLoop.cpp index 334bd28965..4090548af0 100644 --- a/test/numerical/TestLoop.cpp +++ b/test/numerical/TestLoop.cpp @@ -118,9 +118,9 @@ bool isOMLoopTheSameAsNaiveImplFor(std::string moduleIR, omTensorGetElem(condTensor.get(), {}) = true; inputs.emplace_back(move(condTensor)); - auto *yInitShape = new int64_t[1]{1}; + int64_t yInitShape = 1; auto yInitTensor = OMTensorUniquePtr( - omTensorCreateEmpty(&yInitShape[0], 1, OM_DATA_TYPE::ONNX_TYPE_INT64), + omTensorCreateEmpty(&yInitShape, 1, OM_DATA_TYPE::ONNX_TYPE_INT64), omTensorDestroy); omTensorGetElem(yInitTensor.get(), {0}) = yInit; inputs.emplace_back(move(yInitTensor)); @@ -135,9 +135,9 @@ bool isOMLoopTheSameAsNaiveImplFor(std::string moduleIR, return false; } - auto *yRefInitShape = new int64_t[1]{1}; + int64_t yRefInitShape = 1; auto vFinalRef = OMTensorUniquePtr( - omTensorCreateEmpty(&yRefInitShape[0], 1, OM_DATA_TYPE::ONNX_TYPE_INT64), + omTensorCreateEmpty(&yRefInitShape, 1, OM_DATA_TYPE::ONNX_TYPE_INT64), omTensorDestroy); omTensorGetElem(vFinalRef.get(), {0}) = yInit; From da04dd2aef8f392b02f89b1caa73aa521036a402 Mon Sep 17 00:00:00 2001 From: Alexandre Eichenberger Date: Wed, 28 Sep 2022 17:08:42 +0000 Subject: [PATCH 2/3] responding to comments Signed-off-by: Alexandre Eichenberger --- src/Runtime/ExecutionSession.cpp | 7 +++---- src/Runtime/OMTensorList.inc | 2 +- src/Runtime/OMTensorListHelper.hpp | 25 +++++++++++++++++++++++++ test/numerical/TestLoop.cpp | 8 ++++---- 4 files changed, 33 insertions(+), 9 deletions(-) create mode 100644 src/Runtime/OMTensorListHelper.hpp diff --git a/src/Runtime/ExecutionSession.cpp b/src/Runtime/ExecutionSession.cpp index 9e4112d458..6fba435a84 100644 --- a/src/Runtime/ExecutionSession.cpp +++ b/src/Runtime/ExecutionSession.cpp @@ -25,8 +25,7 @@ #include "llvm/Support/Path.h" #include "ExecutionSession.hpp" - -void 
omTensorListDestroyWithoutDestroyingTensors(OMTensorList *list); +#include "OMTensorListHelper.hpp" namespace onnx_mlir { const std::string ExecutionSession::_queryEntryPointsName = @@ -92,7 +91,7 @@ std::vector ExecutionSession::run( // own the tensor in the list, as they are coming as OMTensorUniquePtr. So we // need to simply deallocate the list structure without touching the // OMTensors. - omTensorListDestroyWithoutDestroyingTensors(wrappedInput); + omTensorListDestroyShallow(wrappedInput); if (!wrappedOutput) throw std::runtime_error(reportErrnoError()); @@ -107,7 +106,7 @@ std::vector ExecutionSession::run( // really own the tensor in the list, as they are returned in a vector of // OMTensorUniquePtr. So we need to simply deallocate the list structure // without touching the OMTensors. - omTensorListDestroyWithoutDestroyingTensors(wrappedOutput); + omTensorListDestroyShallow(wrappedOutput); errno = 0; // No errors. return outs; } diff --git a/src/Runtime/OMTensorList.inc b/src/Runtime/OMTensorList.inc index 9c951d062c..e540645aba 100644 --- a/src/Runtime/OMTensorList.inc +++ b/src/Runtime/OMTensorList.inc @@ -113,7 +113,7 @@ void omTensorListDestroy(OMTensorList *list) { * unpublished call that is mainly used to handle the unique tensor scheme used * in Execution Session. */ -void omTensorListDestroyWithoutDestroyingTensors(OMTensorList *list) { +void omTensorListDestroyShallow(OMTensorList *list) { if (!list) return; // Omit destruction of the OMTensors. diff --git a/src/Runtime/OMTensorListHelper.hpp b/src/Runtime/OMTensorListHelper.hpp new file mode 100644 index 0000000000..f59d0362ec --- /dev/null +++ b/src/Runtime/OMTensorListHelper.hpp @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + */ + +//===----- OMTensorListHelper.hpp - OMTensor List Helper Func header ------===// +// +// Copyright 2022 The IBM Research Authors. 
+// +// ============================================================================= +// +// This file contains declaration of OMTensorList C++ helper functions. +// +//===----------------------------------------------------------------------===// + +#pragma once + +#include "OnnxMlirRuntime.h" + +/* + * Destroy the OMTensorList data structure (including the array of OMTensor when + * ownership is asserted), but not the OMTensor themselves. Assumed here is that + * their live range is managed explicitly or implicitly using a different + * mechanism. + */ +void omTensorListDestroyShallow(OMTensorList *list); diff --git a/test/numerical/TestLoop.cpp b/test/numerical/TestLoop.cpp index 4090548af0..f464bca5a9 100644 --- a/test/numerical/TestLoop.cpp +++ b/test/numerical/TestLoop.cpp @@ -118,9 +118,9 @@ bool isOMLoopTheSameAsNaiveImplFor(std::string moduleIR, omTensorGetElem(condTensor.get(), {}) = true; inputs.emplace_back(move(condTensor)); - int64_t yInitShape = 1; + int64_t yInitShape[1] = {1}; auto yInitTensor = OMTensorUniquePtr( - omTensorCreateEmpty(&yInitShape, 1, OM_DATA_TYPE::ONNX_TYPE_INT64), + omTensorCreateEmpty(&yInitShape[0], 1, OM_DATA_TYPE::ONNX_TYPE_INT64), omTensorDestroy); omTensorGetElem(yInitTensor.get(), {0}) = yInit; inputs.emplace_back(move(yInitTensor)); @@ -135,9 +135,9 @@ bool isOMLoopTheSameAsNaiveImplFor(std::string moduleIR, return false; } - int64_t yRefInitShape = 1; + int64_t yRefInitShape[1] = {1}; auto vFinalRef = OMTensorUniquePtr( - omTensorCreateEmpty(&yRefInitShape, 1, OM_DATA_TYPE::ONNX_TYPE_INT64), + omTensorCreateEmpty(&yRefInitShape[0], 1, OM_DATA_TYPE::ONNX_TYPE_INT64), omTensorDestroy); omTensorGetElem(vFinalRef.get(), {0}) = yInit; From f79a408e4877c59c6bda144af8c721af189651f4 Mon Sep 17 00:00:00 2001 From: Alexandre Eichenberger Date: Wed, 28 Sep 2022 18:59:25 +0000 Subject: [PATCH 3/3] fix Signed-off-by: Alexandre Eichenberger --- docs/doc_example/CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git 
a/docs/doc_example/CMakeLists.txt b/docs/doc_example/CMakeLists.txt index 14946fbcda..825ebc1a71 100644 --- a/docs/doc_example/CMakeLists.txt +++ b/docs/doc_example/CMakeLists.txt @@ -82,7 +82,6 @@ add_onnx_mlir_executable(OMRuntimeTest LINK_LIBS PRIVATE OnnxMlirCompiler ExecutionSession - cruntime )