author    | Vibhav Aggarwal <v.aggarwal@samsung.com> | 2024-01-23 15:44:40 +0900
committer | Vibhav Aggarwal <v.aggarwal@samsung.com> | 2024-01-24 15:24:29 +0900
commit    | 0212d1ae8fd058dd8d1fb27f134cc68c7d420790 (patch)
tree      | 8596194891746305fc19cc655ddabd3bf9751362
parent    | 656c36a4d13f06c075baac83dcd2ef4b02569a9b (diff)
InferenceTFLite: fix memory leak (tizen_devel)
[Issue type] bug fix
The GPU delegate created by TfLiteGpuDelegateV2Create()
must be deleted using TfLiteGpuDelegateV2Delete().
Change-Id: Iacdcb1c3e51181584fa3c42447bf008937f986ea
Signed-off-by: Vibhav Aggarwal <v.aggarwal@samsung.com>
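For context, a minimal standalone sketch of the create/delete pairing described above, using the TensorFlow Lite C++ API. The model path, the fallback handling, and the explicit teardown order are illustrative assumptions, not part of this patch:

```cpp
#include <memory>

#include "tensorflow/lite/delegates/gpu/delegate.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"

int main()
{
	// "model.tflite" is a placeholder path for this sketch.
	auto model = tflite::FlatBufferModel::BuildFromFile("model.tflite");
	if (!model)
		return 1;

	std::unique_ptr<tflite::Interpreter> interpreter;
	tflite::ops::builtin::BuiltinOpResolver resolver;
	tflite::InterpreterBuilder(*model, resolver)(&interpreter);

	// Create the GPU delegate with default options ...
	TfLiteGpuDelegateOptionsV2 options = TfLiteGpuDelegateOptionsV2Default();
	TfLiteDelegate *delegate = TfLiteGpuDelegateV2Create(&options);

	if (delegate && interpreter->ModifyGraphWithDelegate(delegate) == kTfLiteOk) {
		// The graph now runs through the GPU delegate.
	}

	// ... AllocateTensors(), fill input tensors, Invoke(), read outputs ...

	// ... and release the delegate with the matching deleter, only after
	// the interpreter that references it is gone. Keeping the pointer as a
	// class member and deleting it in the destructor, as this patch does,
	// follows the same pattern.
	interpreter.reset();
	if (delegate)
		TfLiteGpuDelegateV2Delete(delegate);

	return 0;
}
```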
-rw-r--r-- | src/inference_engine_tflite.cpp       | 23
-rw-r--r-- | src/inference_engine_tflite_private.h |  4
2 files changed, 16 insertions, 11 deletions
diff --git a/src/inference_engine_tflite.cpp b/src/inference_engine_tflite.cpp
index f37454b..edb27c2 100644
--- a/src/inference_engine_tflite.cpp
+++ b/src/inference_engine_tflite.cpp
@@ -30,7 +30,7 @@ namespace InferenceEngineImpl
 {
 namespace TFLiteImpl
 {
-	InferenceTFLite::InferenceTFLite(void) : mTargetTypes(INFERENCE_TARGET_NONE)
+	InferenceTFLite::InferenceTFLite()
 	{
 		LOGI("ENTER");
 		LOGI("LEAVE");
@@ -38,7 +38,8 @@ namespace TFLiteImpl
 
 	InferenceTFLite::~InferenceTFLite()
 	{
-		;
+		if (mDelegate)
+			TfLiteGpuDelegateV2Delete(mDelegate);
 	}
 
 	int InferenceTFLite::SetPrivateData(void *data)
@@ -117,13 +118,13 @@ namespace TFLiteImpl
 		if (mTargetTypes == INFERENCE_TARGET_GPU) {
 			TfLiteGpuDelegateOptionsV2 options = TfLiteGpuDelegateOptionsV2Default();
-			TfLiteDelegate *delegate = TfLiteGpuDelegateV2Create(&options);
-			if (!delegate){
+			mDelegate = TfLiteGpuDelegateV2Create(&options);
+			if (!mDelegate){
 				LOGE("Failed to GPU delegate");
 				return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
 			}
 
-			if (mInterpreter->ModifyGraphWithDelegate(delegate) != kTfLiteOk)
+			if (mInterpreter->ModifyGraphWithDelegate(mDelegate) != kTfLiteOk)
 			{
 				LOGE("Failed to construct GPU delegate");
 				return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
@@ -160,13 +161,14 @@ namespace TFLiteImpl
 			for (auto& dim : layer.second.shape)
 				size *= dim;
 
+			void *pBuff;
 			switch (layer.second.data_type) {
 			case INFERENCE_TENSOR_DATA_TYPE_UINT8:
-				auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mInputLayerId[layer.first]));
+				pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mInputLayerId[layer.first]));
 				buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
 				break;
 			case INFERENCE_TENSOR_DATA_TYPE_FLOAT32:
-				auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mInputLayerId[layer.first]));
+				pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mInputLayerId[layer.first]));
 				buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1 };
 				break;
 			default:
@@ -194,20 +196,21 @@ namespace TFLiteImpl
 			for (int idx2 = 0; idx2 < mInterpreter->tensor(mOutputLayerId[layer.first])->dims->size; ++idx2)
 				size *= mInterpreter->tensor(mOutputLayerId[layer.first])->dims->data[idx2];
 
+			void *pBuff;
 			switch (mInterpreter->tensor(mOutputLayerId[layer.first])->type) {
 			case kTfLiteUInt8:
 				LOGI("type is kTfLiteUInt8");
-				auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[layer.first]));
+				pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[layer.first]));
 				buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
 				break;
 			case kTfLiteInt64:
 				LOGI("type is kTfLiteInt64");
-				auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<int64_t>(mOutputLayerId[layer.first]));
+				pBuff = static_cast<void *>(mInterpreter->typed_tensor<int64_t>(mOutputLayerId[layer.first]));
 				buffer = {pBuff, INFERENCE_TENSOR_DATA_TYPE_INT64, size * 8, 1};
 				break;
 			case kTfLiteFloat32:
 				LOGI("type is kTfLiteFloat32");
-				auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mOutputLayerId[layer.first]));
+				pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mOutputLayerId[layer.first]));
 				buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1 };
 				break;
 			default:
diff --git a/src/inference_engine_tflite_private.h b/src/inference_engine_tflite_private.h
index 82605f6..8009022 100644
--- a/src/inference_engine_tflite_private.h
+++ b/src/inference_engine_tflite_private.h
@@ -102,7 +102,9 @@ namespace TFLiteImpl
 		std::string mConfigFile;
 		std::string mWeightFile;
 
-		int mTargetTypes;
+		int mTargetTypes { INFERENCE_TARGET_NONE };
+
+		TfLiteDelegate *mDelegate {};
 	};
 } /* InferenceEngineImpl */