diff options
author | Vibhav Aggarwal <v.aggarwal@samsung.com> | 2024-01-04 16:37:30 +0900 |
---|---|---|
committer | Vibhav Aggarwal <v.aggarwal@samsung.com> | 2024-01-05 18:55:51 +0900 |
commit | 656c36a4d13f06c075baac83dcd2ef4b02569a9b (patch) | |
tree | 73e00f07fdd7f6ebf9b933331c03cbcaa552a547 | |
parent | 3a4e8ee9d6a785d19f0d7642ccf93636c0410ef4 (diff) | |
download | inference-engine-tflite-656c36a4d13f06c075baac83dcd2ef4b02569a9b.tar.gz inference-engine-tflite-656c36a4d13f06c075baac83dcd2ef4b02569a9b.tar.bz2 inference-engine-tflite-656c36a4d13f06c075baac83dcd2ef4b02569a9b.zip |
InferenceTFLite: drop mInputData member
[Issue type] code cleanup
Change-Id: Ifaff8ab6b1f1b16037b6249bd7918f8984282c35
Signed-off-by: Vibhav Aggarwal <v.aggarwal@samsung.com>
-rw-r--r-- | src/inference_engine_tflite.cpp | 42 | ||||
-rw-r--r-- | src/inference_engine_tflite_private.h | 1 |
2 files changed, 20 insertions, 23 deletions
diff --git a/src/inference_engine_tflite.cpp b/src/inference_engine_tflite.cpp index a1c8517..f37454b 100644 --- a/src/inference_engine_tflite.cpp +++ b/src/inference_engine_tflite.cpp @@ -154,27 +154,22 @@ namespace TFLiteImpl SetInterpreterInfo(); } - mInputData.clear(); - - void *pBuff = NULL; - for (auto& layer : mInputLayers) { size_t size = 1; inference_engine_tensor_buffer buffer; for (auto& dim : layer.second.shape) size *= dim; - if ((layer.second).data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) { - mInputData.push_back( - mInterpreter->typed_tensor<uint8_t>(mInputLayerId[layer.first])); - pBuff = mInputData.back(); + switch (layer.second.data_type) { + case INFERENCE_TENSOR_DATA_TYPE_UINT8: + auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mInputLayerId[layer.first])); buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 }; - } else if ((layer.second).data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) { - mInputData.push_back( - mInterpreter->typed_tensor<float>(mInputLayerId[layer.first])); - pBuff = mInputData.back(); + break; + case INFERENCE_TENSOR_DATA_TYPE_FLOAT32: + auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mInputLayerId[layer.first])); buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1 }; - } else { + break; + default: LOGE("Not supported"); return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT; } @@ -193,31 +188,34 @@ namespace TFLiteImpl SetInterpreterInfo(); } - void *pBuff = NULL; for (auto& layer : mOutputLayers) { inference_engine_tensor_buffer buffer; size_t size = 1; for (int idx2 = 0; idx2 < mInterpreter->tensor(mOutputLayerId[layer.first])->dims->size; ++idx2) size *= mInterpreter->tensor(mOutputLayerId[layer.first])->dims->data[idx2]; - if (mInterpreter->tensor(mOutputLayerId[layer.first])->type == kTfLiteUInt8) { + switch (mInterpreter->tensor(mOutputLayerId[layer.first])->type) { + case kTfLiteUInt8: LOGI("type is kTfLiteUInt8"); - pBuff = (void *) mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[layer.first]); + auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[layer.first])); buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 }; - } else if (mInterpreter->tensor(mOutputLayerId[layer.first])->type == kTfLiteInt64) { + break; + case kTfLiteInt64: LOGI("type is kTfLiteInt64"); - pBuff = (void*)mInterpreter->typed_tensor<int64_t>(mOutputLayerId[layer.first]); + auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<int64_t>(mOutputLayerId[layer.first])); buffer = {pBuff, INFERENCE_TENSOR_DATA_TYPE_INT64, size * 8, 1}; - } else if (mInterpreter->tensor(mOutputLayerId[layer.first])->type == kTfLiteFloat32) { + break; + case kTfLiteFloat32: LOGI("type is kTfLiteFloat32"); - pBuff = (void *) mInterpreter->typed_tensor<float>(mOutputLayerId[layer.first]); + auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mOutputLayerId[layer.first])); buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1 }; - } else { + break; + default: LOGE("Not supported"); return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT; } - buffers.insert(std::make_pair(mInterpreter->tensor(mOutputLayerId[layer.first])->name, buffer)); + buffers.insert(std::make_pair(layer.first, buffer)); } LOGI("LEAVE"); return INFERENCE_ENGINE_ERROR_NONE; diff --git a/src/inference_engine_tflite_private.h b/src/inference_engine_tflite_private.h index d491500..82605f6 100644 --- a/src/inference_engine_tflite_private.h +++ b/src/inference_engine_tflite_private.h @@ -93,7 +93,6 @@ namespace TFLiteImpl std::unique_ptr<tflite::Interpreter> mInterpreter; std::unique_ptr<tflite::FlatBufferModel> mFlatBuffModel; - std::vector<void *> mInputData; std::map<std::string, inference_engine_tensor_info> mInputLayers; std::map<std::string, inference_engine_tensor_info> mOutputLayers; |