author     heechul.jeon <heechul.jeon@samsung.com>   2022-06-30 16:15:52 +0900
committer  Hyunsoo Park <hance.park@samsung.com>     2022-08-24 15:17:59 +0900
commit     689f25360305adb5c00b0b78ac04477aefdda256 (patch)
tree       555dd6c48720d2319b787be9e5f881e46330d977
parent     fd75f02a4c932f268f6f570e8e5333d59f422404 (diff)
InferenceTFLite: Remove code redundancy
[Version] 0.0.4
[Issue type] code cleanup

Change-Id: Ia6942cd730aedd74f5acbd98b75f6b4e1b7dabfa
Signed-off-by: heechul.jeon <heechul.jeon@samsung.com>
-rw-r--r--  packaging/inference-engine-tflite.spec  |    2
-rw-r--r--  src/inference_engine_tflite.cpp          |  118
-rw-r--r--  src/inference_engine_tflite_private.h    |    2
3 files changed, 54 insertions(+), 68 deletions(-)
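The cleanup replaces two near-identical per-layer loops in SetInterpreterInfo() (one for input layers, one for output layers) with a single FillLayer() helper, shown in the diff below. The following standalone sketch illustrates the pattern; TensorInfo and collectLayerInfo are hypothetical stand-ins for the project's inference_engine_tensor_info and FillLayer, while the tflite::Interpreter calls (tensor(), dims, type, name) are the standard TensorFlow Lite C++ API that the real code uses.

// Minimal sketch of the deduplication pattern (hypothetical names,
// assuming the standard TensorFlow Lite C++ headers are available).
#include <cstddef>
#include <map>
#include <string>
#include <vector>
#include "tensorflow/lite/interpreter.h"

struct TensorInfo {                  // stand-in for inference_engine_tensor_info
    std::vector<size_t> shape;       // NHWC dimensions
    TfLiteType data_type = kTfLiteNoType;
    size_t size = 1;                 // element count, product of the shape
};

// One helper replaces the two copy-pasted per-layer loops.
static bool collectLayerInfo(tflite::Interpreter &interpreter,
                             const std::map<std::string, int> &layerId,
                             std::map<std::string, TensorInfo> &layers)
{
    layers.clear();
    for (const auto &layer : layerId) {
        const TfLiteTensor *tensor = interpreter.tensor(layer.second);

        TensorInfo info;
        for (int idx = 0; idx < tensor->dims->size; ++idx)
            info.shape.push_back(tensor->dims->data[idx]);

        // Only the two tensor types the engine handles are accepted.
        if (tensor->type != kTfLiteUInt8 && tensor->type != kTfLiteFloat32)
            return false;
        info.data_type = tensor->type;

        for (size_t dim : info.shape)
            info.size *= dim;

        layers.emplace(tensor->name, info);
    }
    return true;
}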
diff --git a/packaging/inference-engine-tflite.spec b/packaging/inference-engine-tflite.spec
index fb56e8c..8277bda 100644
--- a/packaging/inference-engine-tflite.spec
+++ b/packaging/inference-engine-tflite.spec
@@ -1,6 +1,6 @@
Name: inference-engine-tflite
Summary: Tensorflow-Lite based implementation of inference-engine-interface
-Version: 0.0.3
+Version: 0.0.4
Release: 0
Group: Multimedia/Libraries
License: Apache-2.0
diff --git a/src/inference_engine_tflite.cpp b/src/inference_engine_tflite.cpp
index 36e671d..e78b971 100644
--- a/src/inference_engine_tflite.cpp
+++ b/src/inference_engine_tflite.cpp
@@ -366,81 +366,22 @@ namespace TFLiteImpl
int InferenceTFLite::SetInterpreterInfo()
{
+ int ret = INFERENCE_ENGINE_ERROR_NONE;
LOGI("ENTER");
+
if (mInputLayers.empty()) {
LOGI("mInputLayer is empty. layers and tensors that mInterpreter has will be returned.");
- mInputLayers.clear();
- for (auto& layer : mInputLayerId) {
-
- std::vector<size_t> shape_nhwc;
-
- for (int idx = 0;
- idx < mInterpreter->tensor(layer.second)->dims->size; idx++) {
- shape_nhwc.push_back(
- mInterpreter->tensor(layer.second)->dims->data[idx]);
- }
-
- inference_engine_tensor_info tensor_info {
- shape_nhwc, INFERENCE_TENSOR_SHAPE_NHWC,
- INFERENCE_TENSOR_DATA_TYPE_NONE, 1
- };
-
- if (mInterpreter->tensor(layer.second)->type == kTfLiteUInt8) {
- LOGI("type is kTfLiteUInt8");
- tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
- } else if (mInterpreter->tensor(layer.second)->type ==
- kTfLiteFloat32) {
- LOGI("type is kTfLiteFloat32");
- tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
- } else {
- LOGE("Not supported");
- return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
- }
-
- for (auto& dim : tensor_info.shape) {
- tensor_info.size *= dim;
- }
- mInputLayers.insert(std::make_pair(mInterpreter->tensor(layer.second)->name, tensor_info));
- }
+ ret = FillLayer(mInputLayers, mInputLayerId);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE)
+ return ret;
}
if (mOutputLayers.empty()) {
LOGI("mOutputLayers is empty. layers and tensors that mInterpreter has will be returned.");
-
- mOutputLayers.clear();
- for (auto& layer : mOutputLayerId) {
-
- std::vector<size_t> shape_nhwc;
-
- for (int idx = 0;
- idx < mInterpreter->tensor(layer.second)->dims->size; idx++) {
- shape_nhwc.push_back(
- mInterpreter->tensor(layer.second)->dims->data[idx]);
- }
-
- inference_engine_tensor_info tensor_info {
- shape_nhwc, INFERENCE_TENSOR_SHAPE_NHWC,
- INFERENCE_TENSOR_DATA_TYPE_NONE, 1
- };
-
- if (mInterpreter->tensor(layer.second)->type == kTfLiteUInt8) {
- LOGI("type is kTfLiteUInt8");
- tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
- } else if (mInterpreter->tensor(layer.second)->type ==
- kTfLiteFloat32) {
- LOGI("type is kTfLiteFloat32");
- tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
- } else {
- LOGE("Not supported");
- return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
- }
-
- for (auto& dim : tensor_info.shape) {
- tensor_info.size *= dim;
- }
- mOutputLayers.insert(std::make_pair(mInterpreter->tensor(layer.second)->name, tensor_info));
- }
+ ret = FillLayer(mOutputLayers, mOutputLayerId);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE)
+ return ret;
}
LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;
@@ -471,6 +412,49 @@ namespace TFLiteImpl
}
}
+ int InferenceTFLite::FillLayer(std::map<std::string, inference_engine_tensor_info>& layers,
+ std::map<std::string, int>& layerId)
+ {
+ layers.clear();
+ for (auto& layer : layerId) {
+
+ std::vector<size_t> shape_nhwc;
+
+ for (int idx = 0;
+ idx < mInterpreter->tensor(layer.second)->dims->size; idx++) {
+ shape_nhwc.push_back(
+ mInterpreter->tensor(layer.second)->dims->data[idx]);
+ }
+
+ inference_engine_tensor_info tensor_info {
+ shape_nhwc, INFERENCE_TENSOR_SHAPE_NHWC,
+ INFERENCE_TENSOR_DATA_TYPE_NONE, 1
+ };
+
+ switch (mInterpreter->tensor(layer.second)->type)
+ {
+ case kTfLiteUInt8:
+ LOGI("type is kTfLiteUInt8");
+ tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
+ break;
+ case kTfLiteFloat32:
+ LOGI("type is kTfLiteFloat32");
+ tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+ break;
+ default:
+ LOGE("Not supported");
+ return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+ }
+
+ for (auto& dim : tensor_info.shape) {
+ tensor_info.size *= dim;
+ }
+ layers.insert(std::make_pair(mInterpreter->tensor(layer.second)->name, tensor_info));
+
+ }
+ return INFERENCE_ENGINE_ERROR_NONE;
+ }
+
extern "C"
{
class IInferenceEngineCommon *EngineCommonInit(void)
diff --git a/src/inference_engine_tflite_private.h b/src/inference_engine_tflite_private.h
index 33dd1f4..d491500 100644
--- a/src/inference_engine_tflite_private.h
+++ b/src/inference_engine_tflite_private.h
@@ -88,6 +88,8 @@ namespace TFLiteImpl
void FillLayerId(std::map<std::string, int>& layerId,
std::map<std::string, inference_engine_tensor_info>& layers,
const std::vector<int>& buffer);
+ int FillLayer(std::map<std::string, inference_engine_tensor_info>& layers,
+ std::map<std::string, int>& layerId);
std::unique_ptr<tflite::Interpreter> mInterpreter;
std::unique_ptr<tflite::FlatBufferModel> mFlatBuffModel;
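With the helper factored out, the two call sites in SetInterpreterInfo() collapse to the guarded calls visible in the first hunk. In terms of the hypothetical sketch above, the call sites would reduce to something like this:

// Both layer maps now go through one code path; only the arguments differ.
if (mInputLayers.empty() &&
    !collectLayerInfo(*mInterpreter, mInputLayerId, mInputLayers))
    return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;

if (mOutputLayers.empty() &&
    !collectLayerInfo(*mInterpreter, mOutputLayerId, mOutputLayers))
    return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;

Passing the destination map by reference is what lets one helper serve both input and output layers, which is exactly the shape of the commit's FillLayer(layers, layerId) signature.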