summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorHyunsoo Park <hance.park@samsung.com>2022-07-07 11:31:08 +0900
committerHyunsoo Park <hance.park@samsung.com>2022-08-24 15:18:04 +0900
commitd6259b371456de2c6d3b30dfb83234c574afcc05 (patch)
tree6de049d273df3e3f349d38b0be52208e153a01c1
parent689f25360305adb5c00b0b78ac04477aefdda256 (diff)
downloadinference-engine-tflite-tizen_7.0_hotfix.tar.gz
inference-engine-tflite-tizen_7.0_hotfix.tar.bz2
inference-engine-tflite-tizen_7.0_hotfix.zip
Change-Id: Ieb020b720fa19cccd1826706d67ae7887332002b
Signed-off-by: Hyunsoo Park <hance.park@samsung.com>
-rw-r--r--packaging/inference-engine-tflite.spec2
-rw-r--r--src/inference_engine_tflite.cpp68
2 files changed, 29 insertions, 41 deletions
diff --git a/packaging/inference-engine-tflite.spec b/packaging/inference-engine-tflite.spec
index 8277bda..566eae8 100644
--- a/packaging/inference-engine-tflite.spec
+++ b/packaging/inference-engine-tflite.spec
@@ -1,6 +1,6 @@
Name: inference-engine-tflite
Summary: Tensorflow-Lite based implementation of inference-engine-interface
-Version: 0.0.4
+Version: 0.0.5
Release: 0
Group: Multimedia/Libraries
License: Apache-2.0
diff --git a/src/inference_engine_tflite.cpp b/src/inference_engine_tflite.cpp
index e78b971..8f22bd9 100644
--- a/src/inference_engine_tflite.cpp
+++ b/src/inference_engine_tflite.cpp
@@ -161,16 +161,15 @@ namespace TFLiteImpl
for (auto& layer : mInputLayers) {
size_t size = 1;
inference_engine_tensor_buffer buffer;
- for (auto& dim : layer.second.shape) {
+ for (auto& dim : layer.second.shape)
size *= dim;
- }
- if ( (layer.second).data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
+ if ((layer.second).data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
mInputData.push_back(
mInterpreter->typed_tensor<uint8_t>(mInputLayerId[layer.first]));
pBuff = mInputData.back();
buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
- } else if ( (layer.second).data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
+ } else if ((layer.second).data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
mInputData.push_back(
mInterpreter->typed_tensor<float>(mInputLayerId[layer.first]));
pBuff = mInputData.back();
@@ -198,11 +197,8 @@ namespace TFLiteImpl
for (auto& layer : mOutputLayers) {
inference_engine_tensor_buffer buffer;
size_t size = 1;
- for (int idx2 = 0;
- idx2 < mInterpreter->tensor(mOutputLayerId[layer.first])->dims->size;
- ++idx2) {
+ for (int idx2 = 0; idx2 < mInterpreter->tensor(mOutputLayerId[layer.first])->dims->size; ++idx2)
size *= mInterpreter->tensor(mOutputLayerId[layer.first])->dims->data[idx2];
- }
if (mInterpreter->tensor(mOutputLayerId[layer.first])->type == kTfLiteUInt8) {
LOGI("type is kTfLiteUInt8");
@@ -245,30 +241,25 @@ namespace TFLiteImpl
{
LOGI("ENTER");
- std::map<std::string, inference_engine_tensor_info>().swap(mOutputLayers);
+ mOutputLayers.clear();
for (auto& layer :mOutputLayerId) {
- LOGI("output layer ID: %d", layer.second);
- if ( layer.second < 0) {
- LOGE("Invalid output layer");
+ if (layer.second < 0) {
+ LOGE("Invalid output layer ID [%d]", layer.second);
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
inference_engine_tensor_info tensor_info;
- LOGI("mInterpreter->tensor(%d)->dims name[%s]",
- layer.second, mInterpreter->tensor(layer.second)->name);
- LOGI("mInterpreter->tensor(%d)->dims size[%d]",
- layer.second, mInterpreter->tensor(layer.second)->dims->size);
- LOGI("mInterpreter->tensor(%d)->dims type[%d]",
- layer.second, mInterpreter->tensor(layer.second)->type);
+ LOGI("mInterpreter->tensor(%d)->dims name[%s] size[%d] type[%d]",
+ layer.second,
+ mInterpreter->tensor(layer.second)->name,
+ mInterpreter->tensor(layer.second)->dims->size,
+ mInterpreter->tensor(layer.second)->type);
std::vector<size_t> shape_nhwc;
- for (int idx = 0; idx < mInterpreter->tensor(layer.second)->dims->size;
- idx++) {
- shape_nhwc.push_back(
- mInterpreter->tensor(layer.second)->dims->data[idx]);
- }
+ for (int idx = 0; idx < mInterpreter->tensor(layer.second)->dims->size; idx++)
+ shape_nhwc.push_back(mInterpreter->tensor(layer.second)->dims->data[idx]);
//tflite only supports NHWC (https://www.tensorflow.org/lite/guide/ops_compatibility).
tensor_info.shape = shape_nhwc;
@@ -287,9 +278,10 @@ namespace TFLiteImpl
return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
}
tensor_info.size = 1;
- for (auto & dim : tensor_info.shape) {
+
+ for (auto & dim : tensor_info.shape)
tensor_info.size *= dim;
- }
+
mOutputLayers.insert(std::make_pair(mInterpreter->tensor(layer.second)->name, tensor_info));
}
@@ -304,10 +296,10 @@ namespace TFLiteImpl
{
LOGI("ENTER");
- for (auto& layer : property.layers) {
+ for (auto& layer : property.layers)
LOGI("input layer name = %s", layer.first.c_str());
- }
- std::map<std::string, inference_engine_tensor_info>().swap(mInputLayers);
+
+ mInputLayers.clear();
mInputLayers = property.layers;
LOGI("LEAVE");
@@ -320,10 +312,10 @@ namespace TFLiteImpl
{
LOGI("ENTER");
- for (auto& layer : property.layers) {
+ for (auto& layer : property.layers)
LOGI("input layer name = %s", layer.first.c_str());
- }
- std::map<std::string, inference_engine_tensor_info>().swap(mOutputLayers);
+
+ mOutputLayers.clear();
mOutputLayers = property.layers;
LOGI("LEAVE");
@@ -420,19 +412,15 @@ namespace TFLiteImpl
std::vector<size_t> shape_nhwc;
- for (int idx = 0;
- idx < mInterpreter->tensor(layer.second)->dims->size; idx++) {
- shape_nhwc.push_back(
- mInterpreter->tensor(layer.second)->dims->data[idx]);
- }
+ for (int idx = 0; idx < mInterpreter->tensor(layer.second)->dims->size; idx++)
+ shape_nhwc.push_back(mInterpreter->tensor(layer.second)->dims->data[idx]);
inference_engine_tensor_info tensor_info {
shape_nhwc, INFERENCE_TENSOR_SHAPE_NHWC,
INFERENCE_TENSOR_DATA_TYPE_NONE, 1
};
- switch (mInterpreter->tensor(layer.second)->type)
- {
+ switch (mInterpreter->tensor(layer.second)->type) {
case kTfLiteUInt8:
LOGI("type is kTfLiteUInt8");
tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
@@ -446,9 +434,9 @@ namespace TFLiteImpl
return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
}
- for (auto& dim : tensor_info.shape) {
+ for (auto& dim : tensor_info.shape)
tensor_info.size *= dim;
- }
+
layers.insert(std::make_pair(mInterpreter->tensor(layer.second)->name, tensor_info));
}