diff options
author | Hyunsoo Park <hance.park@samsung.com> | 2020-04-02 18:16:04 +0900 |
---|---|---|
committer | Hyunsoo Park <hance.park@samsung.com> | 2020-04-02 18:16:04 +0900 |
commit | be54195efafaa52c194b5ac68063cb3a0f90edb5 (patch) | |
tree | 5815cd1ad6500eb2327658b8d8e87efa0075dd0c | |
parent | 50893b2d672b2e39ce03f7debed74c19c05aa9d0 (diff) | |
download | inference-engine-tflite-be54195efafaa52c194b5ac68063cb3a0f90edb5.tar.gz inference-engine-tflite-be54195efafaa52c194b5ac68063cb3a0f90edb5.tar.bz2 inference-engine-tflite-be54195efafaa52c194b5ac68063cb3a0f90edb5.zip |
Support UINT8
Change-Id: Idc7776c9123abd665f96204120864801657e2d54
Signed-off-by: Hyunsoo Park <hance.park@samsung.com>
-rw-r--r-- | src/inference_engine_tflite.cpp | 39 |
1 file changed, 31 insertions, 8 deletions
diff --git a/src/inference_engine_tflite.cpp b/src/inference_engine_tflite.cpp index 920f74e..1e8aa2a 100644 --- a/src/inference_engine_tflite.cpp +++ b/src/inference_engine_tflite.cpp @@ -151,18 +151,21 @@ int InferenceTFLite::GetInputTensorBuffers(std::vector<inference_engine_tensor_b void *pBuff = NULL; for (unsigned int idx = 0; idx < mInputLayerId.size(); ++idx ) { - + size_t size = 1; inference_engine_tensor_buffer buffer; - + for (std::vector<int>::iterator iter = mInputTensorInfo[idx].shape.begin(); + iter != mInputTensorInfo[idx].shape.end(); ++iter) { + size *= (*iter); + } if (mInputAttrType[idx] == kTfLiteUInt8) { mInputData.push_back(mInterpreter->typed_tensor<uint8_t>(mInputLayerId[idx])); pBuff = mInputData.back(); - buffer = {pBuff, TENSOR_DATA_TYPE_UINT8, 0, 1}; + buffer = {pBuff, TENSOR_DATA_TYPE_UINT8, size, 1}; } else if (mInputAttrType[idx] == kTfLiteFloat32) { - mInputData.push_back(mInterpreter->typed_tensor<float>(mInputLayerId[idx])); + mInputData.push_back(mInterpreter->typed_tensor<float>(mInputLayerId[idx])); pBuff = mInputData.back(); - buffer = {pBuff, TENSOR_DATA_TYPE_FLOAT32, 0, 1}; + buffer = {pBuff, TENSOR_DATA_TYPE_FLOAT32, size * 4, 1}; } else { LOGE("Not supported"); @@ -180,9 +183,25 @@ int InferenceTFLite::GetOutputTensorBuffers(std::vector<inference_engine_tensor_ for (unsigned int idx = 0; idx < mOutputLayerId.size(); ++idx) { inference_engine_tensor_buffer buffer; + size_t size = 1; + for (int idx2 = 0; idx2 < mInterpreter->tensor(mOutputLayerId[idx])->dims->size; ++idx2) { + size *= mInterpreter->tensor(mOutputLayerId[idx])->dims->data[idx2]; + } - pBuff = (void*)mInterpreter->typed_tensor<float>(mOutputLayerId[idx]); - buffer = {pBuff, TENSOR_DATA_TYPE_FLOAT32, 0, 1}; + if (mInterpreter->tensor(mOutputLayerId[idx])->type == kTfLiteUInt8) { + LOGI("type is kTfLiteUInt8"); + pBuff = (void*)mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[idx]); + buffer = {pBuff, TENSOR_DATA_TYPE_UINT8, size, 1}; + } + else if 
(mInterpreter->tensor(mOutputLayerId[idx])->type == kTfLiteFloat32) { + LOGI("type is kTfLiteFloat32"); + pBuff = (void*)mInterpreter->typed_tensor<float>(mOutputLayerId[idx]); + buffer = {pBuff, TENSOR_DATA_TYPE_FLOAT32, size * 4, 1}; + } + else { + LOGE("Not supported"); + return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT; + } buffers.push_back(buffer); } @@ -229,7 +248,6 @@ int InferenceTFLite::GetOutputLayerProperty(inference_engine_layer_property &pro std::vector<int> shape_nhwc; for (int idx = 0; idx <mInterpreter->tensor((*iter))->dims->size; idx++) { - LOGI("mInterpreter->tensor((*iter))->dims[%d]= [%d]", idx, mInterpreter->tensor((*iter))->dims->data[idx]); shape_nhwc.push_back(mInterpreter->tensor((*iter))->dims->data[idx]); } @@ -282,11 +300,14 @@ int InferenceTFLite::SetInputLayerProperty(inference_engine_layer_property &prop mInputLayer = property.layer_names; mInputTensorInfo = property.tensor_infos; + LOGI("LEAVE"); + return INFERENCE_ENGINE_ERROR_NONE; } int InferenceTFLite::SetOutputLayerProperty(inference_engine_layer_property &property) { + LOGI("ENTER"); std::vector<std::string>::iterator iter; for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) { std::string name = *iter; @@ -298,6 +319,8 @@ int InferenceTFLite::SetOutputLayerProperty(inference_engine_layer_property &pro mOutputLayer = property.layer_names; + LOGI("LEAVE"); + return INFERENCE_ENGINE_ERROR_NONE; } |