| author | Inki Dae <inki.dae@samsung.com> | 2020-06-26 09:22:24 +0900 |
|---|---|---|
| committer | Inki Dae <inki.dae@samsung.com> | 2020-06-26 09:25:02 +0900 |
| commit | 2fc9aa5a36a9047f4161e0261d265a8d1c436420 (patch) | |
| tree | 4149fc8e7dd7029ff84c9dfd187f1e7e710f5311 /src/inference_engine_tflite.cpp | |
| parent | d91064f338288f1a08949879e3f1d7a75cd92871 (diff) | |
Fix coding style based on Tizen SE C++ Coding Rule
Refs: submit/tizen/20200626.070253, submit/tizen/20200626.060446, submit/tizen/20200626.050805, accepted/tizen/unified/20200628.221636
Tizen SE C++ Coding Rule:
https://wiki.tizen.org/Native_Platform_Coding_Idiom_and_Style_Guide#C.2B.2B_Coding_Style
Change-Id: I9462a7838c00bfd5c8c1ec18270b203db923eb87
Signed-off-by: Inki Dae <inki.dae@samsung.com>
Diffstat (limited to 'src/inference_engine_tflite.cpp')
| -rw-r--r-- | src/inference_engine_tflite.cpp | 758 |
1 file changed, 395 insertions(+), 363 deletions(-)
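The change is whitespace-only; no statement is added or removed. The Tizen SE rule applied here puts the namespace brace on its own line, indents declarations one level inside the namespaces, cuddles else/else-if onto the closing brace, and wraps lines that exceed the column limit. A trimmed before/after fragment of this file shows the effect (illustrative excerpt only; elisions marked with ...):

	// Before: brace on the namespace line, members at column 0
	namespace InferenceEngineImpl {
	namespace TFLiteImpl {

	int InferenceTFLite::SetTargetDevices(int types)
	{
		LOGI("ENTER");
		...
	}

	// After: brace on its own line, members indented one level
	namespace InferenceEngineImpl
	{
	namespace TFLiteImpl
	{
		int InferenceTFLite::SetTargetDevices(int types)
		{
			LOGI("ENTER");
			...
		}
	} /* TFLiteImpl */
	} /* InferenceEngineImpl */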
diff --git a/src/inference_engine_tflite.cpp b/src/inference_engine_tflite.cpp
index 9677804..8490b4d 100644
--- a/src/inference_engine_tflite.cpp
+++ b/src/inference_engine_tflite.cpp
@@ -24,410 +24,442 @@
 #include <queue>

 // H/W
-#define MV_INFERENCE_TFLITE_MAX_THREAD_NUM 4
+#define MV_INFERENCE_TFLITE_MAX_THREAD_NUM 4

-namespace InferenceEngineImpl {
-namespace TFLiteImpl {
-
-InferenceTFLite::InferenceTFLite(void) :
-		mTargetTypes(INFERENCE_TARGET_NONE)
+namespace InferenceEngineImpl
 {
-	LOGI("ENTER");
-	LOGI("LEAVE");
-}
-
-InferenceTFLite::~InferenceTFLite()
+namespace TFLiteImpl
 {
-	;
-}
-
-int InferenceTFLite::SetPrivateData(void *data)
-{
-	// Nothing to do yet.
-
-	return INFERENCE_ENGINE_ERROR_NONE;
-}
+	InferenceTFLite::InferenceTFLite(void) : mTargetTypes(INFERENCE_TARGET_NONE)
+	{
+		LOGI("ENTER");
+		LOGI("LEAVE");
+	}

-int InferenceTFLite::SetTargetDevices(int types)
-{
-	LOGI("ENTER");
+	InferenceTFLite::~InferenceTFLite()
+	{
+		;
+	}

-	mTargetTypes = types;
+	int InferenceTFLite::SetPrivateData(void *data)
+	{
+		// Nothing to do yet.

-	LOGI("LEAVE");
-	return INFERENCE_ENGINE_ERROR_NONE;
-}
+		return INFERENCE_ENGINE_ERROR_NONE;
+	}

-int InferenceTFLite::Load(std::vector<std::string> model_paths, inference_model_format_e model_format)
-{
-	int ret = INFERENCE_ENGINE_ERROR_NONE;
+	int InferenceTFLite::SetTargetDevices(int types)
+	{
+		LOGI("ENTER");

-	mWeightFile = model_paths.back();
+		mTargetTypes = types;

-	if (access(mWeightFile.c_str(), F_OK)) {
-		LOGE("model file path [%s]", mWeightFile.c_str());
-		return INFERENCE_ENGINE_ERROR_INVALID_PATH;
+		LOGI("LEAVE");
+		return INFERENCE_ENGINE_ERROR_NONE;
 	}

-	LOGI("mWeightFile.c_str() result [%s]", mWeightFile.c_str());
+	int InferenceTFLite::Load(std::vector<std::string> model_paths,
+							  inference_model_format_e model_format)
+	{
+		int ret = INFERENCE_ENGINE_ERROR_NONE;
+
+		mWeightFile = model_paths.back();
+
+		if (access(mWeightFile.c_str(), F_OK)) {
+			LOGE("model file path [%s]", mWeightFile.c_str());
+			return INFERENCE_ENGINE_ERROR_INVALID_PATH;
+		}
+
+		LOGI("mWeightFile.c_str() result [%s]", mWeightFile.c_str());
+
+		mFlatBuffModel =
+				tflite::FlatBufferModel::BuildFromFile(mWeightFile.c_str());
+		if (!mFlatBuffModel) {
+			LOGE("Failed to mmap model %s", mWeightFile.c_str());
+			return INFERENCE_ENGINE_ERROR_INVALID_DATA;
+		}
+
+		tflite::ops::builtin::BuiltinOpResolver resolver;
+
+		tflite::InterpreterBuilder(*mFlatBuffModel, resolver)(&mInterpreter);
+		if (!mInterpreter) {
+			LOGE("Failed to construct interpreter");
+			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+		}
+
+		LOGI("Inferece targets are: [%d]", mTargetTypes);
+
+		switch (mTargetTypes) {
+		case INFERENCE_TARGET_CPU:
+			mInterpreter->UseNNAPI(false);
+			break;
+		case INFERENCE_TARGET_GPU:
+			mInterpreter->UseNNAPI(true);
+			break;
+		case INFERENCE_TARGET_CUSTOM:
+		case INFERENCE_TARGET_NONE:
+		default:
+			LOGW("Not supported device type [%d], Set CPU mode",
+				 (int) mTargetTypes);
+		}
+
+		mInterpreter->SetNumThreads(MV_INFERENCE_TFLITE_MAX_THREAD_NUM);
+		LOGI("mInterpreter->tensors_size() :[%zu]",
+			 mInterpreter->tensors_size());
+
+		// input tensor
+		if (mInterpreter->inputs().size()) {
+			mInputLayerId = mInterpreter->inputs();
+		} else {
+			std::vector<std::string>::iterator iter;
+			mInputLayerId.clear();
+			for (iter = mInputLayer.begin(); iter != mInputLayer.end();
+				 ++iter) {
+				LOGI("mInputLayer list [%s]", (*iter).c_str());
+				for (unsigned int idx = 0; idx < mInterpreter->tensors_size();
+					 ++idx) {
+					if (mInterpreter->tensor(idx)->name == NULL)
+						continue;
+					if ((*iter).compare(mInterpreter->tensor(idx)->name) == 0) {
+						mInputLayerId.push_back(idx);
+						break;
+					}
+				}
+			}
+		}
+
+		// output tensor
+		if (mInterpreter->outputs().size()) {
+			mOutputLayerId = mInterpreter->outputs();
+		} else {
+			std::vector<std::string>::iterator iter;
+			mOutputLayerId.clear();
+			for (iter = mOutputLayer.begin(); iter != mOutputLayer.end();
+				 ++iter) {
+				LOGI("mOutputLayer list [%s]", (*iter).c_str());
+				for (unsigned int idx = 0; idx < mInterpreter->tensors_size();
+					 ++idx) {
+					if (mInterpreter->tensor(idx)->name == NULL)
+						continue;
+					if ((*iter).compare(mInterpreter->tensor(idx)->name) == 0) {
+						mOutputLayerId.push_back(idx);
+						break;
+					}
+				}
+			}
+		}
+
+		if (mInterpreter->AllocateTensors() != kTfLiteOk) {
+			LOGE("Fail to allocate tensor");
+			return INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY;
+		}
+
+		for (unsigned int idx = 0; idx < mInputLayerId.size(); ++idx) {
+			mInputAttrType.push_back(
+					mInterpreter->tensor(mInputLayerId[idx])->type);
+		}
+
+		return ret;
+	}

-	mFlatBuffModel = tflite::FlatBufferModel::BuildFromFile(mWeightFile.c_str());
-	if (!mFlatBuffModel) {
-		LOGE("Failed to mmap model %s", mWeightFile.c_str());
-		return INFERENCE_ENGINE_ERROR_INVALID_DATA;
-	}
+	int InferenceTFLite::GetInputTensorBuffers(
+			std::vector<inference_engine_tensor_buffer> &buffers)
+	{
+		LOGI("ENTER");
+
+		if (mInputTensorInfo.empty()) {
+			SetInterpreterInfo();
+		}
+
+		mInputData.clear();
+
+		void *pBuff = NULL;
+
+		for (unsigned int idx = 0; idx < mInputLayerId.size(); ++idx) {
+			size_t size = 1;
+			inference_engine_tensor_buffer buffer;
+			for (std::vector<size_t>::iterator iter =
+						 mInputTensorInfo[idx].shape.begin();
+				 iter != mInputTensorInfo[idx].shape.end(); ++iter) {
+				size *= (*iter);
+			}
+			if (mInputAttrType[idx] == kTfLiteUInt8) {
+				mInputData.push_back(mInterpreter->typed_tensor<uint8_t>(
+						mInputLayerId[idx]));
+				pBuff = mInputData.back();
+				buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
+			} else if (mInputAttrType[idx] == kTfLiteFloat32) {
+				mInputData.push_back(
+						mInterpreter->typed_tensor<float>(mInputLayerId[idx]));
+				pBuff = mInputData.back();
+				buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4,
+						   1 };
+			} else {
+				LOGE("Not supported");
+				return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+			}
+			buffers.push_back(buffer);
+		}
+
+		return INFERENCE_ENGINE_ERROR_NONE;
+	}

-	tflite::ops::builtin::BuiltinOpResolver resolver;
+	int InferenceTFLite::GetOutputTensorBuffers(
+			std::vector<inference_engine_tensor_buffer> &buffers)
+	{
+		void *pBuff = NULL;
+
+		for (unsigned int idx = 0; idx < mOutputLayerId.size(); ++idx) {
+			inference_engine_tensor_buffer buffer;
+			size_t size = 1;
+			for (int idx2 = 0;
+				 idx2 < mInterpreter->tensor(mOutputLayerId[idx])->dims->size;
+				 ++idx2) {
+				size *= mInterpreter->tensor(mOutputLayerId[idx])
+								->dims->data[idx2];
+			}
+
+			if (mInterpreter->tensor(mOutputLayerId[idx])->type ==
+				kTfLiteUInt8) {
+				LOGI("type is kTfLiteUInt8");
+				pBuff = (void *) mInterpreter->typed_tensor<uint8_t>(
+						mOutputLayerId[idx]);
+				buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
+			} else if (mInterpreter->tensor(mOutputLayerId[idx])->type ==
+					   kTfLiteFloat32) {
+				LOGI("type is kTfLiteFloat32");
+				pBuff = (void *) mInterpreter->typed_tensor<float>(
+						mOutputLayerId[idx]);
+				buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4,
+						   1 };
+			} else {
+				LOGE("Not supported");
+				return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+			}
+
+			buffers.push_back(buffer);
+		}
+		return INFERENCE_ENGINE_ERROR_NONE;
+	}

-	tflite::InterpreterBuilder(*mFlatBuffModel, resolver)(&mInterpreter);
-	if (!mInterpreter) {
-		LOGE("Failed to construct interpreter");
-		return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-	}
+	int InferenceTFLite::GetInputLayerProperty(
+			inference_engine_layer_property &property)
+	{
+		LOGI("ENTER");
+		SetInterpreterInfo();
+		property.layer_names = mInputLayer;
+		property.tensor_infos = mInputTensorInfo;

-	LOGI("Inferece targets are: [%d]", mTargetTypes);
+		LOGI("LEAVE");

-	switch (mTargetTypes) {
-	case INFERENCE_TARGET_CPU:
-		mInterpreter->UseNNAPI(false);
-		break;
-	case INFERENCE_TARGET_GPU:
-		mInterpreter->UseNNAPI(true);
-		break;
-	case INFERENCE_TARGET_CUSTOM:
-	case INFERENCE_TARGET_NONE:
-	default:
-		LOGW("Not supported device type [%d], Set CPU mode", (int)mTargetTypes);
+		return INFERENCE_ENGINE_ERROR_NONE;
 	}

-	mInterpreter->SetNumThreads(MV_INFERENCE_TFLITE_MAX_THREAD_NUM);
-	LOGI("mInterpreter->tensors_size() :[%zu]",mInterpreter->tensors_size());
-
-	// input tensor
-	if (mInterpreter->inputs().size()) {
-		mInputLayerId = mInterpreter->inputs();
-	} else {
-		std::vector<std::string>::iterator iter;
-		mInputLayerId.clear();
-		for (iter = mInputLayer.begin(); iter != mInputLayer.end(); ++iter) {
-			LOGI("mInputLayer list [%s]", (*iter).c_str());
-			for (unsigned int idx = 0; idx < mInterpreter->tensors_size(); ++idx) {
-				if (mInterpreter->tensor(idx)->name == NULL)
-					continue;
-				if ((*iter).compare(mInterpreter->tensor(idx)->name) == 0) {
-					mInputLayerId.push_back(idx);
-					break;
-				}
-			}
-		}
-	}
-
-	// output tensor
-	if (mInterpreter->outputs().size()) {
-		mOutputLayerId = mInterpreter->outputs();
-	} else {
-		std::vector<std::string>::iterator iter;
-		mOutputLayerId.clear();
-		for (iter = mOutputLayer.begin(); iter != mOutputLayer.end(); ++iter) {
-			LOGI("mOutputLayer list [%s]", (*iter).c_str());
-			for (unsigned int idx = 0; idx < mInterpreter->tensors_size(); ++idx) {
-				if (mInterpreter->tensor(idx)->name == NULL)
-					continue;
-				if ((*iter).compare(mInterpreter->tensor(idx)->name) == 0) {
-					mOutputLayerId.push_back(idx);
-					break;
-				}
-			}
-		}
-	}
-
-	if (mInterpreter->AllocateTensors() != kTfLiteOk) {
-		LOGE("Fail to allocate tensor");
-		return INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY;
-	}
-
-	for (unsigned int idx = 0; idx < mInputLayerId.size(); ++idx ) {
-		mInputAttrType.push_back(mInterpreter->tensor(mInputLayerId[idx])->type);
-	}
-
-	return ret;
-}

-int InferenceTFLite::GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
-{
-	LOGI("ENTER");
-
-	if (mInputTensorInfo.empty()) {
-		SetInterpreterInfo();
-	}
-
-	mInputData.clear();
-
-	void *pBuff = NULL;
-
-	for (unsigned int idx = 0; idx < mInputLayerId.size(); ++idx ) {
-		size_t size = 1;
-		inference_engine_tensor_buffer buffer;
-		for (std::vector<size_t>::iterator iter = mInputTensorInfo[idx].shape.begin();
-			iter != mInputTensorInfo[idx].shape.end(); ++iter) {
-			size *= (*iter);
-		}
-		if (mInputAttrType[idx] == kTfLiteUInt8) {
-			mInputData.push_back(mInterpreter->typed_tensor<uint8_t>(mInputLayerId[idx]));
-			pBuff = mInputData.back();
-			buffer = {pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1};
-		}
-		else if (mInputAttrType[idx] == kTfLiteFloat32) {
-			mInputData.push_back(mInterpreter->typed_tensor<float>(mInputLayerId[idx]));
-			pBuff = mInputData.back();
-			buffer = {pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1};
-		}
-		else {
-			LOGE("Not supported");
-			return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
-		}
-		buffers.push_back(buffer);
-	}
-
-	return INFERENCE_ENGINE_ERROR_NONE;
-}

-int InferenceTFLite::GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
-{
-	void *pBuff = NULL;
-
-	for (unsigned int idx = 0; idx < mOutputLayerId.size(); ++idx) {
-		inference_engine_tensor_buffer buffer;
-		size_t size = 1;
-		for (int idx2 = 0; idx2 < mInterpreter->tensor(mOutputLayerId[idx])->dims->size; ++idx2) {
-			size *= mInterpreter->tensor(mOutputLayerId[idx])->dims->data[idx2];
-		}
-
-		if (mInterpreter->tensor(mOutputLayerId[idx])->type == kTfLiteUInt8) {
-			LOGI("type is kTfLiteUInt8");
-			pBuff = (void*)mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[idx]);
-			buffer = {pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1};
-		}
-		else if (mInterpreter->tensor(mOutputLayerId[idx])->type == kTfLiteFloat32) {
-			LOGI("type is kTfLiteFloat32");
-			pBuff = (void*)mInterpreter->typed_tensor<float>(mOutputLayerId[idx]);
-			buffer = {pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1};
-		}
-		else {
-			LOGE("Not supported");
-			return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
-		}
-
-		buffers.push_back(buffer);
-	}
-	return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceTFLite::GetInputLayerProperty(inference_engine_layer_property &property)
-{
-	LOGI("ENTER");
-
-	SetInterpreterInfo();
-	property.layer_names = mInputLayer;
-	property.tensor_infos = mInputTensorInfo;
-
-	LOGI("LEAVE");
+	int InferenceTFLite::GetOutputLayerProperty(
+			inference_engine_layer_property &property)
+	{
+		LOGI("ENTER");
+
+		std::vector<inference_engine_tensor_info>().swap(mOutputTensorInfo);
+
+		for (std::vector<int>::iterator iter = mOutputLayerId.begin();
+			 iter != mOutputLayerId.end(); ++iter) {
+			LOGI("output layer ID: %d", (*iter));
+			if ((*iter) < 0) {
+				LOGE("Invalid output layer");
+				return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+			}
+
+			mOutputLayer.push_back(mInterpreter->tensor((*iter))->name);
+
+			inference_engine_tensor_info tensor_info;
+
+			LOGI("mInterpreter->tensor((*iter))->dims name[%s]",
+				 mInterpreter->tensor((*iter))->name);
+			LOGI("mInterpreter->tensor((*iter))->dims size[%d]",
+				 mInterpreter->tensor((*iter))->dims->size);
+			LOGI("mInterpreter->tensor((*iter))->dims type[%d]",
+				 mInterpreter->tensor((*iter))->type);
+
+			std::vector<size_t> shape_nhwc;
+			for (int idx = 0; idx < mInterpreter->tensor((*iter))->dims->size;
+				 idx++) {
+				shape_nhwc.push_back(
+						mInterpreter->tensor((*iter))->dims->data[idx]);
+			}
+
+			//tflite only supports NHWC (https://www.tensorflow.org/lite/guide/ops_compatibility).
+			tensor_info.shape = shape_nhwc;
+			tensor_info.shape_type = INFERENCE_TENSOR_SHAPE_NHWC;
+			if (mInterpreter->tensor((*iter))->type == kTfLiteUInt8) {
+				LOGI("type is kTfLiteUInt8");
+				tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
+			} else if (mInterpreter->tensor((*iter))->type == kTfLiteFloat32) {
+				LOGI("type is kTfLiteFloat32");
+				tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+			} else {
+				LOGE("Not supported");
+				return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+			}
+			tensor_info.size = 1;
+			for (std::vector<size_t>::iterator iter2 =
+						 tensor_info.shape.begin();
+				 iter2 != tensor_info.shape.end(); ++iter2) {
+				tensor_info.size *= (*iter2);
+			}
+			mOutputTensorInfo.push_back(tensor_info);
+		}
+
+		property.layer_names = mOutputLayer;
+		property.tensor_infos = mOutputTensorInfo;
+
+		LOGI("LEAVE");
+		return INFERENCE_ENGINE_ERROR_NONE;
+	}

-	return INFERENCE_ENGINE_ERROR_NONE;
-}
+	int InferenceTFLite::SetInputLayerProperty(
+			inference_engine_layer_property &property)
+	{
+		LOGI("ENTER");

-int InferenceTFLite::GetOutputLayerProperty(inference_engine_layer_property &property)
-{
-	LOGI("ENTER");
-
-	std::vector<inference_engine_tensor_info>().swap(mOutputTensorInfo);
-
-	for (std::vector<int>::iterator iter = mOutputLayerId.begin(); iter != mOutputLayerId.end(); ++iter) {
-		LOGI("output layer ID: %d", (*iter));
-		if((*iter) < 0) {
-			LOGE("Invalid output layer");
-			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-		}
-
-		mOutputLayer.push_back(mInterpreter->tensor((*iter))->name);
-
-		inference_engine_tensor_info tensor_info;
-
-		LOGI("mInterpreter->tensor((*iter))->dims name[%s]", mInterpreter->tensor((*iter))->name);
-		LOGI("mInterpreter->tensor((*iter))->dims size[%d]", mInterpreter->tensor((*iter))->dims->size);
-		LOGI("mInterpreter->tensor((*iter))->dims type[%d]", mInterpreter->tensor((*iter))->type);
-
-		std::vector<size_t> shape_nhwc;
-		for (int idx = 0; idx <mInterpreter->tensor((*iter))->dims->size; idx++) {
-			shape_nhwc.push_back(mInterpreter->tensor((*iter))->dims->data[idx]);
-		}
-
-		//tflite only supports NHWC (https://www.tensorflow.org/lite/guide/ops_compatibility).
-		tensor_info.shape = shape_nhwc;
-		tensor_info.shape_type = INFERENCE_TENSOR_SHAPE_NHWC;
-		if (mInterpreter->tensor((*iter))->type == kTfLiteUInt8) {
-			LOGI("type is kTfLiteUInt8");
-			tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
-		}
-		else if (mInterpreter->tensor((*iter))->type == kTfLiteFloat32) {
-			LOGI("type is kTfLiteFloat32");
-			tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-		}
-		else {
-			LOGE("Not supported");
-			return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
-		}
-		tensor_info.size = 1;
-		for (std::vector<size_t>::iterator iter2 = tensor_info.shape.begin();
-			iter2 != tensor_info.shape.end(); ++iter2) {
-			tensor_info.size *= (*iter2);
-		}
-		mOutputTensorInfo.push_back(tensor_info);
-	}
-
-	property.layer_names = mOutputLayer;
-	property.tensor_infos = mOutputTensorInfo;
-
-	LOGI("LEAVE");
-	return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceTFLite::SetInputLayerProperty(inference_engine_layer_property &property)
-{
-	LOGI("ENTER");
+		std::vector<std::string>::iterator iter;
+		for (iter = property.layer_names.begin();
+			 iter != property.layer_names.end(); iter++) {
+			std::string name = *iter;
+			LOGI("input layer name = %s", name.c_str());
+		}

-	std::vector<std::string>::iterator iter;
-	for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) {
-		std::string name = *iter;
-		LOGI("input layer name = %s", name.c_str());
-	}
+		mInputLayer.clear();
+		std::vector<std::string>().swap(mInputLayer);

-	mInputLayer.clear();
-	std::vector<std::string>().swap(mInputLayer);
+		mInputTensorInfo.clear();
+		std::vector<inference_engine_tensor_info>().swap(mInputTensorInfo);

-	mInputTensorInfo.clear();
-	std::vector<inference_engine_tensor_info>().swap(mInputTensorInfo);
+		mInputLayer = property.layer_names;
+		mInputTensorInfo = property.tensor_infos;

-	mInputLayer = property.layer_names;
-	mInputTensorInfo = property.tensor_infos;
+		LOGI("LEAVE");

-	LOGI("LEAVE");
+		return INFERENCE_ENGINE_ERROR_NONE;
+	}

-	return INFERENCE_ENGINE_ERROR_NONE;
-}
+	int InferenceTFLite::SetOutputLayerProperty(
+			inference_engine_layer_property &property)
+	{
+		LOGI("ENTER");

-int InferenceTFLite::SetOutputLayerProperty(inference_engine_layer_property &property)
-{
-	LOGI("ENTER");
+		std::vector<std::string>::iterator iter;
+		for (iter = property.layer_names.begin();
+			 iter != property.layer_names.end(); iter++) {
+			std::string name = *iter;
+			LOGI("output layer name = %s", name.c_str());
+		}

-	std::vector<std::string>::iterator iter;
-	for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) {
-		std::string name = *iter;
-		LOGI("output layer name = %s", name.c_str());
-	}
+		mOutputLayer.clear();
+		std::vector<std::string>().swap(mOutputLayer);

-	mOutputLayer.clear();
-	std::vector<std::string>().swap(mOutputLayer);
+		mOutputLayer = property.layer_names;

-	mOutputLayer = property.layer_names;
+		LOGI("LEAVE");

-	LOGI("LEAVE");
+		return INFERENCE_ENGINE_ERROR_NONE;
+	}

-	return INFERENCE_ENGINE_ERROR_NONE;
-}
+	int InferenceTFLite::GetBackendCapacity(inference_engine_capacity *capacity)
+	{
+		LOGI("ENTER");

-int InferenceTFLite::GetBackendCapacity(inference_engine_capacity *capacity)
-{
-	LOGI("ENTER");
+		if (capacity == NULL) {
+			LOGE("Bad pointer.");
+			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+		}

-	if (capacity == NULL) {
-		LOGE("Bad pointer.");
-		return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
-	}
+		capacity->supported_accel_devices = INFERENCE_TARGET_CPU;

-	capacity->supported_accel_devices = INFERENCE_TARGET_CPU;
+		LOGI("LEAVE");

-	LOGI("LEAVE");
+		return INFERENCE_ENGINE_ERROR_NONE;
+	}

-	return INFERENCE_ENGINE_ERROR_NONE;
-}
+	int InferenceTFLite::Run(
+			std::vector<inference_engine_tensor_buffer> &input_buffers,
+			std::vector<inference_engine_tensor_buffer> &output_buffers)
+	{
+		LOGI("ENTER");
+		TfLiteStatus status = mInterpreter->Invoke();

-int InferenceTFLite::Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
-	std::vector<inference_engine_tensor_buffer> &output_buffers)
-{
-	LOGI("ENTER");
-	TfLiteStatus status = mInterpreter->Invoke();
+		if (status != kTfLiteOk) {
+			LOGE("Fail to invoke with kTfLiteError");
+			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+		}

-	if (status != kTfLiteOk) {
-		LOGE("Fail to invoke with kTfLiteError");
-		return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-	}
-
-	LOGI("LEAVE");
-	return INFERENCE_ENGINE_ERROR_NONE;
-}
+		LOGI("LEAVE");
+		return INFERENCE_ENGINE_ERROR_NONE;
+	}

-int InferenceTFLite::SetInterpreterInfo()
-{
-	if (mInputLayer.empty() || mInputTensorInfo.empty()) {
-		LOGI("mInputLayer is empty. layers and tensors that mInterpreter has will be returned.");
-
-		mInputLayer.clear();
-		std::vector<std::string>().swap(mInputLayer);
-
-		mInputTensorInfo.clear();
-		std::vector<inference_engine_tensor_info>().swap(mInputTensorInfo);
-
-		for (auto iter = mInputLayerId.begin(); iter != mInputLayerId.end(); ++iter) {
-			mInputLayer.push_back(mInterpreter->tensor((*iter))->name);
-
-			std::vector<size_t> shape_nhwc;
-
-			for (int idx = 0; idx <mInterpreter->tensor((*iter))->dims->size; idx++) {
-				shape_nhwc.push_back(mInterpreter->tensor((*iter))->dims->data[idx]);
-			}
-
-			inference_engine_tensor_info tensor_info {
-				shape_nhwc, INFERENCE_TENSOR_SHAPE_NHWC, INFERENCE_TENSOR_DATA_TYPE_NONE, 1
-			};
-
-			if (mInterpreter->tensor((*iter))->type == kTfLiteUInt8) {
-				LOGI("type is kTfLiteUInt8");
-				tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
-			}
-			else if (mInterpreter->tensor((*iter))->type == kTfLiteFloat32) {
-				LOGI("type is kTfLiteFloat32");
-				tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-			}
-			else {
-				LOGE("Not supported");
-				return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
-			}
-
-			for (auto iter2 : tensor_info.shape)
-			{
-				tensor_info.size *= iter2;
-			}
-			mInputTensorInfo.push_back(tensor_info);
-		}
-	}
-
-	return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-extern "C"
-{
-class IInferenceEngineCommon* EngineCommonInit(void)
-{
-	InferenceTFLite *engine = new InferenceTFLite();
-	return engine;
-}
+	int InferenceTFLite::SetInterpreterInfo()
+	{
+		if (mInputLayer.empty() || mInputTensorInfo.empty()) {
+			LOGI("mInputLayer is empty. layers and tensors that mInterpreter has will be returned.");
+
+			mInputLayer.clear();
+			std::vector<std::string>().swap(mInputLayer);
+
+			mInputTensorInfo.clear();
+			std::vector<inference_engine_tensor_info>().swap(mInputTensorInfo);
+
+			for (auto iter = mInputLayerId.begin(); iter != mInputLayerId.end();
+				 ++iter) {
+				mInputLayer.push_back(mInterpreter->tensor((*iter))->name);
+
+				std::vector<size_t> shape_nhwc;
+
+				for (int idx = 0;
+					 idx < mInterpreter->tensor((*iter))->dims->size; idx++) {
+					shape_nhwc.push_back(
+							mInterpreter->tensor((*iter))->dims->data[idx]);
+				}
+
+				inference_engine_tensor_info tensor_info {
+					shape_nhwc, INFERENCE_TENSOR_SHAPE_NHWC,
+					INFERENCE_TENSOR_DATA_TYPE_NONE, 1
+				};
+
+				if (mInterpreter->tensor((*iter))->type == kTfLiteUInt8) {
+					LOGI("type is kTfLiteUInt8");
+					tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
+				} else if (mInterpreter->tensor((*iter))->type ==
+						   kTfLiteFloat32) {
+					LOGI("type is kTfLiteFloat32");
+					tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+				} else {
+					LOGE("Not supported");
+					return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+				}
+
+				for (auto iter2 : tensor_info.shape) {
+					tensor_info.size *= iter2;
+				}
+				mInputTensorInfo.push_back(tensor_info);
+			}
+		}
+
+		return INFERENCE_ENGINE_ERROR_NONE;
+	}

-void EngineCommonDestroy(class IInferenceEngineCommon *engine)
-{
-	delete engine;
-}
-}
+	extern "C"
+	{
+		class IInferenceEngineCommon *EngineCommonInit(void)
+		{
+			InferenceTFLite *engine = new InferenceTFLite();
+			return engine;
+		}
+
+		void EngineCommonDestroy(class IInferenceEngineCommon *engine)
+		{
+			delete engine;
+		}
+	}
 } /* TFLiteImpl */
 } /* InferenceEngineImpl */
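For context on the extern "C" block at the bottom of the diff: EngineCommonInit() and EngineCommonDestroy() are the C entry points the backend exports so the common inference-engine layer can create and tear down the engine object after loading the plugin. A minimal sketch of such a loader follows; the shared-object name and the dlopen-based loading are assumptions for illustration, not code from this repository:

	#include <dlfcn.h>
	#include <cstdio>

	class IInferenceEngineCommon;   // opaque to the loader

	using init_fn = IInferenceEngineCommon *(*)(void);
	using destroy_fn = void (*)(IInferenceEngineCommon *);

	int main()
	{
		// Hypothetical plugin name; the real name and location are defined
		// by the inference-engine packaging, not by this patch.
		void *handle = dlopen("libinference-engine-tflite.so", RTLD_NOW);
		if (!handle) {
			std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
			return 1;
		}

		// Resolve the two entry points exported in the diff above.
		auto init = reinterpret_cast<init_fn>(dlsym(handle, "EngineCommonInit"));
		auto destroy = reinterpret_cast<destroy_fn>(dlsym(handle, "EngineCommonDestroy"));

		if (init && destroy) {
			IInferenceEngineCommon *engine = init();   // new InferenceTFLite()
			destroy(engine);                           // delete engine
		}

		dlclose(handle);
		return 0;
	}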