author    | Inki Dae <inki.dae@samsung.com> | 2022-08-12 15:34:17 +0900
committer | Inki Dae <inki.dae@samsung.com> | 2022-08-12 15:34:17 +0900
commit    | 2d1356841e56eece0f52305dcd4023f214e08ac1 (patch)
tree      | 2315998e1d2d1f4ab21e1c9569cd96c4a09401f6
parent    | 41bf477305a1202dcf14d9c7e44dd305a9dac564 (diff)
drop code duplication (refs: submit/tizen/20220812.070529, tizen_devel)
[Version] : 0.4.5-0
[Issue type] : code cleanup
Drop the code duplication between GetInputTensorBuffers() and
GetOutputTensorBuffers() by factoring the common per-layer tensor buffer
setup out into a new GetTensorInfo() helper.
Change-Id: Ic41991cb335b12bd21a6caba159b5b2bc5881576
Signed-off-by: Inki Dae <inki.dae@samsung.com>
-rw-r--r-- | packaging/inference-engine-mlapi.spec |   2
-rw-r--r-- | src/inference_engine_mlapi.cpp        | 110
-rw-r--r-- | src/inference_engine_mlapi_private.h  |   3
3 files changed, 43 insertions, 72 deletions
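Below is a minimal, self-contained sketch of the refactoring pattern this commit applies: the per-layer loop that was duplicated in GetInputTensorBuffers() and GetOutputTensorBuffers() moves into one shared helper, and each caller passes only its own designated-layer map and data handle. TensorBuffer and DataHandle are simplified, hypothetical stand-ins for inference_engine_tensor_buffer and the ml_tensors_* handles, not the real backend or ML Single API types; the actual helper also queries the tensor type via ml_tensors_info_get_tensor_type(), as shown in the diff below.

// Sketch only: stand-in types, not the real inference-engine-mlapi code.
#include <cstddef>
#include <iostream>
#include <map>
#include <string>
#include <utility>

struct TensorBuffer {               // stand-in for inference_engine_tensor_buffer
    void *buffer = nullptr;
    size_t size = 0;
};

struct DataHandle {                 // stand-in for ml_tensors_data_h
    std::map<int, TensorBuffer> tensors;
};

// Shared helper: the formerly duplicated per-layer work now lives here once.
static int GetTensorInfo(std::map<std::string, int> &designated_layers,
                         std::map<std::string, TensorBuffer> &buffers,
                         DataHandle &data_handle)
{
    for (auto &layer : designated_layers) {
        auto it = data_handle.tensors.find(layer.second);
        if (it == data_handle.tensors.end())
            return -1;              // report failure; the caller decides how to clean up

        buffers.insert(std::make_pair(layer.first, it->second));
    }

    return 0;
}

// The two entry points keep only what differs: which designated-layer map and
// which data handle they hand to the shared helper.
static int GetInputTensorBuffers(std::map<std::string, int> &inputs,
                                 DataHandle &input_data,
                                 std::map<std::string, TensorBuffer> &buffers)
{
    return GetTensorInfo(inputs, buffers, input_data);
}

static int GetOutputTensorBuffers(std::map<std::string, int> &outputs,
                                  DataHandle &output_data,
                                  std::map<std::string, TensorBuffer> &buffers)
{
    return GetTensorInfo(outputs, buffers, output_data);
}

int main()
{
    std::map<std::string, int> inputs = { { "input_tensor", 0 } };
    DataHandle input_data;
    input_data.tensors[0].size = 128;   // pretend the backend allocated 128 bytes

    std::map<std::string, TensorBuffer> in_buffers;
    if (GetInputTensorBuffers(inputs, input_data, in_buffers) == 0)
        std::cout << "mapped " << in_buffers.size() << " input buffer(s)" << std::endl;

    std::map<std::string, int> outputs = { { "output_tensor", 0 } };
    DataHandle output_data;
    output_data.tensors[0].size = 256;

    std::map<std::string, TensorBuffer> out_buffers;
    if (GetOutputTensorBuffers(outputs, output_data, out_buffers) == 0)
        std::cout << "mapped " << out_buffers.size() << " output buffer(s)" << std::endl;

    return 0;
}

As in the patch itself, the helper only reports failure; each caller keeps ownership of its own data handle and destroys it (ml_tensors_data_destroy()) when the helper returns an error.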
diff --git a/packaging/inference-engine-mlapi.spec b/packaging/inference-engine-mlapi.spec
index 1bf2368..d37f2c9 100644
--- a/packaging/inference-engine-mlapi.spec
+++ b/packaging/inference-engine-mlapi.spec
@@ -1,6 +1,6 @@
 Name: inference-engine-mlapi
 Summary: ML Single API backend of NNStreamer for MediaVision
-Version: 0.4.4
+Version: 0.4.5
 Release: 0
 Group: Multimedia/Libraries
 License: Apache-2.0
diff --git a/src/inference_engine_mlapi.cpp b/src/inference_engine_mlapi.cpp
index b0ee2f5..b23ffe3 100644
--- a/src/inference_engine_mlapi.cpp
+++ b/src/inference_engine_mlapi.cpp
@@ -387,54 +387,29 @@ namespace MLAPIImpl
         return err;
     }
 
-    int InferenceMLAPI::GetInputTensorBuffers(
-            std::map<std::string, inference_engine_tensor_buffer> &buffers)
+    int InferenceMLAPI::GetTensorInfo(std::map<std::string, int>& designated_layers,
+            std::map<std::string, inference_engine_tensor_buffer> &buffers,
+            ml_tensors_data_h& dataHandle, ml_tensors_info_h& infoHandle)
     {
-        LOGI("ENTER");
-
-        // TODO. Implement this function according to a given ML Single API backend properly.
-
-        // ML Single API will always provide internal tensor buffers so
-        // get the tensor buffers back to Mediavision framework so that
-        // Mediavision framework doesn't allocate the tensor buffers internally.
-
-        buffers.clear();
-
-        int ret = INFERENCE_ENGINE_ERROR_NONE;
-
-        // TODO. Below is test code, should we allocate new buffer for every inference?
-        if (mInputDataHandle == NULL) {
-            ret = ml_tensors_data_create(mInputInfoHandle, &mInputDataHandle);
-            if (ret != ML_ERROR_NONE) {
-                LOGE("Failed to request ml_tensors_data_create(%d).", ret);
-                return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-            }
-        }
-
-        // TODO. Cache tensor info and reduce function call in UpdateTensorsInfo()
-        for (auto& input : mDesignated_inputs) {
+        for (auto& layer : designated_layers) {
             inference_engine_tensor_buffer in_buffer;
             ml_tensor_type_e in_type;
 
-            ret = ml_tensors_data_get_tensor_data(mInputDataHandle, input.second, &in_buffer.buffer, &in_buffer.size);
+            int ret = ml_tensors_data_get_tensor_data(dataHandle, layer.second, &in_buffer.buffer, &in_buffer.size);
             if (ret != ML_ERROR_NONE) {
                 LOGE("Failed to request ml_tensors_data_get_tensor_data(%d).", ret);
-                ml_tensors_data_destroy(mInputDataHandle);
-
                 return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
             }
 
             LOGE("buffer = %p, size = %zu\n", in_buffer.buffer, in_buffer.size);
 
-            ret = ml_tensors_info_get_tensor_type(mInputInfoHandle, input.second, &in_type);
+            ret = ml_tensors_info_get_tensor_type(infoHandle, layer.second, &in_type);
             if (ret != ML_ERROR_NONE) {
                 LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).", ret);
-                ml_tensors_data_destroy(mInputDataHandle);
-
                 return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
             }
 
-            LOGI("input tensor type = %d", in_type);
+            LOGI("tensor type = %d", in_type);
 
             int type = 0;
 
@@ -442,28 +417,24 @@ namespace MLAPIImpl
                 type = ConvertTensorTypeToInternal(in_type);
             } catch (const std::invalid_argument& ex) {
                 LOGE("Error (%s) (%d)", ex.what(), in_type);
-                ml_tensors_data_destroy(mInputDataHandle);
-
                 return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
             }
 
             in_buffer.data_type = static_cast<inference_tensor_data_type_e>(type);
             in_buffer.owner_is_backend = 1;
 
-            buffers.insert(std::make_pair(input.first, in_buffer));
+            buffers.insert(std::make_pair(layer.first, in_buffer));
         }
 
-        LOGI("LEAVE");
-
         return INFERENCE_ENGINE_ERROR_NONE;
     }
 
-    int InferenceMLAPI::GetOutputTensorBuffers(
+    int InferenceMLAPI::GetInputTensorBuffers(
             std::map<std::string, inference_engine_tensor_buffer> &buffers)
     {
         LOGI("ENTER");
 
-        // TODO. Need to check if model file loading is done.
+        // TODO. Implement this function according to a given ML Single API backend properly.
 
         // ML Single API will always provide internal tensor buffers so
         // get the tensor buffers back to Mediavision framework so that
@@ -474,8 +445,8 @@ namespace MLAPIImpl
         int ret = INFERENCE_ENGINE_ERROR_NONE;
 
         // TODO. Below is test code, should we allocate new buffer for every inference?
-        if (mOutputDataHandle == NULL) {
-            ret = ml_tensors_data_create(mOutputInfoHandle, &mOutputDataHandle);
+        if (mInputDataHandle == NULL) {
+            ret = ml_tensors_data_create(mInputInfoHandle, &mInputDataHandle);
             if (ret != ML_ERROR_NONE) {
                 LOGE("Failed to request ml_tensors_data_create(%d).", ret);
                 return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
@@ -483,47 +454,44 @@ namespace MLAPIImpl
         }
 
         // TODO. Cache tensor info and reduce function call in UpdateTensorsInfo()
-        for (auto& output : mDesignated_outputs) {
-            inference_engine_tensor_buffer out_buffer;
-            ml_tensor_type_e out_type;
-
-            ret = ml_tensors_data_get_tensor_data(mOutputDataHandle, output.second, &out_buffer.buffer, &out_buffer.size);
-            if (ret != ML_ERROR_NONE) {
-                LOGE("Failed to request ml_tensors_data_get_tensor_data(%d).", ret);
-                ml_tensors_data_destroy(mOutputDataHandle);
+        ret = GetTensorInfo(mDesignated_inputs, buffers, mInputDataHandle, mInputInfoHandle);
+        if (ret != INFERENCE_ENGINE_ERROR_NONE)
+            ml_tensors_data_destroy(mInputDataHandle);
 
-                return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-            }
+        LOGI("LEAVE");
 
-            LOGE("buffer = %p, size = %zu\n", out_buffer.buffer, out_buffer.size);
+        return INFERENCE_ENGINE_ERROR_NONE;
+    }
 
-            ret = ml_tensors_info_get_tensor_type(mOutputInfoHandle, output.second, &out_type);
-            if (ret != ML_ERROR_NONE) {
-                LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).", ret);
-                ml_tensors_data_destroy(mOutputDataHandle);
+    int InferenceMLAPI::GetOutputTensorBuffers(
+            std::map<std::string, inference_engine_tensor_buffer> &buffers)
+    {
+        LOGI("ENTER");
 
-                return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-            }
+        // TODO. Need to check if model file loading is done.
 
-            LOGI("output tensor type = %d", out_type);
+        // ML Single API will always provide internal tensor buffers so
+        // get the tensor buffers back to Mediavision framework so that
+        // Mediavision framework doesn't allocate the tensor buffers internally.
 
-            int type = 0;
+        buffers.clear();
 
-            try {
-                type = ConvertTensorTypeToInternal(out_type);
-            } catch (const std::invalid_argument& ex) {
-                LOGE("Error (%s) (%d)", ex.what(), out_type);
-                ml_tensors_data_destroy(mOutputDataHandle);
+        int ret = INFERENCE_ENGINE_ERROR_NONE;
 
-                return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+        // TODO. Below is test code, should we allocate new buffer for every inference?
+        if (mOutputDataHandle == NULL) {
+            ret = ml_tensors_data_create(mOutputInfoHandle, &mOutputDataHandle);
+            if (ret != ML_ERROR_NONE) {
+                LOGE("Failed to request ml_tensors_data_create(%d).", ret);
+                return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
             }
-
-            out_buffer.data_type = static_cast<inference_tensor_data_type_e>(type);
-            out_buffer.owner_is_backend = 1;
-
-            buffers.insert(std::make_pair(output.first, out_buffer));
         }
+        // TODO. Cache tensor info and reduce function call in UpdateTensorsInfo()
+        ret = GetTensorInfo(mDesignated_outputs, buffers, mOutputDataHandle, mOutputInfoHandle);
+        if (ret != INFERENCE_ENGINE_ERROR_NONE)
+            ml_tensors_data_destroy(mOutputDataHandle);
+
         LOGI("LEAVE");
 
         return INFERENCE_ENGINE_ERROR_NONE;
diff --git a/src/inference_engine_mlapi_private.h b/src/inference_engine_mlapi_private.h
index be74a6c..60d2570 100644
--- a/src/inference_engine_mlapi_private.h
+++ b/src/inference_engine_mlapi_private.h
@@ -91,6 +91,9 @@ namespace MLAPIImpl
         std::tuple<ml_nnfw_type_e, ml_nnfw_hw_e> GetNNFWInfo();
         std::string GetModelPath(const std::vector<std::string>& model_paths);
         const char *GetCustomProp();
+        int GetTensorInfo(std::map<std::string, int>& designated_layers,
+                std::map<std::string, inference_engine_tensor_buffer> &buffers,
+                ml_tensors_data_h& dataHandle, ml_tensors_info_h& infoHandle);
 
         int mPluginType;
         int mTargetDevice;