author     Inki Dae <inki.dae@samsung.com>  2020-05-21 16:38:54 +0900
committer  Inki Dae <inki.dae@samsung.com>  2020-06-26 08:58:22 +0900
commit     07fbae0891a66e50937de9cdcdf64c376e575f51 (patch)
tree       8a7ad6305fd09a7bad9933ca26714a1215a7a708
parent     426af98316759abf2125b4fea75e98a528c5aaf8 (diff)
mv_inference: Add Vivante NPU type
This patch adds a new type for Vivante NPU support.

On MediaVision, the Vivante NPU will be controlled by the inference-engine-mlapi backend, which interfaces with the ML Single API of NNStreamer.

P.S. Vivante NPU needs two binaries for inference: one is the Vivante NPU-specific model file (xxx.nb), and the other is a shared library (.so) that implements the graph-creation API specific to the Vivante NPU.

Change-Id: I1ec157d533b70b93953d25b2f74bbbd9adde7a05
Signed-off-by: Inki Dae <inki.dae@samsung.com>
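For context, here is a minimal sketch of how an application would hand those two binaries to MediaVision and select the new backend. The file paths are placeholders and error handling is trimmed to the essentials; the attribute keys and calls are the existing public MediaVision C API.

#include <mv_common.h>
#include <mv_inference.h>

/* Sketch: target the new Vivante backend. Paths are placeholders. */
static int configure_vivante(mv_inference_h infer)
{
	mv_engine_config_h cfg = NULL;
	int ret = mv_create_engine_config(&cfg);
	if (ret != MEDIA_VISION_ERROR_NONE)
		return ret;

	/* The Vivante-specific model binary (.nb) goes in as the weight file. */
	mv_engine_config_set_string_attribute(cfg,
			MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
			"/usr/share/model/model.nb");

	/* The graph-creation shared library rides in the config-file slot. */
	mv_engine_config_set_string_attribute(cfg,
			MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
			"/usr/share/model/libmodel_graph.so");

	mv_engine_config_set_int_attribute(cfg,
			MV_INFERENCE_BACKEND_TYPE,
			MV_INFERENCE_BACKEND_VIVANTE);

	ret = mv_inference_configure(infer, cfg);
	mv_destroy_engine_config(cfg);
	return ret;
}

A subsequent mv_inference_prepare() drives Inference::Load(), where the new INFERENCE_MODEL_VIVANTE case below forwards both files to the backend.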
-rw-r--r--  include/mv_inference_type.h              | 1 +
-rwxr-xr-x  mv_inference/inference/src/Inference.cpp | 3 +++
2 files changed, 4 insertions(+), 0 deletions(-)
diff --git a/include/mv_inference_type.h b/include/mv_inference_type.h
index 41a9e9de..c014e29b 100644
--- a/include/mv_inference_type.h
+++ b/include/mv_inference_type.h
@@ -44,6 +44,7 @@ typedef enum {
 	MV_INFERENCE_BACKEND_OPENCV, /**< OpenCV */
 	MV_INFERENCE_BACKEND_TFLITE, /**< TensorFlow-Lite */
 	MV_INFERENCE_BACKEND_ARMNN, /**< ARMNN (Since 6.0) */
+	MV_INFERENCE_BACKEND_VIVANTE, /**< Vivante (Since 6.0) */
 	MV_INFERENCE_BACKEND_MAX /**< Backend MAX */
 } mv_inference_backend_type_e;
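The new enumerator lands just before MV_INFERENCE_BACKEND_MAX, so existing range checks continue to accept every valid backend, Vivante included. A hypothetical check (not part of this patch) illustrating that invariant:

/* Hypothetical helper, not from this patch: any value in
 * [MV_INFERENCE_BACKEND_OPENCV, MV_INFERENCE_BACKEND_MAX) is a valid
 * backend, which now includes MV_INFERENCE_BACKEND_VIVANTE. */
static bool IsValidBackendType(int type)
{
	return type >= MV_INFERENCE_BACKEND_OPENCV &&
	       type < MV_INFERENCE_BACKEND_MAX;
}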
diff --git a/mv_inference/inference/src/Inference.cpp b/mv_inference/inference/src/Inference.cpp
index 5a12b350..d9bfb772 100755
--- a/mv_inference/inference/src/Inference.cpp
+++ b/mv_inference/inference/src/Inference.cpp
@@ -83,6 +83,7 @@ Inference::Inference() :
 	mSupportedInferenceBackend.insert(std::make_pair(MV_INFERENCE_BACKEND_OPENCV, std::make_pair("opencv", false)));
 	mSupportedInferenceBackend.insert(std::make_pair(MV_INFERENCE_BACKEND_TFLITE, std::make_pair("tflite", false)));
 	mSupportedInferenceBackend.insert(std::make_pair(MV_INFERENCE_BACKEND_ARMNN, std::make_pair("armnn", false)));
+	mSupportedInferenceBackend.insert(std::make_pair(MV_INFERENCE_BACKEND_VIVANTE, std::make_pair("nnstreamer", false)));

 	CheckSupportedInferenceBackend();
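Note the string half of the new pair is "nnstreamer": it names the plugin library that the inference-engine-interface layer loads at runtime. The actual resolution code lives outside this patch; assuming the usual libinference-engine-<name>.so naming scheme, it would look roughly like this:

// Sketch under an assumed naming scheme, not code from this patch:
// "nnstreamer" would resolve to "libinference-engine-nnstreamer.so",
// the plugin that wraps NNStreamer's ML Single API.
#include <dlfcn.h>
#include <string>

static void *LoadBackendPlugin(const std::string &backend_name)
{
	std::string lib = "libinference-engine-" + backend_name + ".so";
	return dlopen(lib.c_str(), RTLD_NOW);
}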
@@ -98,6 +99,7 @@ Inference::Inference() :
 	mModelFormats.insert(std::make_pair<std::string, int>("weights", INFERENCE_MODEL_DARKNET));
 	mModelFormats.insert(std::make_pair<std::string, int>("bin", INFERENCE_MODEL_DLDT));
 	mModelFormats.insert(std::make_pair<std::string, int>("onnx", INFERENCE_MODEL_ONNX));
+	mModelFormats.insert(std::make_pair<std::string, int>("nb", INFERENCE_MODEL_VIVANTE));

 	LOGI("LEAVE");
 }
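mModelFormats is keyed on the weight file's extension, so a *.nb path is what routes a model to INFERENCE_MODEL_VIVANTE. A hypothetical lookup mirroring that keying:

// Hypothetical helper mirroring how mModelFormats is consulted: the
// text after the final '.' selects the format, so "model.nb" resolves
// to INFERENCE_MODEL_VIVANTE; unknown extensions simply miss the map.
#include <map>
#include <string>

static int GetModelFormat(const std::map<std::string, int> &formats,
                          const std::string &path)
{
	std::string ext = path.substr(path.find_last_of('.') + 1);
	auto it = formats.find(ext);
	return it != formats.end() ? it->second : -1;
}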
@@ -815,6 +817,7 @@ int Inference::Load(void)
 	case INFERENCE_MODEL_DARKNET:
 	case INFERENCE_MODEL_DLDT:
 	case INFERENCE_MODEL_ONNX:
+	case INFERENCE_MODEL_VIVANTE:
 		models.push_back(mConfig.mWeightFilePath);
 		models.push_back(mConfig.mConfigFilePath);
 		break;
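The effect of the new case: a Vivante load passes both artifacts to the backend, weight file first. With placeholder paths, the vector handed over looks like:

// Sketch of the models vector for a Vivante load (placeholder paths;
// order matters: the .nb model first, then the graph-creation .so).
std::vector<std::string> models;
models.push_back("/usr/share/model/model.nb");          // mConfig.mWeightFilePath
models.push_back("/usr/share/model/libmodel_graph.so"); // mConfig.mConfigFilePath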