author     Inki Dae <inki.dae@samsung.com>  2020-06-04 14:24:39 +0900
committer  Inki Dae <inki.dae@samsung.com>  2020-06-26 08:58:22 +0900
commit  a7ca960719948ef5dff51ba6043376a07c6b3506 (patch)
tree    db0eb0e3861d08d1e40952ee30c14cd034e7174c
parent  be6a77af5db65bcde68dd4ee432549e029723caf (diff)
mv_inference: fix coding style based on Tizen C++ Coding Rule
Tizen C++ Coding Rule: https://wiki.tizen.org/Native_Platform_Coding_Idiom_and_Style_Guide#C.2B.2B_Coding_Style

Change-Id: Ifdb19e21f9ec67784213218a3f2465312aa43dbe
Signed-off-by: Inki Dae <inki.dae@samsung.com>
-rw-r--r-- [-rwxr-xr-x]  mv_inference/inference/include/Inference.h          |  617
-rw-r--r--               mv_inference/inference/include/InferenceIni.h       |   76
-rw-r--r-- [-rwxr-xr-x]  mv_inference/inference/include/mv_inference_open.h  |  975
-rw-r--r-- [-rwxr-xr-x]  mv_inference/inference/src/Inference.cpp            | 2244
-rw-r--r--               mv_inference/inference/src/InferenceIni.cpp         |  129
-rw-r--r-- [-rwxr-xr-x]  mv_inference/inference/src/mv_inference.c           |  127
-rw-r--r-- [-rwxr-xr-x]  mv_inference/inference/src/mv_inference_open.cpp    |  328
-rw-r--r--               test/testsuites/inference/inference_test_suite.c    | 4543
8 files changed, 4420 insertions, 4619 deletions
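
For orientation before the re-indented hunks below: the public entry points touched by this change are declared in mv_inference_open.h, and their @pre/@post notes spell out a fixed call order (create, configure, prepare, run a detection entry point, destroy). The following is only a hedged sketch of that order; mv_create_engine_config() and mv_destroy_engine_config() come from the Media Vision common API and, like the attribute setup, are assumptions rather than part of this diff.

	// Hedged sketch of the call order documented in mv_inference_open.h below; not part of this commit.
	#include <mv_common.h>     // assumed location of mv_create_engine_config()/mv_destroy_engine_config()
	#include <mv_inference.h>

	static int run_inference_once(mv_source_h source)
	{
		mv_engine_config_h cfg = nullptr;
		mv_inference_h infer = nullptr;

		int err = mv_create_engine_config(&cfg);            // assumed common API
		if (err != MEDIA_VISION_ERROR_NONE)
			return err;
		// ... set model path, backend and target attributes on cfg here ...

		err = mv_inference_create(&infer);                  // expected to dispatch to mv_inference_create_open()
		if (err == MEDIA_VISION_ERROR_NONE)
			err = mv_inference_configure(infer, cfg);   // model + engine configuration
		if (err == MEDIA_VISION_ERROR_NONE)
			err = mv_inference_prepare(infer);          // loads the model through the bound backend
		// A detection entry point such as mv_inference_image_classify(source, infer, roi,
		// callback, user_data) would run here once prepare() has succeeded.
		(void)source;

		if (infer)
			mv_inference_destroy(infer);
		mv_destroy_engine_config(cfg);                      // assumed common API
		return err;
	}
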
diff --git a/mv_inference/inference/include/Inference.h b/mv_inference/inference/include/Inference.h
index c2a7b2e3..895f6d59 100755..100644
--- a/mv_inference/inference/include/Inference.h
+++ b/mv_inference/inference/include/Inference.h
@@ -35,326 +35,321 @@
using namespace InferenceEngineInterface::Common;
typedef struct _ImageClassficationResults {
- int number_of_classes;
- std::vector<int> indices;
- std::vector<std::string> names;
- std::vector<float> confidences;
+ int number_of_classes;
+ std::vector<int> indices;
+ std::vector<std::string> names;
+ std::vector<float> confidences;
} ImageClassificationResults; /**< structure ImageClassificationResults */
typedef struct _ObjectDetectionResults {
- int number_of_objects;
- std::vector<int> indices;
- std::vector<std::string> names;
- std::vector<float> confidences;
- std::vector<cv::Rect> locations;
-} ObjectDetectionResults; /**< structure ObjectDetectionResults */
+ int number_of_objects;
+ std::vector<int> indices;
+ std::vector<std::string> names;
+ std::vector<float> confidences;
+ std::vector<cv::Rect> locations;
+} ObjectDetectionResults; /**< structure ObjectDetectionResults */
typedef struct _FaceDetectionResults {
- int number_of_faces;
- std::vector<float> confidences;
- std::vector<cv::Rect> locations;
-} FaceDetectionResults; /**< structure FaceDetectionResults */
+ int number_of_faces;
+ std::vector<float> confidences;
+ std::vector<cv::Rect> locations;
+} FaceDetectionResults; /**< structure FaceDetectionResults */
typedef struct _FacialLandMarkDetectionResults {
- int number_of_landmarks;
- std::vector<cv::Point> locations;
-} FacialLandMarkDetectionResults; /**< structure FacialLandMarkDetectionResults */
+ int number_of_landmarks;
+ std::vector<cv::Point> locations;
+} FacialLandMarkDetectionResults; /**< structure FacialLandMarkDetectionResults */
typedef struct _PoseEstimationResults {
- int number_of_pose_estimation;
- std::vector<cv::Point> locations;
-} PoseEstimationResults; /**< structure PoseEstimationResults */
-
-namespace mediavision {
-namespace inference {
-
-struct TensorInfo {
- int width;
- int height;
- int dim;
- int ch;
-};
-
-struct InferenceConfig {
- /**
- * @brief Default constructor for the @ref InferenceConfig
- *
- * @since_tizen 5.0
- */
- InferenceConfig();
-
- std::string mConfigFilePath; /**< Path of a model configuration file */
-
- std::string mWeightFilePath; /**< Path of a model weight file */
-
- std::string mUserFilePath; /**< Path of model user file */
-
- TensorInfo mTensorInfo; /**< Tensor information */
-
- mv_inference_data_type_e mDataType; /**< Data type of a input tensor */
-
- mv_inference_backend_type_e mBackedType; /**< Backed type of model files */
-
- int mTargetTypes; /**< Target type to run inference */
-
- double mConfidenceThresHold; /**< Confidence threshold value */
-
- double mMeanValue; /**< The mean value for normalization */
-
- double mStdValue; /**< The scale factor value for normalization */
-
- int mMaxOutputNumbers;
-
- std::vector<std::string> mInputLayerNames; /**< The input layer names */
- std::vector<std::string> mOutputLayerNames; /**< The output layer names */
-};
-
-
-class Inference {
-public:
- /**
- * @brief Creates an Inference class instance.
- *
- * @since_tizen 5.5
- */
- Inference();
-
- /**
- * @brief Destroys an Inference class instance including
- * its all resources.
- *
- * @since_tizen 5.5
- */
- ~Inference();
-
- /**
- * @brief Configure modelfiles
- *
- * @since_tizen 5.5
- */
- void ConfigureModelFiles(
- const std::string modelConfigFilePath,
- const std::string modelWeightFilePath,
- const std::string modelUserFilePath);
-
- /**
- * @brief Configure input tensor information
- *
- * @since_tizen 5.5
- * @remarks deprecated Replaced by ConfigureInputInfo
- */
- void ConfigureTensorInfo(int width,
- int height,
- int dim,
- int ch,
- double stdValue,
- double meanValue);
-
- /**
- * @brief Configure input information
- *
- * @since_tizen 6.0
- */
- void ConfigureInputInfo(int width,
- int height,
- int dim,
- int ch,
- double stdValue,
- double meanValue,
- int dataType,
- const std::vector<std::string> names);
-
- void ConfigureOutputInfo(std::vector<std::string> names);
-
- /**
- * @brief Configure inference backend type.
- *
- * @since_tizen 6.0
- */
- int ConfigureBackendType(const mv_inference_backend_type_e backendType);
-
- /**
- * @brief Configure a inference target device type such as CPU, GPU or NPU. (only one type can be set)
- * @details Internally, a given device type will be converted to new type.
- * This API is just used for backward compatibility.
- *
- * @since_tizen 6.0 (Deprecated)
- */
- int ConfigureTargetTypes(const int targetType);
-
- /**
- * @brief Configure inference target devices such as CPU, GPU or NPU. (one more types can be combined)
- *
- * @since_tizen 6.0
- */
- int ConfigureTargetDevices(const int targetDevices);
-
- /**
- * @brief Configure the maximum number of inference results
- *
- * @since_tizen 5.5
- */
- void ConfigureOutput(const int maxOutputNumbers);
-
- /**
- * @brief Configure the confidence threshold
- *
- * @since_tizen 5.5
- */
- void ConfigureThreshold(const double threshold);
-
- /**
- * @brief Bind a backend engine
- * @details Use this function to bind a backend engine for the inference.
- * This creates a inference engine common class object, and loads a backend
- * library which interfaces with a Neural Network runtime such as TF Lite,
- * OpenCV, ARMNN and so on.
- *
- * Ps. The created inference engine common object will be released and its
- * corresponding backend library will be unbound when the destructor
- * of the Inference class is called.
- *
- * @since_tizen 6.0
- *
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- */
- int Bind();
-
- /**
- * @brief Set default configuration for the inference
- * @details Use this function to set default configuration given in json file by user.
- *
- * Ps. this callback should be called after Bind callback.
- *
- * @since_tizen 6.0
- *
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- */
- int Prepare();
-
- /**
- * @brief Load model files
- * @details Use this function to load given model files for the inference.
- *
- * Ps. this callback should be called after Prepare callback.
- *
- * @since_tizen 6.0
- *
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- */
- int Load();
-
- /**
- * @brief Runs inference with a region of a given image
- * @details Use this function to run forward pass with the given image.
- * The given image is preprocessed and the region of the image is
- * thrown to neural network. Then, the output tensor is returned.
- * If roi is NULL, then full source will be analyzed.
- *
- * @since_tizen 5.5
- * @return @c true on success, otherwise a negative error value
- */
- int Run(std::vector<mv_source_h> &mvSources, std::vector<mv_rectangle_s> &rects);
-
- /**
- * @brief Gets that given engine is supported or not
- *
- * @since_tizen 5.5
- * @return @c true on success, otherwise a negative error value
- */
- std::pair<std::string, bool> GetSupportedInferenceBackend(int backend);
-
- /**
- * @brief Gets the ImageClassificationResults
- *
- * @since_tizen 5.5
- * @return @c true on success, otherwise a negative error value
- */
- int GetClassficationResults(ImageClassificationResults *classificationResults);
-
- /**
- * @brief Gets the ObjectDetectioResults
- *
- * @since_tizen 5.5
- * @return @c true on success, otherwise a negative error value
- */
- int GetObjectDetectionResults(ObjectDetectionResults *detectionResults);
-
- /**
- * @brief Gets the FaceDetectioResults
- *
- * @since_tizen 5.5
- * @return @c true on success, otherwise a negative error value
- */
- int GetFaceDetectionResults(FaceDetectionResults *detectionResults);
-
- /**
- * @brief Gets the FacialLandmarkDetectionResults
- *
- * @since_tizen 5.5
- * @return @c true on success, otherwise a negative error value
- */
- int GetFacialLandMarkDetectionResults(FacialLandMarkDetectionResults* results);
-
- /**
- * @brief Gets the PoseEstimationDetectionResults
- *
- * @since_tizen 6.0
- * @return @c true on success, otherwise a negative error value
- */
- int GetPoseEstimationDetectionResults(PoseEstimationResults* results);
-
- int GetResults(std::vector<std::vector<int>>* dimInfo, std::vector<float*> *results);
-
- mv_engine_config_h GetEngineConfig(void) { return engine_config; }
-
- void SetEngineConfig(mv_engine_config_h config) { engine_config = config; }
-
-private:
- bool mCanRun; /**< The flag indicating ready to run Inference */
- InferenceConfig mConfig;
- inference_engine_capacity mBackendCapacity;
- std::map<int, std::pair<std::string, bool>> mSupportedInferenceBackend;
- cv::Size mInputSize;
- int mCh;
- int mDim;
- double mDeviation;
- double mMean;
- double mThreshold;
- int mOutputNumbers;
- cv::Size mSourceSize;
- cv::Mat mInputBuffer;
-
- mv_engine_config_h engine_config;
-
- InferenceEngineCommon * mBackend;
-
- std::map<std::string, int> mModelFormats;
- std::vector<std::string> mUserListName;
-
- std::vector<inference_engine_tensor_buffer> mInputTensorBuffers;
- inference_engine_layer_property mInputLayerProperty;
- std::vector<inference_engine_tensor_buffer> mOutputTensorBuffers;
- inference_engine_layer_property mOutputLayerProperty;
-
-private:
- void CheckSupportedInferenceBackend();
- int ConvertEngineErrorToVisionError(int error);
- int ConvertTargetTypes(int given_types);
- int ConvertToCv(int given_type);
- inference_tensor_data_type_e ConvertToIE(int given_type);
- int Preprocess(cv::Mat cvImg, cv::Mat cvDst, int data_type);
- int PrepareTenosrBuffers(void);
- void CleanupTensorBuffers(void);
- int SetUserFile(std::string filename);
- int FillOutputResult(tensor_t &outputData);
-};
+ int number_of_pose_estimation;
+ std::vector<cv::Point> locations;
+} PoseEstimationResults; /**< structure PoseEstimationResults */
+
+namespace mediavision
+{
+namespace inference
+{
+ struct TensorInfo {
+ int width;
+ int height;
+ int dim;
+ int ch;
+ };
+
+ struct InferenceConfig {
+ /**
+ * @brief Default constructor for the @ref InferenceConfig
+ *
+ * @since_tizen 5.0
+ */
+ InferenceConfig();
+
+ std::string mConfigFilePath; /**< Path of a model configuration file */
+
+ std::string mWeightFilePath; /**< Path of a model weight file */
+
+ std::string mUserFilePath; /**< Path of model user file */
+
+ TensorInfo mTensorInfo; /**< Tensor information */
+
+ mv_inference_data_type_e mDataType; /**< Data type of a input tensor */
+
+ mv_inference_backend_type_e mBackedType; /**< Backed type of model files */
+
+ int mTargetTypes; /**< Target type to run inference */
+
+ double mConfidenceThresHold; /**< Confidence threshold value */
+
+ double mMeanValue; /**< The mean value for normalization */
+
+ double mStdValue; /**< The scale factor value for normalization */
+
+ int mMaxOutputNumbers;
+
+ std::vector<std::string> mInputLayerNames; /**< The input layer names */
+ std::vector<std::string> mOutputLayerNames; /**< The output layer names */
+ };
+
+ class Inference
+ {
+ public:
+ /**
+ * @brief Creates an Inference class instance.
+ *
+ * @since_tizen 5.5
+ */
+ Inference();
+
+ /**
+ * @brief Destroys an Inference class instance including
+ * its all resources.
+ *
+ * @since_tizen 5.5
+ */
+ ~Inference();
+
+ /**
+ * @brief Configure modelfiles
+ *
+ * @since_tizen 5.5
+ */
+ void ConfigureModelFiles(const std::string modelConfigFilePath,
+ const std::string modelWeightFilePath,
+ const std::string modelUserFilePath);
+
+ /**
+ * @brief Configure input tensor information
+ *
+ * @since_tizen 5.5
+ * @remarks deprecated Replaced by ConfigureInputInfo
+ */
+ void ConfigureTensorInfo(int width, int height, int dim, int ch,
+ double stdValue, double meanValue);
+
+ /**
+ * @brief Configure input information
+ *
+ * @since_tizen 6.0
+ */
+ void ConfigureInputInfo(int width, int height, int dim, int ch,
+ double stdValue, double meanValue, int dataType,
+ const std::vector<std::string> names);
+
+ void ConfigureOutputInfo(std::vector<std::string> names);
+
+ /**
+ * @brief Configure inference backend type.
+ *
+ * @since_tizen 6.0
+ */
+ int ConfigureBackendType(const mv_inference_backend_type_e backendType);
+
+ /**
+ * @brief Configure a inference target device type such as CPU, GPU or NPU. (only one type can be set)
+ * @details Internally, a given device type will be converted to new type.
+ * This API is just used for backward compatibility.
+ *
+ * @since_tizen 6.0 (Deprecated)
+ */
+ int ConfigureTargetTypes(const int targetType);
+
+ /**
+ * @brief Configure inference target devices such as CPU, GPU or NPU. (one more types can be combined)
+ *
+ * @since_tizen 6.0
+ */
+ int ConfigureTargetDevices(const int targetDevices);
+
+ /**
+ * @brief Configure the maximum number of inference results
+ *
+ * @since_tizen 5.5
+ */
+ void ConfigureOutput(const int maxOutputNumbers);
+
+ /**
+ * @brief Configure the confidence threshold
+ *
+ * @since_tizen 5.5
+ */
+ void ConfigureThreshold(const double threshold);
+
+ /**
+ * @brief Bind a backend engine
+ * @details Use this function to bind a backend engine for the inference.
+ * This creates a inference engine common class object, and loads a backend
+ * library which interfaces with a Neural Network runtime such as TF Lite,
+ * OpenCV, ARMNN and so on.
+ *
+ * Ps. The created inference engine common object will be released and its
+ * corresponding backend library will be unbound when the destructor
+ * of the Inference class is called.
+ *
+ * @since_tizen 6.0
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ */
+ int Bind();
+
+ /**
+ * @brief Set default configuration for the inference
+ * @details Use this function to set default configuration given in json file by user.
+ *
+ * Ps. this callback should be called after Bind callback.
+ *
+ * @since_tizen 6.0
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ */
+ int Prepare();
+
+ /**
+ * @brief Load model files
+ * @details Use this function to load given model files for the inference.
+ *
+ * Ps. this callback should be called after Prepare callback.
+ *
+ * @since_tizen 6.0
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ */
+ int Load();
+
+ /**
+ * @brief Runs inference with a region of a given image
+ * @details Use this function to run forward pass with the given image.
+ * The given image is preprocessed and the region of the image is
+ * thrown to neural network. Then, the output tensor is returned.
+ * If roi is NULL, then full source will be analyzed.
+ *
+ * @since_tizen 5.5
+ * @return @c true on success, otherwise a negative error value
+ */
+ int Run(std::vector<mv_source_h> &mvSources,
+ std::vector<mv_rectangle_s> &rects);
+
+ /**
+ * @brief Gets that given engine is supported or not
+ *
+ * @since_tizen 5.5
+ * @return @c true on success, otherwise a negative error value
+ */
+ std::pair<std::string, bool> GetSupportedInferenceBackend(int backend);
+
+ /**
+ * @brief Gets the ImageClassificationResults
+ *
+ * @since_tizen 5.5
+ * @return @c true on success, otherwise a negative error value
+ */
+ int GetClassficationResults(ImageClassificationResults *classificationResults);
+
+ /**
+ * @brief Gets the ObjectDetectioResults
+ *
+ * @since_tizen 5.5
+ * @return @c true on success, otherwise a negative error value
+ */
+ int GetObjectDetectionResults(ObjectDetectionResults *detectionResults);
+
+ /**
+ * @brief Gets the FaceDetectioResults
+ *
+ * @since_tizen 5.5
+ * @return @c true on success, otherwise a negative error value
+ */
+ int GetFaceDetectionResults(FaceDetectionResults *detectionResults);
+
+ /**
+ * @brief Gets the FacialLandmarkDetectionResults
+ *
+ * @since_tizen 5.5
+ * @return @c true on success, otherwise a negative error value
+ */
+ int GetFacialLandMarkDetectionResults(FacialLandMarkDetectionResults *results);
+
+ /**
+ * @brief Gets the PoseEstimationDetectionResults
+ *
+ * @since_tizen 6.0
+ * @return @c true on success, otherwise a negative error value
+ */
+ int GetPoseEstimationDetectionResults(PoseEstimationResults *results);
+
+ int GetResults(std::vector<std::vector<int> > *dimInfo,
+ std::vector<float *> *results);
+
+ mv_engine_config_h GetEngineConfig(void)
+ {
+ return engine_config;
+ }
+
+ void SetEngineConfig(mv_engine_config_h config)
+ {
+ engine_config = config;
+ }
+
+ private:
+ bool mCanRun; /**< The flag indicating ready to run Inference */
+ InferenceConfig mConfig;
+ inference_engine_capacity mBackendCapacity;
+ std::map<int, std::pair<std::string, bool> > mSupportedInferenceBackend;
+ cv::Size mInputSize;
+ int mCh;
+ int mDim;
+ double mDeviation;
+ double mMean;
+ double mThreshold;
+ int mOutputNumbers;
+ cv::Size mSourceSize;
+ cv::Mat mInputBuffer;
+ mv_engine_config_h engine_config;
+ InferenceEngineCommon *mBackend;
+ std::map<std::string, int> mModelFormats;
+ std::vector<std::string> mUserListName;
+ std::vector<inference_engine_tensor_buffer> mInputTensorBuffers;
+ inference_engine_layer_property mInputLayerProperty;
+ std::vector<inference_engine_tensor_buffer> mOutputTensorBuffers;
+ inference_engine_layer_property mOutputLayerProperty;
+
+ private:
+ void CheckSupportedInferenceBackend();
+ int ConvertEngineErrorToVisionError(int error);
+ int ConvertTargetTypes(int given_types);
+ int ConvertToCv(int given_type);
+ inference_tensor_data_type_e ConvertToIE(int given_type);
+ int Preprocess(cv::Mat cvImg, cv::Mat cvDst, int data_type);
+ int PrepareTenosrBuffers(void);
+ void CleanupTensorBuffers(void);
+ int SetUserFile(std::string filename);
+ int FillOutputResult(tensor_t &outputData);
+ };
} /* Inference */
} /* MediaVision */
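
Inference, declared above, is the C++ class behind those entry points and is driven in a fixed order: the configuration calls, then Bind(), Prepare(), Load(), Run(), then a result getter. A minimal usage sketch follows; the enum values, file paths and layer names are illustrative assumptions (the enums would come from mv_inference_type.h), not values taken from this commit.

	// Hedged usage sketch of mediavision::inference::Inference as declared in Inference.h above.
	#include <vector>
	#include "Inference.h"

	int RunClassification(mv_source_h source, mv_rectangle_s roi)
	{
		mediavision::inference::Inference inference;

		inference.ConfigureModelFiles("model.cfg", "model.weights", "labels.txt"); // hypothetical paths
		inference.ConfigureInputInfo(224, 224, 1, 3,           // width, height, dim, channels
					     127.5, 127.5,             // std value, mean value
					     MV_INFERENCE_DATA_FLOAT32,   // assumed enum from mv_inference_type.h
					     { "input" });                // hypothetical input layer name
		inference.ConfigureOutputInfo({ "output" });                       // hypothetical output layer name
		inference.ConfigureBackendType(MV_INFERENCE_BACKEND_TFLITE);      // assumed enum value
		inference.ConfigureTargetDevices(MV_INFERENCE_TARGET_DEVICE_CPU); // assumed enum value
		inference.ConfigureOutput(5);         // at most five results
		inference.ConfigureThreshold(0.6);    // confidence threshold

		int ret = inference.Bind();           // bind the backend library
		if (ret == MEDIA_VISION_ERROR_NONE)
			ret = inference.Prepare();    // apply default/engine configuration
		if (ret == MEDIA_VISION_ERROR_NONE)
			ret = inference.Load();       // load the model files

		std::vector<mv_source_h> sources = { source };
		std::vector<mv_rectangle_s> rects = { roi };
		if (ret == MEDIA_VISION_ERROR_NONE)
			ret = inference.Run(sources, rects);

		ImageClassificationResults results;
		if (ret == MEDIA_VISION_ERROR_NONE)
			ret = inference.GetClassficationResults(&results); // spelling as declared in the header
		return ret;
	}

The same Bind/Prepare/Load/Run sequence sits behind the other getters declared above (object, face, facial landmark and pose estimation results).
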
diff --git a/mv_inference/inference/include/InferenceIni.h b/mv_inference/inference/include/InferenceIni.h
index 0834f195..7a586148 100644
--- a/mv_inference/inference/include/InferenceIni.h
+++ b/mv_inference/inference/include/InferenceIni.h
@@ -21,48 +21,50 @@
#include <vector>
#include <mv_inference_type.h>
-namespace mediavision {
-namespace inference {
+namespace mediavision
+{
+namespace inference
+{
+ class InferenceInI
+ {
+ public:
+ /**
+ * @brief Creates an Inference class instance.
+ *
+ * @since_tizen 5.5
+ */
+ InferenceInI();
-class InferenceInI {
-public:
- /**
- * @brief Creates an Inference class instance.
- *
- * @since_tizen 5.5
- */
- InferenceInI();
+ /**
+ * @brief Destroys an Inference class instance including
+ * its all resources.
+ *
+ * @since_tizen 5.5
+ */
+ ~InferenceInI();
- /**
- * @brief Destroys an Inference class instance including
- * its all resources.
- *
- * @since_tizen 5.5
- */
- ~InferenceInI();
+ /**
+ * @brief Load()
+ *
+ * @since_tizen 5.5
+ */
+ int LoadInI();
- /**
- * @brief Load()
- *
- * @since_tizen 5.5
- */
- int LoadInI();
+ /**
+ * @brief Unload()
+ *
+ * @since_tizen 5.5
+ */
+ void UnLoadInI();
- /**
- * @brief Unload()
- *
- * @since_tizen 5.5
- */
- void UnLoadInI();
+ std::vector<int> GetSupportedInferenceEngines();
- std::vector<int> GetSupportedInferenceEngines();
-
-private:
- std::vector<int> mSupportedInferenceBackend;
- std::string mIniDefaultPath;
- std::string mDefaultBackend;
- std::string mDelimeter;
-};
+ private:
+ std::vector<int> mSupportedInferenceBackend;
+ std::string mIniDefaultPath;
+ std::string mDefaultBackend;
+ std::string mDelimeter;
+ };
} /* Inference */
} /* MediaVision */
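
InferenceInI, declared above, reads the backend configuration ini (its members hint at a default ini path, a default backend and a delimiter) and reports which inference engines are enabled. Below is a minimal hedged sketch of its load/query/unload cycle; the assumption that LoadInI() returns 0 on success is not stated in the header.

	// Hedged sketch of the InferenceInI cycle declared in InferenceIni.h above.
	#include <vector>
	#include "InferenceIni.h"

	std::vector<int> QuerySupportedBackends()
	{
		mediavision::inference::InferenceInI ini;

		if (ini.LoadInI() != 0)      // assumed: 0 on success
			return {};

		// Values are backend identifiers (mv_inference_backend_type_e as int).
		std::vector<int> backends = ini.GetSupportedInferenceEngines();

		ini.UnLoadInI();             // release ini resources
		return backends;
	}
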
diff --git a/mv_inference/inference/include/mv_inference_open.h b/mv_inference/inference/include/mv_inference_open.h
index e3140524..43ce8e41 100755..100644
--- a/mv_inference/inference/include/mv_inference_open.h
+++ b/mv_inference/inference/include/mv_inference_open.h
@@ -22,516 +22,517 @@
#include <mv_inference.h>
#ifdef __cplusplus
-extern "C" {
+extern "C"
+{
#endif /* __cplusplus */
-/**
- * @file mv_inference_open.h
- * @brief This file contains the Media Vision Inference Open API.
- */
+ /**
+ * @file mv_inference_open.h
+ * @brief This file contains the Media Vision Inference Open API.
+ */
-/*************/
-/* Inference */
-/*************/
+ /*************/
+ /* Inference */
+ /*************/
-mv_engine_config_h mv_inference_get_engine_config(mv_inference_h infer);
+ mv_engine_config_h mv_inference_get_engine_config(mv_inference_h infer);
-/**
- * @brief Create inference handle.
- * @details Use this function to create an inference handle. After creation
- * the inference handle has to be prepared with
- * @ref mv_inference_prepare() function to prepare an inference.
- *
- * @since_tizen 5.5
- *
- * @param [out] infer The handle to the inference to be created
- *
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
- *
- * @post Release @a infer by using
- * @ref mv_inference_destroy() function when it is not needed
- * anymore
- *
- * @see mv_inference_destroy_open()
- * @see mv_inference_prepare_open()
- */
-int mv_inference_create_open(mv_inference_h *infer);
+ /**
+ * @brief Create inference handle.
+ * @details Use this function to create an inference handle. After creation
+ * the inference handle has to be prepared with
+ * @ref mv_inference_prepare() function to prepare an inference.
+ *
+ * @since_tizen 5.5
+ *
+ * @param [out] infer The handle to the inference to be created
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ *
+ * @post Release @a infer by using
+ * @ref mv_inference_destroy() function when it is not needed
+ * anymore
+ *
+ * @see mv_inference_destroy_open()
+ * @see mv_inference_prepare_open()
+ */
+ int mv_inference_create_open(mv_inference_h *infer);
-/**
- * @brief Destroy inference handle and releases all its resources.
- *
- * @since_tizen 5.5
- *
- * @param [in] infer The handle to the inference to be destroyed
- *
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- *
- * @pre Create an inference handle by using @ref mv_inference_create_open()
- *
- * @see mv_inference_create_open()
- */
-int mv_inference_destroy_open(mv_inference_h infer);
+ /**
+ * @brief Destroy inference handle and releases all its resources.
+ *
+ * @since_tizen 5.5
+ *
+ * @param [in] infer The handle to the inference to be destroyed
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ *
+ * @pre Create an inference handle by using @ref mv_inference_create_open()
+ *
+ * @see mv_inference_create_open()
+ */
+ int mv_inference_destroy_open(mv_inference_h infer);
-/**
- * @brief Configure the inference model data to inference handle
- *
- * @since_tizen 5.5
- *
- * @param [in] infer The handle to the inference
- * @param [in] engine_config The handle to the configuration of
- * engine.
- *
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
- * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
- * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
- */
-int mv_inference_configure_model_open(mv_inference_h infer, mv_engine_config_h engine_config);
+ /**
+ * @brief Configure the inference model data to inference handle
+ *
+ * @since_tizen 5.5
+ *
+ * @param [in] infer The handle to the inference
+ * @param [in] engine_config The handle to the configuration of
+ * engine.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ */
+ int mv_inference_configure_model_open(mv_inference_h infer,
+ mv_engine_config_h engine_config);
-/**
- * @brief Configure the tensor information to the inference handle
- *
- * @since_tizen 5.5
- * @remarks deprecated Replaced by mv_inference_configure_input_info
- *
- * @param [in] infer The handle to the inference
- * @param [in] engine_config The handle to the configuration of
- * engine.
- *
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
- * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
- * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
- */
+ /**
+ * @brief Configure the tensor information to the inference handle
+ *
+ * @since_tizen 5.5
+ * @remarks deprecated Replaced by mv_inference_configure_input_info
+ *
+ * @param [in] infer The handle to the inference
+ * @param [in] engine_config The handle to the configuration of
+ * engine.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ */
-int mv_inference_configure_input_info_open(mv_inference_h infer, mv_engine_config_h engine_config);
+ int
+ mv_inference_configure_input_info_open(mv_inference_h infer,
+ mv_engine_config_h engine_config);
-/**
- * @brief Configure the input information to the inference handle
- *
- * @since_tizen 6.0
- *
- * @param [in] infer The handle to the inference
- * @param [in] engine_config The handle to the configuration of
- * engine.
- *
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
- * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
- * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
- */
-int mv_inference_configure_input_info_open(mv_inference_h infer, mv_engine_config_h engine_config);
+ /**
+ * @brief Configure the input information to the inference handle
+ *
+ * @since_tizen 6.0
+ *
+ * @param [in] infer The handle to the inference
+ * @param [in] engine_config The handle to the configuration of
+ * engine.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ */
+ int
+ mv_inference_configure_input_info_open(mv_inference_h infer,
+ mv_engine_config_h engine_config);
-/**
- * @brief Configure the backend to the inference handle
- *
- * @since_tizen 5.5
- *
- * @param [in] infer The handle to the inference
- * @param [in] engine_config The handle to the configuration of
- * engine.
- *
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
- * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
- * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
- */
-int mv_inference_configure_engine_open(mv_inference_h infer, mv_engine_config_h engine_config);
+ /**
+ * @brief Configure the backend to the inference handle
+ *
+ * @since_tizen 5.5
+ *
+ * @param [in] infer The handle to the inference
+ * @param [in] engine_config The handle to the configuration of
+ * engine.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ */
+ int mv_inference_configure_engine_open(mv_inference_h infer,
+ mv_engine_config_h engine_config);
-/**
- * @brief Configure the number of output to the inference handle
- *
- * @since_tizen 5.5
- * @remarks deprecated Replaced by mv_inference_configure_post_process_info_open
- *
- * @param [in] infer The handle to the inference
- * @param [in] engine_config The handle to the configuration of
- * engine.
- *
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
- * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
- * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
- */
-int mv_inference_configure_output_open(mv_inference_h infer, mv_engine_config_h engine_config);
+ /**
+ * @brief Configure the number of output to the inference handle
+ *
+ * @since_tizen 5.5
+ * @remarks deprecated Replaced by mv_inference_configure_post_process_info_open
+ *
+ * @param [in] infer The handle to the inference
+ * @param [in] engine_config The handle to the configuration of
+ * engine.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ */
+ int mv_inference_configure_output_open(mv_inference_h infer,
+ mv_engine_config_h engine_config);
-/**
- * @brief Configure the confidence threshold value to the inference handle
- *
- * @since_tizen 5.5
- * @remarks deprecated Replaced by mv_inference_configure_post_process_info_open
- *
- * @param [in] infer The handle to the inference
- * @param [in] engine_config The handle to the configuration of
- * engine.
- *
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
- * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
- * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
- */
-int mv_inference_configure_confidence_threshold_open(mv_inference_h infer, mv_engine_config_h engine_config);
+ /**
+ * @brief Configure the confidence threshold value to the inference handle
+ *
+ * @since_tizen 5.5
+ * @remarks deprecated Replaced by mv_inference_configure_post_process_info_open
+ *
+ * @param [in] infer The handle to the inference
+ * @param [in] engine_config The handle to the configuration of
+ * engine.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ */
+ int mv_inference_configure_confidence_threshold_open(
+ mv_inference_h infer, mv_engine_config_h engine_config);
-/**
- * @brief Configure the post process information to the inference handle
- *
- * @since_tizen 6.0
- *
- * @param [in] infer The handle to the inference
- * @param [in] engine_config The handle to the configuration of
- * engine.
- *
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
- * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
- * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
- */
-int mv_inference_configure_post_process_info_open(mv_inference_h infer, mv_engine_config_h engine_config);
+ /**
+ * @brief Configure the post process information to the inference handle
+ *
+ * @since_tizen 6.0
+ *
+ * @param [in] infer The handle to the inference
+ * @param [in] engine_config The handle to the configuration of
+ * engine.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ */
+ int mv_inference_configure_post_process_info_open(
+ mv_inference_h infer, mv_engine_config_h engine_config);
-/**
- * @brief Configure the set of output node names to the inference handle
- *
- * @since_tizen 5.5
- * @remarks deprecated Replaced by mv_inference_configure_output_info_open
- *
- * @param [in] infer The handle to the inference
- * @param [in] engine_config The handle to the configuration of
- * engine.
- *
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
- * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
- * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
- */
-int mv_inference_configure_output_node_names_open(mv_inference_h infer, mv_engine_config_h engine_config);
+ /**
+ * @brief Configure the set of output node names to the inference handle
+ *
+ * @since_tizen 5.5
+ * @remarks deprecated Replaced by mv_inference_configure_output_info_open
+ *
+ * @param [in] infer The handle to the inference
+ * @param [in] engine_config The handle to the configuration of
+ * engine.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ */
+ int mv_inference_configure_output_node_names_open(
+ mv_inference_h infer, mv_engine_config_h engine_config);
-/**
- * @brief Configure the output information to the inference handle
- *
- * @since_tizen 6.0
- *
- * @param [in] infer The handle to the inference
- * @param [in] engine_config The handle to the configuration of
- * engine.
- *
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
- * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
- * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
- */
-int mv_inference_configure_output_info_open(mv_inference_h infer, mv_engine_config_h engine_config);
+ /**
+ * @brief Configure the output information to the inference handle
+ *
+ * @since_tizen 6.0
+ *
+ * @param [in] infer The handle to the inference
+ * @param [in] engine_config The handle to the configuration of
+ * engine.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ */
+ int
+ mv_inference_configure_output_info_open(mv_inference_h infer,
+ mv_engine_config_h engine_config);
-/**
- * @brief Prepare inference.
- * @details Use this function to prepare inference based on
- * the configured network.
- *
- * @since_tizen 5.5
- *
- * @param [in] infer The handle to the inference
- *
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
- * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
- */
-int mv_inference_prepare_open(mv_inference_h infer);
+ /**
+ * @brief Prepare inference.
+ * @details Use this function to prepare inference based on
+ * the configured network.
+ *
+ * @since_tizen 5.5
+ *
+ * @param [in] infer The handle to the inference
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ */
+ int mv_inference_prepare_open(mv_inference_h infer);
-/**
-* @brief Traverses the list of supported engines for inference.
-* @details Using this function the supported engines can be obtained.
-* The names can be used with mv_engine_config_h related
-* getters and setters to get/set MV_INFERENCE_BACKEND_TYPE attribute
-* value.
-*
-* @since_tizen 5.5
-* @param [in] infer The handle to the inference
-* @param [in] callback The iteration callback function
-* @param [in] user_data The user data to be passed to the callback function
-* @return @c 0 on success, otherwise a negative error value
-* @retval #MEDIA_VISION_ERROR_NONE Successful
-* @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
-* @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
-* @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
-*
-* @pre @a engine_cfg must be created
-*
-* @see mv_engine_config_set_string_attribute()
-* @see mv_engine_config_get_string_attribute()
-*/
-int mv_inference_foreach_supported_engine_open(
- mv_inference_h infer,
- mv_inference_supported_engine_cb callback,
- void *user_data);
+ /**
+ * @brief Traverses the list of supported engines for inference.
+ * @details Using this function the supported engines can be obtained.
+ * The names can be used with mv_engine_config_h related
+ * getters and setters to get/set MV_INFERENCE_BACKEND_TYPE attribute
+ * value.
+ *
+ * @since_tizen 5.5
+ * @param [in] infer The handle to the inference
+ * @param [in] callback The iteration callback function
+ * @param [in] user_data The user data to be passed to the callback function
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ *
+ * @pre @a engine_cfg must be created
+ *
+ * @see mv_engine_config_set_string_attribute()
+ * @see mv_engine_config_get_string_attribute()
+ */
+ int mv_inference_foreach_supported_engine_open(
+ mv_inference_h infer, mv_inference_supported_engine_cb callback,
+ void *user_data);
-/**
- * @brief Performs image classification on the @a source
- * @details Use this function to launch image classification.
- * Each time when mv_inference_image_classify is
- * called, @a classified_cb will receive classes
- * which the media source may belong to.
- *
- * @since_tizen 5.5
- *
- * @param [in] source The handle to the source of the media
- * @param [in] infer The handle to the inference
- * @param [in] roi Rectangular box bounding the region-of-interest on the
- * @a source. If NULL, then full source will be
- * analyzed.
- * @param [in] classified_cb The callback which will be called for
- * classification on media source.
- * This callback will receive classification results.
- * @param [in] user_data The user data passed from the code where
- * @ref mv_inference_image_classify_open() is invoked. This data will
- * be accessible from @a classified_cb callback.
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- * @retval #MEDIA_VISION_ERROR_INTERNAL Internal error
- * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Source colorspace
- * isn't supported
- * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
- * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
- *
- * @pre Create a source handle by calling @ref mv_create_source()
- * @pre Create an inference handle by calling @ref mv_inference_create()
- * @pre Configure an inference handle by calling @ref mv_inference_configure()
- * @pre Prepare an inference by calling @ref mv_inference_prepare()
- * @post @a classified_cb will be called to process classification results
- *
- * @see mv_inference_image_classified_cb
- */
-int mv_inference_image_classify_open(
- mv_source_h source,
- mv_inference_h infer,
- mv_rectangle_s *roi,
- mv_inference_image_classified_cb classified_cb,
- void *user_data);
+ /**
+ * @brief Performs image classification on the @a source
+ * @details Use this function to launch image classification.
+ * Each time when mv_inference_image_classify is
+ * called, @a classified_cb will receive classes
+ * which the media source may belong to.
+ *
+ * @since_tizen 5.5
+ *
+ * @param [in] source The handle to the source of the media
+ * @param [in] infer The handle to the inference
+ * @param [in] roi Rectangular box bounding the region-of-interest on the
+ * @a source. If NULL, then full source will be
+ * analyzed.
+ * @param [in] classified_cb The callback which will be called for
+ * classification on media source.
+ * This callback will receive classification results.
+ * @param [in] user_data The user data passed from the code where
+ * @ref mv_inference_image_classify_open() is invoked. This data will
+ * be accessible from @a classified_cb callback.
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INTERNAL Internal error
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Source colorspace
+ * isn't supported
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ *
+ * @pre Create a source handle by calling @ref mv_create_source()
+ * @pre Create an inference handle by calling @ref mv_inference_create()
+ * @pre Configure an inference handle by calling @ref mv_inference_configure()
+ * @pre Prepare an inference by calling @ref mv_inference_prepare()
+ * @post @a classified_cb will be called to process classification results
+ *
+ * @see mv_inference_image_classified_cb
+ */
+ int mv_inference_image_classify_open(
+ mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
+ mv_inference_image_classified_cb classified_cb, void *user_data);
+ /**
+ * @brief Performs object detection on the @a source
+ * @details Use this function to launch object detection.
+ * Each time when mv_inference_object_detection is
+ * called, @a detected_cb will receive a list of objects and their locations
+ * on the media source.
+ *
+ * @since_tizen 5.5
+ *
+ * @param [in] source The handle to the source of the media
+ * @param [in] infer The handle to the inference
+ * @param [in] detected_cb The callback which will be called for
+ * detecting objects on media source.
+ * This callback will receive the detection results.
+ * @param [in] user_data The user data passed from the code where
+ * @ref mv_inference_object_detect() is invoked. This data will
+ * be accessible from @a detected_cb callback.
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INTERNAL Internal error
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Source colorspace
+ * isn't supported
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ *
+ * @pre Create a source handle by calling @ref mv_create_source()
+ * @pre Create an inference handle by calling @ref mv_inference_create()
+ * @pre Configure an inference handle by calling @ref mv_inference_configure()
+ * @pre Prepare an inference by calling @ref mv_inference_prepare()
+ * @post @a detected_cb will be called to process detection results
+ *
+ * @see mv_inference_object_detected_cb
+ */
+ int
+ mv_inference_object_detect_open(mv_source_h source, mv_inference_h infer,
+ mv_inference_object_detected_cb detected_cb,
+ void *user_data);
-/**
- * @brief Performs object detection on the @a source
- * @details Use this function to launch object detection.
- * Each time when mv_inference_object_detection is
- * called, @a detected_cb will receive a list of objects and their locations
- * on the media source.
- *
- * @since_tizen 5.5
- *
- * @param [in] source The handle to the source of the media
- * @param [in] infer The handle to the inference
- * @param [in] detected_cb The callback which will be called for
- * detecting objects on media source.
- * This callback will receive the detection results.
- * @param [in] user_data The user data passed from the code where
- * @ref mv_inference_object_detect() is invoked. This data will
- * be accessible from @a detected_cb callback.
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- * @retval #MEDIA_VISION_ERROR_INTERNAL Internal error
- * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Source colorspace
- * isn't supported
- * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
- * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
- *
- * @pre Create a source handle by calling @ref mv_create_source()
- * @pre Create an inference handle by calling @ref mv_inference_create()
- * @pre Configure an inference handle by calling @ref mv_inference_configure()
- * @pre Prepare an inference by calling @ref mv_inference_prepare()
- * @post @a detected_cb will be called to process detection results
- *
- * @see mv_inference_object_detected_cb
- */
-int mv_inference_object_detect_open(
- mv_source_h source,
- mv_inference_h infer,
- mv_inference_object_detected_cb detected_cb,
- void *user_data);
-
-/**
- * @brief Performs face detection on the @a source
- * @details Use this function to launch face detection.
- * Each time when mv_inference_face_detection is
- * called, @a detected_cb will receive a list of faces and their locations
- * on the media source.
- *
- * @since_tizen 5.5
- *
- * @param [in] source The handle to the source of the media
- * @param [in] infer The handle to the inference
- * @param [in] detected_cb The callback which will be called for
- * detecting faces on media source.
- * This callback will receive the detection results.
- * @param [in] user_data The user data passed from the code where
- * @ref mv_inference_face_detect() is invoked. This data will
- * be accessible from @a detected_cb callback.
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- * @retval #MEDIA_VISION_ERROR_INTERNAL Internal error
- * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Source colorspace
- * isn't supported
- * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
- * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
- *
- * @pre Create a source handle by calling @ref mv_create_source()
- * @pre Create an inference handle by calling @ref mv_inference_create()
- * @pre Configure an inference handle by calling @ref mv_inference_configure()
- * @pre Prepare an inference by calling @ref mv_inference_prepare()
- * @post @a detected_cb will be called to process detection results
- *
- * @see mv_inference_face_detected_cb
- */
-int mv_inference_face_detect_open(
- mv_source_h source,
- mv_inference_h infer,
- mv_inference_face_detected_cb detected_cb,
- void *user_data);
+ /**
+ * @brief Performs face detection on the @a source
+ * @details Use this function to launch face detection.
+ * Each time when mv_inference_face_detection is
+ * called, @a detected_cb will receive a list of faces and their locations
+ * on the media source.
+ *
+ * @since_tizen 5.5
+ *
+ * @param [in] source The handle to the source of the media
+ * @param [in] infer The handle to the inference
+ * @param [in] detected_cb The callback which will be called for
+ * detecting faces on media source.
+ * This callback will receive the detection results.
+ * @param [in] user_data The user data passed from the code where
+ * @ref mv_inference_face_detect() is invoked. This data will
+ * be accessible from @a detected_cb callback.
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INTERNAL Internal error
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Source colorspace
+ * isn't supported
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ *
+ * @pre Create a source handle by calling @ref mv_create_source()
+ * @pre Create an inference handle by calling @ref mv_inference_create()
+ * @pre Configure an inference handle by calling @ref mv_inference_configure()
+ * @pre Prepare an inference by calling @ref mv_inference_prepare()
+ * @post @a detected_cb will be called to process detection results
+ *
+ * @see mv_inference_face_detected_cb
+ */
+ int mv_inference_face_detect_open(mv_source_h source, mv_inference_h infer,
+ mv_inference_face_detected_cb detected_cb,
+ void *user_data);
-/**
- * @brief Performs facial landmarks detection on the @a source
- * @details Use this function to launch facial landmark detection.
- * Each time when mv_inference_facial_landmark_detect() is
- * called, @a detected_cb will receive a list facial landmark's locations
- * on the media source.
- *
- * @since_tizen 5.5
- *
- * @param [in] source The handle to the source of the media
- * @param [in] infer The handle to the inference
- * @param[in] roi Rectangular box bounding face image on the
- * @a source. If NULL, then full source will be
- * analyzed.
- * @param [in] detected_cb The callback which will be called for
- * detecting facial landmark on media source.
- * This callback will receive the detection results.
- * @param [in] user_data The user data passed from the code where
- * @ref mv_inference_facial_landmark_detect() is invoked.
- * This data will be accessible from @a detected_cb callback.
- *
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- * @retval #MEDIA_VISION_ERROR_INTERNAL Internal error
- * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Source colorspace
- * isn't supported
- * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
- * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
- *
- * @pre Create a source handle by calling @ref mv_create_source()
- * @pre Create an inference handle by calling @ref mv_inference_create()
- * @pre Configure an inference handle by calling @ref mv_inference_configure()
- * @pre Prepare an inference by calling @ref mv_inference_prepare()
- * @post @a detected_cb will be called to process detection results
- *
- * @see mv_inference_facial_landmark_detected_cb
- */
-int mv_inference_facial_landmark_detect_open(
- mv_source_h source,
- mv_inference_h infer,
- mv_rectangle_s *roi,
- mv_inference_facial_landmark_detected_cb detected_cb,
- void *user_data);
+ /**
+	 * @brief Performs facial landmark detection on the @a source
+	 * @details Use this function to launch facial landmark detection.
+	 *          Each time mv_inference_facial_landmark_detect() is
+	 *          called, @a detected_cb will receive a list of facial landmark
+	 *          locations on the media source.
+ *
+ * @since_tizen 5.5
+ *
+ * @param [in] source The handle to the source of the media
+ * @param [in] infer The handle to the inference
+	 * @param[in] roi            Rectangular box bounding the face on the
+	 *                           @a source. If NULL, then the full source will
+	 *                           be analyzed.
+	 * @param [in] detected_cb   The callback which will be called for
+	 *                           detecting facial landmarks on the media source.
+	 *                           This callback will receive the detection results.
+ * @param [in] user_data The user data passed from the code where
+ * @ref mv_inference_facial_landmark_detect() is invoked.
+ * This data will be accessible from @a detected_cb callback.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INTERNAL Internal error
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Source colorspace
+ * isn't supported
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ *
+ * @pre Create a source handle by calling @ref mv_create_source()
+ * @pre Create an inference handle by calling @ref mv_inference_create()
+ * @pre Configure an inference handle by calling @ref mv_inference_configure()
+ * @pre Prepare an inference by calling @ref mv_inference_prepare()
+ * @post @a detected_cb will be called to process detection results
+ *
+ * @see mv_inference_facial_landmark_detected_cb
+ */
+ int mv_inference_facial_landmark_detect_open(
+ mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
+ mv_inference_facial_landmark_detected_cb detected_cb,
+ void *user_data);
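
Again illustrative only: the ROI-taking form documented above differs from face detection only by the bounding rectangle and the callback payload. A sketch assuming the landmark callback delivers an array of points; the source and inference handles are assumed to be created, configured, and prepared as in the previous sketch.

#include <stdio.h>
#include <mv_common.h>
#include <mv_inference.h>

/* Assumed signature for mv_inference_facial_landmark_detected_cb. */
static void _on_landmarks_detected(mv_source_h source, int number_of_landmarks,
				   const mv_point_s *locations, void *user_data)
{
	for (int i = 0; i < number_of_landmarks; ++i)
		printf("landmark %d: (%d, %d)\n", i, locations[i].x, locations[i].y);
}

static int _detect_landmarks(mv_source_h source, mv_inference_h infer,
			     mv_rectangle_s *face_roi)
{
	/* Passing NULL instead of face_roi analyzes the whole source. */
	return mv_inference_facial_landmark_detect(source, infer, face_roi,
						   _on_landmarks_detected, NULL);
}
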
-/**
- * @brief Performs pose estimation detection on the @a source
- * @details Use this function to launch pose estimation detection.
- * Each time when mv_inference_pose_estimation_detect() is
- * called, @a detected_cb will receive a list pose estimation's locations
- * on the media source.
- *
- * @since_tizen 5.5
- *
- * @param [in] source The handle to the source of the media
- * @param [in] infer The handle to the inference
- * @param[in] roi Rectangular box bounding face image on the
- * @a source. If NULL, then full source will be
- * analyzed.
- * @param [in] detected_cb The callback which will be called for
- * detecting facial landmark on media source.
- * This callback will receive the detection results.
- * @param [in] user_data The user data passed from the code where
- * @ref mv_inference_facial_landmark_detect() is invoked.
- * This data will be accessible from @a detected_cb callback.
- *
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- * @retval #MEDIA_VISION_ERROR_INTERNAL Internal error
- * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Source colorspace
- * isn't supported
- * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
- * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
- *
- * @pre Create a source handle by calling @ref mv_create_source()
- * @pre Create an inference handle by calling @ref mv_inference_create()
- * @pre Configure an inference handle by calling @ref mv_inference_configure()
- * @pre Prepare an inference by calling @ref mv_inference_prepare()
- * @post @a detected_cb will be called to process detection results
- *
- * @see mv_inference_pose_estimation_detected_cb
- */
-int mv_inference_pose_estimation_detect_open(
- mv_source_h source,
- mv_inference_h infer,
- mv_rectangle_s *roi,
- mv_inference_pose_estimation_detected_cb detected_cb,
- void *user_data);
+ /**
+	 * @brief Performs pose estimation on the @a source
+	 * @details Use this function to launch pose estimation.
+	 *          Each time mv_inference_pose_estimation_detect() is
+	 *          called, @a detected_cb will receive a list of estimated pose
+	 *          locations on the media source.
+ *
+ * @since_tizen 5.5
+ *
+ * @param [in] source The handle to the source of the media
+ * @param [in] infer The handle to the inference
+	 * @param[in] roi            Rectangular box bounding the region of interest
+	 *                           on the @a source. If NULL, then the full source
+	 *                           will be analyzed.
+	 * @param [in] detected_cb   The callback which will be called for
+	 *                           detecting poses on the media source.
+	 *                           This callback will receive the detection results.
+	 * @param [in] user_data     The user data passed from the code where
+	 *                           @ref mv_inference_pose_estimation_detect() is invoked.
+	 *                           This data will be accessible from @a detected_cb callback.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INTERNAL Internal error
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Source colorspace
+ * isn't supported
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ *
+ * @pre Create a source handle by calling @ref mv_create_source()
+ * @pre Create an inference handle by calling @ref mv_inference_create()
+ * @pre Configure an inference handle by calling @ref mv_inference_configure()
+ * @pre Prepare an inference by calling @ref mv_inference_prepare()
+ * @post @a detected_cb will be called to process detection results
+ *
+ * @see mv_inference_pose_estimation_detected_cb
+ */
+ int mv_inference_pose_estimation_detect_open(
+ mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
+ mv_inference_pose_estimation_detected_cb detected_cb,
+ void *user_data);
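
The pose-estimation entry point mirrors the landmark call. A sketch under the same assumptions (a point-list callback, optional ROI); the callback's exact parameter names are not taken from this diff.

#include <stdio.h>
#include <mv_common.h>
#include <mv_inference.h>

/* Assumed signature for mv_inference_pose_estimation_detected_cb. */
static void _on_pose_detected(mv_source_h source, int number_of_points,
			      const mv_point_s *locations, void *user_data)
{
	for (int i = 0; i < number_of_points; ++i)
		printf("keypoint %d: (%d, %d)\n", i, locations[i].x, locations[i].y);
}

static int _estimate_pose(mv_source_h source, mv_inference_h infer)
{
	/* NULL ROI: the whole source is analyzed. */
	return mv_inference_pose_estimation_detect(source, infer, NULL,
						   _on_pose_detected, NULL);
}
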
#ifdef __cplusplus
}
diff --git a/mv_inference/inference/src/Inference.cpp b/mv_inference/inference/src/Inference.cpp
index ae44143a..62262adc 100755..100644
--- a/mv_inference/inference/src/Inference.cpp
+++ b/mv_inference/inference/src/Inference.cpp
@@ -31,1298 +31,1396 @@
#define MV_INFERENCE_CONFIDENCE_THRESHOLD_MAX 1.0
#define MV_INFERENCE_CONFIDENCE_THRESHOLD_MIN 0.0
-typedef enum {
- InputAttrNoType = 0,
- InputAttrFloat32 = 1,
- InputAttrInt32 = 2,
- InputAttrUInt8 = 3,
- InputAttrInt64 = 4,
- InputAttrString = 5,
- InputAttrBool = 6,
+typedef enum {
+ InputAttrNoType = 0,
+ InputAttrFloat32 = 1,
+ InputAttrInt32 = 2,
+ InputAttrUInt8 = 3,
+ InputAttrInt64 = 4,
+ InputAttrString = 5,
+ InputAttrBool = 6,
} InputAttrType;
-namespace mediavision {
-namespace inference {
-InferenceConfig::InferenceConfig() :
- mConfigFilePath(),
- mWeightFilePath(),
- mUserFilePath(),
- mDataType(MV_INFERENCE_DATA_FLOAT32),
- mBackedType(MV_INFERENCE_BACKEND_NONE),
- mTargetTypes(MV_INFERENCE_TARGET_NONE),
- mConfidenceThresHold(),
- mMeanValue(),
- mStdValue(),
- mMaxOutputNumbers(1)
+namespace mediavision
{
- mTensorInfo.width = -1;
- mTensorInfo.height = -1;
- mTensorInfo.dim = -1;
- mTensorInfo.ch = -1;
-}
-
-Inference::Inference() :
- mCanRun(),
- mConfig(),
- mBackendCapacity(),
- mSupportedInferenceBackend(),
- mInputSize(cv::Size()),
- mCh(),
- mDim(),
- mDeviation(),
- mMean(),
- mThreshold(),
- mOutputNumbers(),
- mSourceSize(cv::Size()),
- mInputBuffer(cv::Mat()),
- engine_config(),
- mBackend()
+namespace inference
{
- LOGI("ENTER");
-
- mSupportedInferenceBackend.insert(std::make_pair(MV_INFERENCE_BACKEND_OPENCV, std::make_pair("opencv", false)));
- mSupportedInferenceBackend.insert(std::make_pair(MV_INFERENCE_BACKEND_TFLITE, std::make_pair("tflite", false)));
- mSupportedInferenceBackend.insert(std::make_pair(MV_INFERENCE_BACKEND_ARMNN, std::make_pair("armnn", false)));
- mSupportedInferenceBackend.insert(std::make_pair(MV_INFERENCE_BACKEND_MLAPI, std::make_pair("mlapi", false)));
- mSupportedInferenceBackend.insert(std::make_pair(MV_INFERENCE_BACKEND_NNFW, std::make_pair("mlapi", false)));
+ InferenceConfig::InferenceConfig() :
+ mConfigFilePath(),
+ mWeightFilePath(),
+ mUserFilePath(),
+ mDataType(MV_INFERENCE_DATA_FLOAT32),
+ mBackedType(MV_INFERENCE_BACKEND_NONE),
+ mTargetTypes(MV_INFERENCE_TARGET_NONE),
+ mConfidenceThresHold(),
+ mMeanValue(),
+ mStdValue(),
+ mMaxOutputNumbers(1)
+ {
+ mTensorInfo.width = -1;
+ mTensorInfo.height = -1;
+ mTensorInfo.dim = -1;
+ mTensorInfo.ch = -1;
+ }
- CheckSupportedInferenceBackend();
+ Inference::Inference() :
+ mCanRun(),
+ mConfig(),
+ mBackendCapacity(),
+ mSupportedInferenceBackend(),
+ mInputSize(cv::Size()),
+ mCh(),
+ mDim(),
+ mDeviation(),
+ mMean(),
+ mThreshold(),
+ mOutputNumbers(),
+ mSourceSize(cv::Size()),
+ mInputBuffer(cv::Mat()),
+ engine_config(),
+ mBackend()
+ {
+ LOGI("ENTER");
+
+ mSupportedInferenceBackend.insert(std::make_pair(
+ MV_INFERENCE_BACKEND_OPENCV, std::make_pair("opencv", false)));
+ mSupportedInferenceBackend.insert(std::make_pair(
+ MV_INFERENCE_BACKEND_TFLITE, std::make_pair("tflite", false)));
+ mSupportedInferenceBackend.insert(std::make_pair(
+ MV_INFERENCE_BACKEND_ARMNN, std::make_pair("armnn", false)));
+ mSupportedInferenceBackend.insert(std::make_pair(
+ MV_INFERENCE_BACKEND_MLAPI, std::make_pair("mlapi", false)));
+ mSupportedInferenceBackend.insert(std::make_pair(
+ MV_INFERENCE_BACKEND_NNFW, std::make_pair("mlapi", false)));
+
+ CheckSupportedInferenceBackend();
+
+ for (int i = 0; i < MV_INFERENCE_BACKEND_MAX; ++i) {
+ auto iter = mSupportedInferenceBackend.find(i);
+ LOGE("%d: %s: %s", i, (iter->second).first.c_str(),
+ (iter->second).second ? "TRUE" : "FALSE");
+ }
- for(int i = 0; i < MV_INFERENCE_BACKEND_MAX; ++i) {
- auto iter = mSupportedInferenceBackend.find(i);
- LOGE("%d: %s: %s", i, (iter->second).first.c_str(), (iter->second).second ? "TRUE" : "FALSE");
+ mModelFormats.insert(std::make_pair<std::string, int>(
+ "caffemodel", INFERENCE_MODEL_CAFFE));
+ mModelFormats.insert(
+ std::make_pair<std::string, int>("pb", INFERENCE_MODEL_TF));
+ mModelFormats.insert(std::make_pair<std::string, int>(
+ "tflite", INFERENCE_MODEL_TFLITE));
+ mModelFormats.insert(
+ std::make_pair<std::string, int>("t7", INFERENCE_MODEL_TORCH));
+ mModelFormats.insert(std::make_pair<std::string, int>(
+ "weights", INFERENCE_MODEL_DARKNET));
+ mModelFormats.insert(
+ std::make_pair<std::string, int>("bin", INFERENCE_MODEL_DLDT));
+ mModelFormats.insert(
+ std::make_pair<std::string, int>("onnx", INFERENCE_MODEL_ONNX));
+ mModelFormats.insert(std::make_pair<std::string, int>(
+ "nb", INFERENCE_MODEL_VIVANTE));
+
+ LOGI("LEAVE");
}
- mModelFormats.insert(std::make_pair<std::string, int>("caffemodel", INFERENCE_MODEL_CAFFE));
- mModelFormats.insert(std::make_pair<std::string, int>("pb", INFERENCE_MODEL_TF));
- mModelFormats.insert(std::make_pair<std::string, int>("tflite", INFERENCE_MODEL_TFLITE));
- mModelFormats.insert(std::make_pair<std::string, int>("t7", INFERENCE_MODEL_TORCH));
- mModelFormats.insert(std::make_pair<std::string, int>("weights", INFERENCE_MODEL_DARKNET));
- mModelFormats.insert(std::make_pair<std::string, int>("bin", INFERENCE_MODEL_DLDT));
- mModelFormats.insert(std::make_pair<std::string, int>("onnx", INFERENCE_MODEL_ONNX));
- mModelFormats.insert(std::make_pair<std::string, int>("nb", INFERENCE_MODEL_VIVANTE));
-
- LOGI("LEAVE");
-}
+ Inference::~Inference()
+ {
+ CleanupTensorBuffers();
-Inference::~Inference()
-{
- CleanupTensorBuffers();
+ if (!mInputLayerProperty.tensor_infos.empty()) {
+ mInputLayerProperty.tensor_infos.clear();
+ std::vector<inference_engine_tensor_info>().swap(
+ mInputLayerProperty.tensor_infos);
+ }
+ if (!mOutputLayerProperty.tensor_infos.empty()) {
+ mOutputLayerProperty.tensor_infos.clear();
+ std::vector<inference_engine_tensor_info>().swap(
+ mOutputLayerProperty.tensor_infos);
+ }
- if (!mInputLayerProperty.tensor_infos.empty()) {
- mInputLayerProperty.tensor_infos.clear();
- std::vector<inference_engine_tensor_info>().swap(mInputLayerProperty.tensor_infos);
- }
- if (!mOutputLayerProperty.tensor_infos.empty()) {
- mOutputLayerProperty.tensor_infos.clear();
- std::vector<inference_engine_tensor_info>().swap(mOutputLayerProperty.tensor_infos);
- }
+ mModelFormats.clear();
- mModelFormats.clear();
+ // Release backend engine.
+ if (mBackend) {
+ mBackend->UnbindBackend();
+ delete mBackend;
+ }
- // Release backend engine.
- if (mBackend) {
- mBackend->UnbindBackend();
- delete mBackend;
+ LOGI("Released backend engine.");
}
- LOGI("Released backend engine.");
-}
+ void Inference::CheckSupportedInferenceBackend()
+ {
+ LOGE("ENTER");
-void Inference::CheckSupportedInferenceBackend()
-{
- LOGE("ENTER");
+ InferenceInI ini;
+ ini.LoadInI();
- InferenceInI ini;
- ini.LoadInI();
-
- std::vector<int> supportedBackend = ini.GetSupportedInferenceEngines();
- for (std::vector<int>::const_iterator it = supportedBackend.begin();
- it != supportedBackend.end(); ++it) {
+ std::vector<int> supportedBackend = ini.GetSupportedInferenceEngines();
+ for (std::vector<int>::const_iterator it = supportedBackend.begin();
+ it != supportedBackend.end(); ++it) {
LOGE("engine: %d", *it);
- auto iter = mSupportedInferenceBackend.find(*it);
- (iter->second).second = true;
- }
+ auto iter = mSupportedInferenceBackend.find(*it);
+ (iter->second).second = true;
+ }
- LOGE("LEAVE");
+ LOGE("LEAVE");
+ }
-}
+ int Inference::ConvertEngineErrorToVisionError(int error)
+ {
+ int ret = MEDIA_VISION_ERROR_NONE;
+
+ switch (error) {
+ case INFERENCE_ENGINE_ERROR_NONE:
+ ret = MEDIA_VISION_ERROR_NONE;
+ break;
+ case INFERENCE_ENGINE_ERROR_NOT_SUPPORTED:
+ ret = MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ break;
+ case INFERENCE_ENGINE_ERROR_MSG_TOO_LONG:
+ ret = MEDIA_VISION_ERROR_MSG_TOO_LONG;
+ break;
+ case INFERENCE_ENGINE_ERROR_NO_DATA:
+ ret = MEDIA_VISION_ERROR_NO_DATA;
+ break;
+ case INFERENCE_ENGINE_ERROR_KEY_NOT_AVAILABLE:
+ ret = MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
+ break;
+ case INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY:
+ ret = MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ break;
+ case INFERENCE_ENGINE_ERROR_INVALID_PARAMETER:
+ ret = MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ break;
+ case INFERENCE_ENGINE_ERROR_INVALID_OPERATION:
+ ret = MEDIA_VISION_ERROR_INVALID_OPERATION;
+ break;
+ case INFERENCE_ENGINE_ERROR_PERMISSION_DENIED:
+ ret = MEDIA_VISION_ERROR_PERMISSION_DENIED;
+ break;
+ case INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT:
+ ret = MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
+ break;
+ case INFERENCE_ENGINE_ERROR_INTERNAL:
+ ret = MEDIA_VISION_ERROR_INTERNAL;
+ break;
+ case INFERENCE_ENGINE_ERROR_INVALID_DATA:
+ ret = MEDIA_VISION_ERROR_INVALID_DATA;
+ break;
+ case INFERENCE_ENGINE_ERROR_INVALID_PATH:
+ ret = MEDIA_VISION_ERROR_INVALID_PATH;
+ break;
+ default:
+ LOGE("Unknown inference engine error type");
+ }
-int Inference::ConvertEngineErrorToVisionError(int error)
-{
- int ret = MEDIA_VISION_ERROR_NONE;
-
- switch(error) {
- case INFERENCE_ENGINE_ERROR_NONE:
- ret = MEDIA_VISION_ERROR_NONE;
- break;
- case INFERENCE_ENGINE_ERROR_NOT_SUPPORTED:
- ret = MEDIA_VISION_ERROR_NOT_SUPPORTED;
- break;
- case INFERENCE_ENGINE_ERROR_MSG_TOO_LONG:
- ret = MEDIA_VISION_ERROR_MSG_TOO_LONG;
- break;
- case INFERENCE_ENGINE_ERROR_NO_DATA:
- ret = MEDIA_VISION_ERROR_NO_DATA;
- break;
- case INFERENCE_ENGINE_ERROR_KEY_NOT_AVAILABLE:
- ret = MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
- break;
- case INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY:
- ret = MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- break;
- case INFERENCE_ENGINE_ERROR_INVALID_PARAMETER:
- ret = MEDIA_VISION_ERROR_INVALID_PARAMETER;
- break;
- case INFERENCE_ENGINE_ERROR_INVALID_OPERATION:
- ret = MEDIA_VISION_ERROR_INVALID_OPERATION;
- break;
- case INFERENCE_ENGINE_ERROR_PERMISSION_DENIED:
- ret = MEDIA_VISION_ERROR_PERMISSION_DENIED;
- break;
- case INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT:
- ret = MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
- break;
- case INFERENCE_ENGINE_ERROR_INTERNAL:
- ret = MEDIA_VISION_ERROR_INTERNAL;
- break;
- case INFERENCE_ENGINE_ERROR_INVALID_DATA:
- ret = MEDIA_VISION_ERROR_INVALID_DATA;
- break;
- case INFERENCE_ENGINE_ERROR_INVALID_PATH:
- ret = MEDIA_VISION_ERROR_INVALID_PATH;
- break;
- default:
- LOGE("Unknown inference engine error type");
+ return ret;
}
- return ret;
-}
+ int Inference::ConvertTargetTypes(int given_types)
+ {
+ int target_types = INFERENCE_TARGET_NONE;
-int Inference::ConvertTargetTypes(int given_types)
-{
- int target_types = INFERENCE_TARGET_NONE;
+ if (given_types & MV_INFERENCE_TARGET_DEVICE_CPU)
+ target_types |= INFERENCE_TARGET_CPU;
+ if (given_types & MV_INFERENCE_TARGET_DEVICE_GPU)
+ target_types |= INFERENCE_TARGET_GPU;
+ if (given_types & MV_INFERENCE_TARGET_DEVICE_CUSTOM)
+ target_types |= INFERENCE_TARGET_CUSTOM;
- if (given_types & MV_INFERENCE_TARGET_DEVICE_CPU)
- target_types |= INFERENCE_TARGET_CPU;
- if (given_types & MV_INFERENCE_TARGET_DEVICE_GPU)
- target_types |= INFERENCE_TARGET_GPU;
- if (given_types & MV_INFERENCE_TARGET_DEVICE_CUSTOM)
- target_types |= INFERENCE_TARGET_CUSTOM;
+ return target_types;
+ }
- return target_types;
-}
+ int Inference::ConvertToCv(int given_type)
+ {
+ int type = 0;
+
+ switch (given_type) {
+ case INFERENCE_TENSOR_DATA_TYPE_UINT8:
+ LOGI("Type is %d ch with UINT8", mCh);
+ type = mCh == 1 ? CV_8UC1 : CV_8UC3;
+ break;
+ case INFERENCE_TENSOR_DATA_TYPE_FLOAT32:
+ LOGI("Type is %d ch with FLOAT32", mCh);
+ type = mCh == 1 ? CV_32FC1 : CV_32FC3;
+ break;
+ default:
+			LOGI("unknown data type so FLOAT32 data type will be used by default");
+ type = mCh == 1 ? CV_32FC1 : CV_32FC3;
+ break;
+ }
-int Inference::ConvertToCv(int given_type)
-{
- int type = 0;
-
- switch (given_type) {
- case INFERENCE_TENSOR_DATA_TYPE_UINT8:
- LOGI("Type is %d ch with UINT8", mCh);
- type = mCh == 1 ? CV_8UC1 : CV_8UC3;
- break;
- case INFERENCE_TENSOR_DATA_TYPE_FLOAT32:
- LOGI("Type is %d ch with FLOAT32", mCh);
- type = mCh == 1 ? CV_32FC1 : CV_32FC3;
- break;
- default:
- LOGI("unknown data type so FLOAT32 data type will be used in default");
- type = mCh == 1 ? CV_32FC1 : CV_32FC3;
- break;
+ return type;
}
- return type;
-}
+ inference_tensor_data_type_e Inference::ConvertToIE(int given_type)
+ {
+ inference_tensor_data_type_e type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+
+ switch (given_type) {
+ case MV_INFERENCE_DATA_FLOAT32:
+ type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+ break;
+ case MV_INFERENCE_DATA_UINT8:
+ type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
+ break;
+ default:
+			LOGI("unknown data type so FLOAT32 data type will be used by default");
+ break;
+ }
-inference_tensor_data_type_e Inference::ConvertToIE(int given_type)
-{
- inference_tensor_data_type_e type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-
- switch (given_type) {
- case MV_INFERENCE_DATA_FLOAT32:
- type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
- break;
- case MV_INFERENCE_DATA_UINT8:
- type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
- break;
- default:
- LOGI("unknown data type so FLOAT32 data type will be used in default");
- break;
+ return type;
}
- return type;
-}
+ int Inference::Preprocess(cv::Mat cvImg, cv::Mat cvDst, int data_type)
+ {
+ mSourceSize = cvImg.size();
+ int width = mInputSize.width;
+ int height = mInputSize.height;
+
+ cv::Mat sample;
+ if (cvImg.channels() == 3 && mCh == 1)
+ cv::cvtColor(cvImg, sample, cv::COLOR_BGR2GRAY);
+ else
+ sample = cvImg;
+
+ // size
+ cv::Mat sampleResized;
+ if (sample.size() != cv::Size(width, height))
+ cv::resize(sample, sampleResized, cv::Size(width, height));
+ else
+ sampleResized = sample;
+
+ // type
+ cv::Mat sampleFloat;
+ if (mCh == 3)
+ sampleResized.convertTo(sampleFloat, CV_32FC3);
+ else
+ sampleResized.convertTo(sampleFloat, CV_32FC1);
+
+ // normalize
+ cv::Mat sampleNormalized;
+ cv::Mat meanMat;
+ if (mCh == 3)
+ meanMat = cv::Mat(sampleFloat.size(), CV_32FC3,
+ cv::Scalar((float) mMean, (float) mMean,
+ (float) mMean));
+ else
+ meanMat = cv::Mat(sampleFloat.size(), CV_32FC1,
+ cv::Scalar((float) mMean));
+
+ cv::subtract(sampleFloat, meanMat, sampleNormalized);
+
+ sampleNormalized /= static_cast<float>(mDeviation);
+
+ sampleNormalized.convertTo(cvDst, data_type);
+
+ return MEDIA_VISION_ERROR_NONE;
+ }
-int Inference::Preprocess(cv::Mat cvImg, cv::Mat cvDst, int data_type)
-{
- mSourceSize = cvImg.size();
- int width = mInputSize.width;
- int height = mInputSize.height;
-
- cv::Mat sample;
- if (cvImg.channels() == 3 && mCh == 1)
- cv::cvtColor(cvImg, sample, cv::COLOR_BGR2GRAY);
- else
- sample = cvImg;
-
- // size
- cv::Mat sampleResized;
- if (sample.size() != cv::Size(width, height))
- cv::resize(sample, sampleResized, cv::Size(width, height));
- else
- sampleResized = sample;
-
- // type
- cv::Mat sampleFloat;
- if (mCh == 3)
- sampleResized.convertTo(sampleFloat, CV_32FC3);
- else
- sampleResized.convertTo(sampleFloat, CV_32FC1);
-
- // normalize
- cv::Mat sampleNormalized;
- cv::Mat meanMat;
- if (mCh == 3)
- meanMat = cv::Mat(sampleFloat.size(), CV_32FC3, cv::Scalar((float)mMean, (float)mMean, (float)mMean));
- else
- meanMat = cv::Mat(sampleFloat.size(), CV_32FC1, cv::Scalar((float)mMean));
-
- cv::subtract(sampleFloat, meanMat, sampleNormalized);
-
- sampleNormalized /= static_cast<float>(mDeviation);
-
- sampleNormalized.convertTo(cvDst, data_type);
-
- return MEDIA_VISION_ERROR_NONE;
-}
-
-int Inference::SetUserFile(std::string filename)
-{
- std::ifstream fp(filename.c_str());
- if (!fp.is_open()) {
- return MEDIA_VISION_ERROR_INVALID_PATH;
- }
-
- std::string userListName;
- while (!fp.eof()) {
- std::getline(fp, userListName);
- if (userListName.length())
- mUserListName.push_back(userListName);
- }
-
- fp.close();
-
- return MEDIA_VISION_ERROR_NONE;
-}
-
-void Inference::ConfigureModelFiles(const std::string modelConfigFilePath,
- const std::string modelWeightFilePath,
- const std::string modelUserFilePath)
-{
- LOGI("ENTER");
+ int Inference::SetUserFile(std::string filename)
+ {
+ std::ifstream fp(filename.c_str());
+ if (!fp.is_open()) {
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
- mConfig.mConfigFilePath = modelConfigFilePath;
- mConfig.mWeightFilePath = modelWeightFilePath;
- mConfig.mUserFilePath = modelUserFilePath;
+ std::string userListName;
+ while (!fp.eof()) {
+ std::getline(fp, userListName);
+ if (userListName.length())
+ mUserListName.push_back(userListName);
+ }
- LOGI("LEAVE");
-}
+ fp.close();
-void Inference::ConfigureTensorInfo(int width,
- int height,
- int dim,
- int ch,
- double stdValue,
- double meanValue)
-{
- LOGI("ENTER");
-
- mConfig.mTensorInfo = {width, height, dim, ch};
- mConfig.mStdValue = stdValue;
- mConfig.mMeanValue = meanValue;
-
- LOGI("LEAVE");
-}
-
-void Inference::ConfigureInputInfo(int width,
- int height,
- int dim,
- int ch,
- double stdValue,
- double meanValue,
- int dataType,
- const std::vector<std::string> names)
-{
- LOGI("ENTER");
-
- mConfig.mTensorInfo = {width, height, dim, ch};
- mConfig.mStdValue = stdValue;
- mConfig.mMeanValue = meanValue;
- mConfig.mDataType = static_cast<mv_inference_data_type_e>(dataType);
- mConfig.mInputLayerNames = names;
-
- inference_engine_layer_property property;
- // In case of that a inference plugin deosn't support to get properties,
- // the tensor info given by a user will be used.
- // If the plugin supports that, the given info will be ignored.
- inference_engine_tensor_info tensor_info;
-
- tensor_info.data_type = ConvertToIE(dataType);
-
- // In case of OpenCV, only supports NCHW
- tensor_info.shape_type = INFERENCE_TENSOR_SHAPE_NCHW;
- // modify to handle multiple tensor infos
- tensor_info.shape.push_back(mConfig.mTensorInfo.dim);
- tensor_info.shape.push_back(mConfig.mTensorInfo.ch);
- tensor_info.shape.push_back(mConfig.mTensorInfo.height);
- tensor_info.shape.push_back(mConfig.mTensorInfo.width);
-
- tensor_info.size = 1;
- for (std::vector<size_t>::iterator iter = tensor_info.shape.begin();
- iter != tensor_info.shape.end(); ++iter) {
- tensor_info.size *= (*iter);
+ return MEDIA_VISION_ERROR_NONE;
}
- property.layer_names = mConfig.mInputLayerNames;
- property.tensor_infos.push_back(tensor_info);
+ void Inference::ConfigureModelFiles(const std::string modelConfigFilePath,
+ const std::string modelWeightFilePath,
+ const std::string modelUserFilePath)
+ {
+ LOGI("ENTER");
+
+ mConfig.mConfigFilePath = modelConfigFilePath;
+ mConfig.mWeightFilePath = modelWeightFilePath;
+ mConfig.mUserFilePath = modelUserFilePath;
- int ret = mBackend->SetInputLayerProperty(property);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("Fail to set input layer property");
+ LOGI("LEAVE");
}
- LOGI("LEAVE");
-}
+ void Inference::ConfigureTensorInfo(int width, int height, int dim, int ch,
+ double stdValue, double meanValue)
+ {
+ LOGI("ENTER");
-void Inference::ConfigureOutputInfo(const std::vector<std::string> names)
-{
- LOGI("ENTER");
+ mConfig.mTensorInfo = { width, height, dim, ch };
+ mConfig.mStdValue = stdValue;
+ mConfig.mMeanValue = meanValue;
- mConfig.mOutputLayerNames = names;
+ LOGI("LEAVE");
+ }
- inference_engine_layer_property property;
+ void Inference::ConfigureInputInfo(int width, int height, int dim, int ch,
+ double stdValue, double meanValue,
+ int dataType,
+ const std::vector<std::string> names)
+ {
+ LOGI("ENTER");
+
+ mConfig.mTensorInfo = { width, height, dim, ch };
+ mConfig.mStdValue = stdValue;
+ mConfig.mMeanValue = meanValue;
+ mConfig.mDataType = static_cast<mv_inference_data_type_e>(dataType);
+ mConfig.mInputLayerNames = names;
+
+ inference_engine_layer_property property;
+		// In case an inference plugin doesn't support getting properties,
+ // the tensor info given by a user will be used.
+ // If the plugin supports that, the given info will be ignored.
+ inference_engine_tensor_info tensor_info;
+
+ tensor_info.data_type = ConvertToIE(dataType);
+
+ // In case of OpenCV, only supports NCHW
+ tensor_info.shape_type = INFERENCE_TENSOR_SHAPE_NCHW;
+ // modify to handle multiple tensor infos
+ tensor_info.shape.push_back(mConfig.mTensorInfo.dim);
+ tensor_info.shape.push_back(mConfig.mTensorInfo.ch);
+ tensor_info.shape.push_back(mConfig.mTensorInfo.height);
+ tensor_info.shape.push_back(mConfig.mTensorInfo.width);
+
+ tensor_info.size = 1;
+ for (std::vector<size_t>::iterator iter = tensor_info.shape.begin();
+ iter != tensor_info.shape.end(); ++iter) {
+ tensor_info.size *= (*iter);
+ }
- property.layer_names = names;
- int ret = mBackend->SetOutputLayerProperty(property);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("Fail to set output layer property");
- }
+ property.layer_names = mConfig.mInputLayerNames;
+ property.tensor_infos.push_back(tensor_info);
- LOGI("LEAVE");
-}
+ int ret = mBackend->SetInputLayerProperty(property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Fail to set input layer property");
+ }
-int Inference::ConfigureBackendType(const mv_inference_backend_type_e backendType)
-{
- std::pair<std::string, bool> backend = mSupportedInferenceBackend[backendType];
- if (backend.second == false) {
- LOGE("%s type is not supported", (backend.first).c_str());
- return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
+ LOGI("LEAVE");
}
- LOGI("backend engine : %d", backendType);
+ void Inference::ConfigureOutputInfo(const std::vector<std::string> names)
+ {
+ LOGI("ENTER");
- mConfig.mBackedType = backendType;
+ mConfig.mOutputLayerNames = names;
- return MEDIA_VISION_ERROR_NONE;
-}
+ inference_engine_layer_property property;
-int Inference::ConfigureTargetTypes(const int targetType)
-{
- // Check if given target types are valid or not.
- if (MV_INFERENCE_TARGET_NONE >= targetType || MV_INFERENCE_TARGET_MAX <= targetType) {
- LOGE("Invalid target device.");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ property.layer_names = names;
+ int ret = mBackend->SetOutputLayerProperty(property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Fail to set output layer property");
+ }
- LOGI("Before convering target types : %d", targetType);
-
- unsigned int new_type = MV_INFERENCE_TARGET_DEVICE_NONE;
-
- // Convert old type to new one.
- switch (targetType) {
- case MV_INFERENCE_TARGET_CPU:
- new_type = MV_INFERENCE_TARGET_DEVICE_CPU;
- break;
- case MV_INFERENCE_TARGET_GPU:
- new_type = MV_INFERENCE_TARGET_DEVICE_GPU;
- break;
- case MV_INFERENCE_TARGET_CUSTOM:
- new_type = MV_INFERENCE_TARGET_DEVICE_CUSTOM;
- break;
+ LOGI("LEAVE");
}
- LOGI("After convering target types : %d", new_type);
+ int Inference::ConfigureBackendType(
+ const mv_inference_backend_type_e backendType)
+ {
+ std::pair<std::string, bool> backend =
+ mSupportedInferenceBackend[backendType];
+ if (backend.second == false) {
+ LOGE("%s type is not supported", (backend.first).c_str());
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
+ }
- mConfig.mTargetTypes = new_type;
+ LOGI("backend engine : %d", backendType);
- return MEDIA_VISION_ERROR_NONE;
-}
+ mConfig.mBackedType = backendType;
-int Inference::ConfigureTargetDevices(const int targetDevices)
-{
- // Check if given target types are valid or not.
- if (MV_INFERENCE_TARGET_DEVICE_NONE >= targetDevices || MV_INFERENCE_TARGET_DEVICE_MAX <= targetDevices) {
- LOGE("Invalid target device.");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ return MEDIA_VISION_ERROR_NONE;
}
- LOGI("target devices : %d", targetDevices);
+ int Inference::ConfigureTargetTypes(const int targetType)
+ {
+ // Check if given target types are valid or not.
+ if (MV_INFERENCE_TARGET_NONE >= targetType ||
+ MV_INFERENCE_TARGET_MAX <= targetType) {
+ LOGE("Invalid target device.");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
- mConfig.mTargetTypes = targetDevices;
+		LOGI("Before converting target types : %d", targetType);
+
+ unsigned int new_type = MV_INFERENCE_TARGET_DEVICE_NONE;
+
+ // Convert old type to new one.
+ switch (targetType) {
+ case MV_INFERENCE_TARGET_CPU:
+ new_type = MV_INFERENCE_TARGET_DEVICE_CPU;
+ break;
+ case MV_INFERENCE_TARGET_GPU:
+ new_type = MV_INFERENCE_TARGET_DEVICE_GPU;
+ break;
+ case MV_INFERENCE_TARGET_CUSTOM:
+ new_type = MV_INFERENCE_TARGET_DEVICE_CUSTOM;
+ break;
+ }
- return MEDIA_VISION_ERROR_NONE;
-}
+		LOGI("After converting target types : %d", new_type);
-void Inference::ConfigureOutput(const int maxOutputNumbers)
-{
- mConfig.mMaxOutputNumbers = std::max(std::min(maxOutputNumbers, MV_INFERENCE_OUTPUT_NUMBERS_MAX),
- MV_INFERENCE_OUTPUT_NUMBERS_MIN);
-}
+ mConfig.mTargetTypes = new_type;
-void Inference::ConfigureThreshold(const double threshold)
-{
- mConfig.mConfidenceThresHold = std::max(std::min(threshold, MV_INFERENCE_CONFIDENCE_THRESHOLD_MAX),
- MV_INFERENCE_CONFIDENCE_THRESHOLD_MIN);
-}
+ return MEDIA_VISION_ERROR_NONE;
+ }
-void Inference::CleanupTensorBuffers(void)
-{
- LOGI("ENTER");
+ int Inference::ConfigureTargetDevices(const int targetDevices)
+ {
+ // Check if given target types are valid or not.
+ if (MV_INFERENCE_TARGET_DEVICE_NONE >= targetDevices ||
+ MV_INFERENCE_TARGET_DEVICE_MAX <= targetDevices) {
+ LOGE("Invalid target device.");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
- if (!mInputTensorBuffers.empty()) {
- std::vector<inference_engine_tensor_buffer>::iterator iter;
- for (iter = mInputTensorBuffers.begin(); iter != mInputTensorBuffers.end(); iter++) {
- inference_engine_tensor_buffer tensor_buffer = *iter;
+ LOGI("target devices : %d", targetDevices);
- // If tensor buffer owner is a backend then skip to release the tensor buffer.
- // This tensor buffer will be released by the backend.
- if (tensor_buffer.owner_is_backend) {
- continue;
- }
+ mConfig.mTargetTypes = targetDevices;
- if (tensor_buffer.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
- delete[] static_cast<float *>(tensor_buffer.buffer);
- else
- delete[] static_cast<unsigned char *>(tensor_buffer.buffer);
- }
+ return MEDIA_VISION_ERROR_NONE;
+ }
- LOGI("input tensor buffers(%zu) have been released.", mInputTensorBuffers.size());
- std::vector<inference_engine_tensor_buffer>().swap(mInputTensorBuffers);
+ void Inference::ConfigureOutput(const int maxOutputNumbers)
+ {
+ mConfig.mMaxOutputNumbers = std::max(
+ std::min(maxOutputNumbers, MV_INFERENCE_OUTPUT_NUMBERS_MAX),
+ MV_INFERENCE_OUTPUT_NUMBERS_MIN);
}
- if (!mOutputTensorBuffers.empty()) {
- std::vector<inference_engine_tensor_buffer>::iterator iter;
- for (iter = mOutputTensorBuffers.begin(); iter != mOutputTensorBuffers.end(); iter++) {
- inference_engine_tensor_buffer tensor_buffer = *iter;
+ void Inference::ConfigureThreshold(const double threshold)
+ {
+ mConfig.mConfidenceThresHold = std::max(
+ std::min(threshold, MV_INFERENCE_CONFIDENCE_THRESHOLD_MAX),
+ MV_INFERENCE_CONFIDENCE_THRESHOLD_MIN);
+ }
- // If tensor buffer owner is a backend then skip to release the tensor buffer.
- // This tensor buffer will be released by the backend.
- if (tensor_buffer.owner_is_backend) {
- continue;
+ void Inference::CleanupTensorBuffers(void)
+ {
+ LOGI("ENTER");
+
+ if (!mInputTensorBuffers.empty()) {
+ std::vector<inference_engine_tensor_buffer>::iterator iter;
+ for (iter = mInputTensorBuffers.begin();
+ iter != mInputTensorBuffers.end(); iter++) {
+ inference_engine_tensor_buffer tensor_buffer = *iter;
+
+				// If the tensor buffer is owned by the backend, skip releasing it.
+				// It will be released by the backend.
+ if (tensor_buffer.owner_is_backend) {
+ continue;
+ }
+
+ if (tensor_buffer.data_type ==
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
+ delete[] static_cast<float *>(tensor_buffer.buffer);
+ else
+ delete[] static_cast<unsigned char *>(tensor_buffer.buffer);
}
- if (tensor_buffer.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
- delete[] static_cast<float *>(tensor_buffer.buffer);
- else
- delete[] static_cast<unsigned char *>(tensor_buffer.buffer);
+ LOGI("input tensor buffers(%zu) have been released.",
+ mInputTensorBuffers.size());
+ std::vector<inference_engine_tensor_buffer>().swap(
+ mInputTensorBuffers);
}
- LOGI("output tensor buffers(%zu) have been released.", mOutputTensorBuffers.size());
- std::vector<inference_engine_tensor_buffer>().swap(mOutputTensorBuffers);
- }
+ if (!mOutputTensorBuffers.empty()) {
+ std::vector<inference_engine_tensor_buffer>::iterator iter;
+ for (iter = mOutputTensorBuffers.begin();
+ iter != mOutputTensorBuffers.end(); iter++) {
+ inference_engine_tensor_buffer tensor_buffer = *iter;
+
+				// If the tensor buffer is owned by the backend, skip releasing it.
+				// It will be released by the backend.
+ if (tensor_buffer.owner_is_backend) {
+ continue;
+ }
+
+ if (tensor_buffer.data_type ==
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
+ delete[] static_cast<float *>(tensor_buffer.buffer);
+ else
+ delete[] static_cast<unsigned char *>(tensor_buffer.buffer);
+ }
- LOGI("LEAVE");
-}
+ LOGI("output tensor buffers(%zu) have been released.",
+ mOutputTensorBuffers.size());
+ std::vector<inference_engine_tensor_buffer>().swap(
+ mOutputTensorBuffers);
+ }
-int Inference::PrepareTenosrBuffers(void)
-{
- // If there are input and output tensor buffers allocated before then release the buffers.
- // They will be allocated again according to a new model file to be loaded.
- CleanupTensorBuffers();
-
- // IF model file is loaded again then the model type could be different so
- // clean up input and output layer properties so that they can be updated again
- // after reloading the model file.
- if (!mInputLayerProperty.tensor_infos.empty()) {
- mInputLayerProperty.tensor_infos.clear();
- std::vector<inference_engine_tensor_info>().swap(mInputLayerProperty.tensor_infos);
- }
- if (!mOutputLayerProperty.tensor_infos.empty()) {
- mOutputLayerProperty.tensor_infos.clear();
- std::vector<inference_engine_tensor_info>().swap(mOutputLayerProperty.tensor_infos);
+ LOGI("LEAVE");
}
- // Get input tensor buffers from a backend engine if the backend engine allocated.
- int ret = mBackend->GetInputTensorBuffers(mInputTensorBuffers);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("Fail to get input tensor buffers from backend engine.");
- return ConvertEngineErrorToVisionError(ret);
- }
+ int Inference::PrepareTenosrBuffers(void)
+ {
+ // If there are input and output tensor buffers allocated before then release the buffers.
+ // They will be allocated again according to a new model file to be loaded.
+ CleanupTensorBuffers();
+
+		// If the model file is loaded again, the model type could be different, so
+		// clean up input and output layer properties so that they can be updated
+		// again after reloading the model file.
+ if (!mInputLayerProperty.tensor_infos.empty()) {
+ mInputLayerProperty.tensor_infos.clear();
+ std::vector<inference_engine_tensor_info>().swap(
+ mInputLayerProperty.tensor_infos);
+ }
+ if (!mOutputLayerProperty.tensor_infos.empty()) {
+ mOutputLayerProperty.tensor_infos.clear();
+ std::vector<inference_engine_tensor_info>().swap(
+ mOutputLayerProperty.tensor_infos);
+ }
- ret = mBackend->GetInputLayerProperty(mInputLayerProperty);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("Fail to get input layer property from backend engine.");
- return ConvertEngineErrorToVisionError(ret);
- }
+		// Get input tensor buffers from the backend engine if it has allocated them.
+ int ret = mBackend->GetInputTensorBuffers(mInputTensorBuffers);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Fail to get input tensor buffers from backend engine.");
+ return ConvertEngineErrorToVisionError(ret);
+ }
- // If the backend engine isn't able to allocate input tensor buffers internally,
- // then allocate the buffers at here.
- if (mInputTensorBuffers.empty()) {
- for (int i = 0; i < mInputLayerProperty.tensor_infos.size(); ++i) {
- inference_engine_tensor_info tensor_info = mInputLayerProperty.tensor_infos[i];
- inference_engine_tensor_buffer tensor_buffer;
- if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
- tensor_buffer.buffer = new float[tensor_info.size];
- tensor_buffer.size = tensor_info.size * 4;
- } else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
- tensor_buffer.buffer = new unsigned char[tensor_info.size];
- tensor_buffer.size = tensor_info.size;
- } else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT16) {
- tensor_buffer.buffer = new short[tensor_info.size];
- tensor_buffer.size = tensor_info.size;
- } else {
- LOGE("Invalid input tensor data type.");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ ret = mBackend->GetInputLayerProperty(mInputLayerProperty);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Fail to get input layer property from backend engine.");
+ return ConvertEngineErrorToVisionError(ret);
+ }
- if (tensor_buffer.buffer == NULL) {
- LOGE("Fail to allocate input tensor buffer.");
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ // If the backend engine isn't able to allocate input tensor buffers internally,
+		// then allocate the buffers here.
+ if (mInputTensorBuffers.empty()) {
+ for (int i = 0; i < mInputLayerProperty.tensor_infos.size(); ++i) {
+ inference_engine_tensor_info tensor_info =
+ mInputLayerProperty.tensor_infos[i];
+ inference_engine_tensor_buffer tensor_buffer;
+ if (tensor_info.data_type ==
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
+ tensor_buffer.buffer = new float[tensor_info.size];
+ tensor_buffer.size = tensor_info.size * 4;
+ } else if (tensor_info.data_type ==
+ INFERENCE_TENSOR_DATA_TYPE_UINT8) {
+ tensor_buffer.buffer = new unsigned char[tensor_info.size];
+ tensor_buffer.size = tensor_info.size;
+ } else if (tensor_info.data_type ==
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT16) {
+ tensor_buffer.buffer = new short[tensor_info.size];
+ tensor_buffer.size = tensor_info.size;
+ } else {
+ LOGE("Invalid input tensor data type.");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (tensor_buffer.buffer == NULL) {
+ LOGE("Fail to allocate input tensor buffer.");
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
+
+ LOGI("Allocated input tensor buffer(size = %zu, data type = %d)",
+ tensor_info.size, tensor_info.data_type);
+ tensor_buffer.owner_is_backend = 0;
+ tensor_buffer.data_type = tensor_info.data_type;
+ mInputTensorBuffers.push_back(tensor_buffer);
}
-
- LOGI("Allocated input tensor buffer(size = %zu, data type = %d)", tensor_info.size, tensor_info.data_type);
- tensor_buffer.owner_is_backend = 0;
- tensor_buffer.data_type = tensor_info.data_type;
- mInputTensorBuffers.push_back(tensor_buffer);
}
- }
- LOGI("Input tensor buffer count is %zu", mInputTensorBuffers.size());
+ LOGI("Input tensor buffer count is %zu", mInputTensorBuffers.size());
- // Get output tensor buffers from a backend engine if the backend engine allocated.
- ret = mBackend->GetOutputTensorBuffers(mOutputTensorBuffers);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("Fail to get output tensor buffers from backend engine.");
- return ConvertEngineErrorToVisionError(ret);
- }
-
- ret = mBackend->GetOutputLayerProperty(mOutputLayerProperty);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("Fail to get output layer property from backend engine.");
- return ConvertEngineErrorToVisionError(ret);
- }
+		// Get output tensor buffers from the backend engine if it has allocated them.
+ ret = mBackend->GetOutputTensorBuffers(mOutputTensorBuffers);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Fail to get output tensor buffers from backend engine.");
+ return ConvertEngineErrorToVisionError(ret);
+ }
- // If the backend engine isn't able to allocate output tensor buffers internally,
- // then allocate the buffers at here.
- if (mOutputTensorBuffers.empty()) {
- for (int i = 0; i < mOutputLayerProperty.tensor_infos.size(); ++i) {
- inference_engine_tensor_info tensor_info = mOutputLayerProperty.tensor_infos[i];
- inference_engine_tensor_buffer tensor_buffer;
- if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
- tensor_buffer.buffer = new float[tensor_info.size];
- tensor_buffer.size = tensor_info.size * 4;
- } else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
- tensor_buffer.buffer = new char[tensor_info.size];
- tensor_buffer.size = tensor_info.size;
- } else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT16) {
- tensor_buffer.buffer = new short[tensor_info.size];
- tensor_buffer.size = tensor_info.size;
- } else {
- LOGE("Invalid output tensor data type.");
- CleanupTensorBuffers();
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ ret = mBackend->GetOutputLayerProperty(mOutputLayerProperty);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Fail to get output layer property from backend engine.");
+ return ConvertEngineErrorToVisionError(ret);
+ }
- if (tensor_buffer.buffer == NULL) {
- LOGE("Fail to allocate output tensor buffer.");
- CleanupTensorBuffers();
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ // If the backend engine isn't able to allocate output tensor buffers internally,
+		// then allocate the buffers here.
+ if (mOutputTensorBuffers.empty()) {
+ for (int i = 0; i < mOutputLayerProperty.tensor_infos.size(); ++i) {
+ inference_engine_tensor_info tensor_info =
+ mOutputLayerProperty.tensor_infos[i];
+ inference_engine_tensor_buffer tensor_buffer;
+ if (tensor_info.data_type ==
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
+ tensor_buffer.buffer = new float[tensor_info.size];
+ tensor_buffer.size = tensor_info.size * 4;
+ } else if (tensor_info.data_type ==
+ INFERENCE_TENSOR_DATA_TYPE_UINT8) {
+ tensor_buffer.buffer = new char[tensor_info.size];
+ tensor_buffer.size = tensor_info.size;
+ } else if (tensor_info.data_type ==
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT16) {
+ tensor_buffer.buffer = new short[tensor_info.size];
+ tensor_buffer.size = tensor_info.size;
+ } else {
+ LOGE("Invalid output tensor data type.");
+ CleanupTensorBuffers();
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (tensor_buffer.buffer == NULL) {
+ LOGE("Fail to allocate output tensor buffer.");
+ CleanupTensorBuffers();
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
+
+ LOGI("Allocated output tensor buffer(size = %zu, data type = %d)",
+ tensor_info.size, tensor_info.data_type);
+
+ tensor_buffer.owner_is_backend = 0;
+ tensor_buffer.data_type = tensor_info.data_type;
+ mOutputTensorBuffers.push_back(tensor_buffer);
}
+ }
- LOGI("Allocated output tensor buffer(size = %zu, data type = %d)", tensor_info.size, tensor_info.data_type);
+ LOGI("Output tensor buffer count is %zu", mOutputTensorBuffers.size());
- tensor_buffer.owner_is_backend = 0;
- tensor_buffer.data_type = tensor_info.data_type;
- mOutputTensorBuffers.push_back(tensor_buffer);
- }
+ return MEDIA_VISION_ERROR_NONE;
}
- LOGI("Output tensor buffer count is %zu", mOutputTensorBuffers.size());
+ int Inference::FillOutputResult(tensor_t &outputData)
+ {
+ for (int i = 0; i < mOutputLayerProperty.tensor_infos.size(); ++i) {
+ inference_engine_tensor_info tensor_info =
+ mOutputLayerProperty.tensor_infos[i];
- return MEDIA_VISION_ERROR_NONE;
-}
+ std::vector<int> tmpDimInfo;
+ for (int i = 0; i < static_cast<int>(tensor_info.shape.size());
+ i++) {
+ tmpDimInfo.push_back(tensor_info.shape[i]);
+ }
-int Inference::FillOutputResult(tensor_t &outputData)
-{
- for (int i = 0; i < mOutputLayerProperty.tensor_infos.size(); ++i) {
- inference_engine_tensor_info tensor_info = mOutputLayerProperty.tensor_infos[i];
+ outputData.dimInfo.push_back(tmpDimInfo);
- std::vector<int> tmpDimInfo;
- for (int i = 0; i < static_cast<int>(tensor_info.shape.size()); i++) {
- tmpDimInfo.push_back(tensor_info.shape[i]);
- }
+ // Normalize output tensor data converting it to float type in case of quantized model.
+ if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
+ float *new_buf = new float[tensor_info.size];
+ if (new_buf == NULL) {
+ LOGE("Fail to allocate a new output tensor buffer.");
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
- outputData.dimInfo.push_back(tmpDimInfo);
+ unsigned char *ori_buf = static_cast<unsigned char *>(
+ mOutputTensorBuffers[i].buffer);
- // Normalize output tensor data converting it to float type in case of quantized model.
- if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
- float *new_buf = new float[tensor_info.size];
- if (new_buf == NULL) {
- LOGE("Fail to allocate a new output tensor buffer.");
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
+ for (int j = 0; j < tensor_info.size; j++) {
+ new_buf[j] = static_cast<float>(ori_buf[j]) / 255.0f;
+ }
- unsigned char *ori_buf = static_cast<unsigned char *>(mOutputTensorBuffers[i].buffer);
+				// replace the original buffer with the new one, and release the original.
+ mOutputTensorBuffers[i].buffer = new_buf;
- for (int j = 0; j < tensor_info.size; j++) {
- new_buf[j] = static_cast<float>(ori_buf[j]) / 255.0f;
+ if (!mOutputTensorBuffers[i].owner_is_backend)
+ delete[] ori_buf;
}
- // replace original buffer with new one, and release origin one.
- mOutputTensorBuffers[i].buffer = new_buf;
+ if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT16) {
+ float *new_buf = new float[tensor_info.size];
+ if (new_buf == NULL) {
+ LOGE("Fail to allocate a new output tensor buffer.");
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
- if (!mOutputTensorBuffers[i].owner_is_backend)
- delete[] ori_buf;
- }
+ short *ori_buf =
+ static_cast<short *>(mOutputTensorBuffers[i].buffer);
- if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT16) {
- float *new_buf = new float[tensor_info.size];
- if (new_buf == NULL) {
- LOGE("Fail to allocate a new output tensor buffer.");
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
+ for (int j = 0; j < tensor_info.size; j++) {
+ new_buf[j] = static_cast<float>(ori_buf[j]);
+ }
- short *ori_buf = static_cast<short *>(mOutputTensorBuffers[i].buffer);
+				// replace the original buffer with the new one, and release the original.
+ mOutputTensorBuffers[i].buffer = new_buf;
- for (int j = 0; j < tensor_info.size; j++) {
- new_buf[j] = static_cast<float>(ori_buf[j]);
+ if (!mOutputTensorBuffers[i].owner_is_backend)
+ delete[] ori_buf;
}
- // replace original buffer with new one, and release origin one.
- mOutputTensorBuffers[i].buffer = new_buf;
-
- if (!mOutputTensorBuffers[i].owner_is_backend)
- delete[] ori_buf;
+ outputData.data.push_back(
+ static_cast<void *>(mOutputTensorBuffers[i].buffer));
}
- outputData.data.push_back(static_cast<void *>(mOutputTensorBuffers[i].buffer));
+ return MEDIA_VISION_ERROR_NONE;
}
- return MEDIA_VISION_ERROR_NONE;
-}
+ int Inference::Bind(void)
+ {
+ LOGI("ENTER");
-int Inference::Bind(void)
-{
- LOGI("ENTER");
+ if (mConfig.mBackedType <= MV_INFERENCE_BACKEND_NONE ||
+ mConfig.mBackedType >= MV_INFERENCE_BACKEND_MAX) {
+ LOGE("NOT SUPPORTED BACKEND %d", mConfig.mBackedType);
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
- if (mConfig.mBackedType <= MV_INFERENCE_BACKEND_NONE ||
- mConfig.mBackedType >= MV_INFERENCE_BACKEND_MAX) {
- LOGE("NOT SUPPORTED BACKEND %d", mConfig.mBackedType);
- return MEDIA_VISION_ERROR_INVALID_OPERATION;
- }
+ auto iter = mSupportedInferenceBackend.find(mConfig.mBackedType);
+ std::string backendName = (iter->second).first;
+ LOGI("backend string name: %s", backendName.c_str());
+
+ inference_engine_config config = {
+ .backend_name = backendName,
+ .backend_type = mConfig.mBackedType,
+			// By default, the target device is CPU. If the user defined a desired device type
+			// in the JSON file, then the device type will be set by the Load callback.
+ .target_devices = mConfig.mTargetTypes,
+ };
+
+ // Create a backend class object.
+ try {
+ mBackend = new InferenceEngineCommon();
+ } catch (const std::bad_alloc &ex) {
+ LOGE("Fail to create backend : %s", ex.what());
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
- auto iter = mSupportedInferenceBackend.find(mConfig.mBackedType);
- std::string backendName = (iter->second).first;
- LOGI("backend string name: %s", backendName.c_str());
-
- inference_engine_config config = {
- .backend_name = backendName,
- .backend_type = mConfig.mBackedType,
- // As a default, Target device is CPU. If user defined desired device type in json file
- // then the device type will be set by Load callback.
- .target_devices = mConfig.mTargetTypes,
- };
-
- // Create a backend class object.
- try {
- mBackend = new InferenceEngineCommon();
- } catch (const std::bad_alloc &ex) {
- LOGE("Fail to create backend : %s", ex.what());
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
+ // Bind a backend library.
+ int ret = mBackend->BindBackend(&config);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Fail to bind backend library.(%d)", mConfig.mBackedType);
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
- // Bind a backend library.
- int ret = mBackend->BindBackend(&config);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("Fail to bind backend library.(%d)", mConfig.mBackedType);
- return MEDIA_VISION_ERROR_INVALID_OPERATION;
- }
+ // Get capacity information from a backend.
+ ret = mBackend->GetBackendCapacity(&mBackendCapacity);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get backend capacity.");
+ return ret;
+ }
- // Get capacity information from a backend.
- ret = mBackend->GetBackendCapacity(&mBackendCapacity);
- if (ret != MEDIA_VISION_ERROR_NONE) {
- LOGE("Fail to get backend capacity.");
- return ret;
+ LOGI("LEAVE");
+
+ return MEDIA_VISION_ERROR_NONE;
}
- LOGI("LEAVE");
+ int Inference::Prepare(void)
+ {
+ LOGI("ENTER");
- return MEDIA_VISION_ERROR_NONE;
-}
+ mCh = mConfig.mTensorInfo.ch;
+ mDim = mConfig.mTensorInfo.dim;
+ mInputSize =
+ cv::Size(mConfig.mTensorInfo.width, mConfig.mTensorInfo.height);
+ LOGI("InputSize is %d x %d\n", mInputSize.width, mInputSize.height);
-int Inference::Prepare(void)
-{
- LOGI("ENTER");
+ mDeviation = mConfig.mStdValue;
+ mMean = mConfig.mMeanValue;
+ LOGI("mean %.4f, deviation %.4f", mMean, mDeviation);
- mCh = mConfig.mTensorInfo.ch;
- mDim = mConfig.mTensorInfo.dim;
- mInputSize = cv::Size(mConfig.mTensorInfo.width, mConfig.mTensorInfo.height);
- LOGI("InputSize is %d x %d\n", mInputSize.width, mInputSize.height);
+ mOutputNumbers = mConfig.mMaxOutputNumbers;
+ LOGI("outputNumber %d", mOutputNumbers);
- mDeviation = mConfig.mStdValue;
- mMean = mConfig.mMeanValue;
- LOGI("mean %.4f, deviation %.4f", mMean, mDeviation);
+ mThreshold = mConfig.mConfidenceThresHold;
+ LOGI("threshold %.4f", mThreshold);
- mOutputNumbers = mConfig.mMaxOutputNumbers;
- LOGI("outputNumber %d", mOutputNumbers);
+ // Check if backend supports a given target device/devices or not.
+ if (mConfig.mTargetTypes & MV_INFERENCE_TARGET_DEVICE_CPU) {
+ if (!(mBackendCapacity.supported_accel_devices &
+ INFERENCE_TARGET_CPU)) {
+ LOGE("Backend doesn't support CPU device as an accelerator.");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+ }
- mThreshold = mConfig.mConfidenceThresHold;
- LOGI("threshold %.4f", mThreshold);
+ if (mConfig.mTargetTypes & MV_INFERENCE_TARGET_DEVICE_GPU) {
+ if (!(mBackendCapacity.supported_accel_devices &
+ INFERENCE_TARGET_GPU)) {
+				LOGE("Backend doesn't support GPU device as an accelerator.");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+ }
- // Check if backend supports a given target device/devices or not.
- if (mConfig.mTargetTypes & MV_INFERENCE_TARGET_DEVICE_CPU) {
- if (!(mBackendCapacity.supported_accel_devices & INFERENCE_TARGET_CPU)) {
- LOGE("Backend doesn't support CPU device as an accelerator.");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ if (mConfig.mTargetTypes & MV_INFERENCE_TARGET_DEVICE_CUSTOM) {
+ if (!(mBackendCapacity.supported_accel_devices &
+ INFERENCE_TARGET_CUSTOM)) {
+				LOGE("Backend doesn't support custom device as an accelerator.");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
}
+
+ mBackend->SetTargetDevices(ConvertTargetTypes(mConfig.mTargetTypes));
+
+ LOGI("LEAVE");
+
+ return MEDIA_VISION_ERROR_NONE;
}
- if (mConfig.mTargetTypes & MV_INFERENCE_TARGET_DEVICE_GPU) {
- if (!(mBackendCapacity.supported_accel_devices & INFERENCE_TARGET_GPU)) {
- LOGE("Backend doesn't support CPU device as an accelerator.");
+ int Inference::Load(void)
+ {
+ LOGI("ENTER");
+
+ std::string label_file = mConfig.mUserFilePath;
+ size_t userFileLength = label_file.length();
+ if (userFileLength > 0 && access(label_file.c_str(), F_OK)) {
+			LOGE("Label file does not exist at path [%s]", label_file.c_str());
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
- }
- if (mConfig.mTargetTypes & MV_INFERENCE_TARGET_DEVICE_CUSTOM) {
- if (!(mBackendCapacity.supported_accel_devices & INFERENCE_TARGET_CUSTOM)) {
- LOGE("Backend doesn't support CPU device as an accelerator.");
+ int ret = (userFileLength > 0) ? SetUserFile(label_file) :
+ MEDIA_VISION_ERROR_NONE;
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to load label file.");
+ return ret;
+ }
+
+ // Check if model file is valid or not.
+ std::string ext_str = mConfig.mWeightFilePath.substr(
+ mConfig.mWeightFilePath.find_last_of(".") + 1);
+ std::map<std::string, int>::iterator key = mModelFormats.find(ext_str);
+ if (key == mModelFormats.end()) {
+ LOGE("Invalid model file format.(ext = %s)", ext_str.c_str());
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
- }
- mBackend->SetTargetDevices(ConvertTargetTypes(mConfig.mTargetTypes));
+ LOGI("%s model file has been detected.", ext_str.c_str());
+
+ std::vector<std::string> models;
+
+ inference_model_format_e model_format =
+ static_cast<inference_model_format_e>(key->second);
+
+ // Push model file information to models vector properly according to detected model format.
+ switch (model_format) {
+ case INFERENCE_MODEL_CAFFE:
+ case INFERENCE_MODEL_TF:
+ case INFERENCE_MODEL_DARKNET:
+ case INFERENCE_MODEL_DLDT:
+ case INFERENCE_MODEL_ONNX:
+ case INFERENCE_MODEL_VIVANTE:
+ models.push_back(mConfig.mWeightFilePath);
+ models.push_back(mConfig.mConfigFilePath);
+ break;
+ case INFERENCE_MODEL_TFLITE:
+ case INFERENCE_MODEL_TORCH:
+ models.push_back(mConfig.mWeightFilePath);
+ break;
+ default:
+ break;
+ }
- LOGI("LEAVE");
+ // Request model loading to backend engine.
+ ret = mBackend->Load(models, model_format);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ delete mBackend;
+ LOGE("Fail to load model");
+ mCanRun = false;
+ std::vector<std::string>().swap(models);
+ return ConvertEngineErrorToVisionError(ret);
+ }
- return MEDIA_VISION_ERROR_NONE;
-}
+ std::vector<std::string>().swap(models);
-int Inference::Load(void)
-{
- LOGI("ENTER");
-
- std::string label_file = mConfig.mUserFilePath;
- size_t userFileLength = label_file.length();
- if (userFileLength > 0 && access(label_file.c_str(), F_OK)) {
- LOGE("Label file path in [%s] ", label_file.c_str());
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- int ret = (userFileLength > 0) ? SetUserFile(label_file) : MEDIA_VISION_ERROR_NONE;
- if (ret != MEDIA_VISION_ERROR_NONE) {
- LOGE("Fail to load label file.");
- return ret;
- }
-
- // Check if model file is valid or not.
- std::string ext_str = mConfig.mWeightFilePath.substr(mConfig.mWeightFilePath.find_last_of(".") + 1);
- std::map<std::string, int>::iterator key = mModelFormats.find(ext_str);
- if (key == mModelFormats.end()) {
- LOGE("Invalid model file format.(ext = %s)", ext_str.c_str());
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ // Prepare input and output tensor buffers.
+ PrepareTenosrBuffers();
- LOGI("%s model file has been detected.", ext_str.c_str());
-
- std::vector<std::string> models;
-
- inference_model_format_e model_format = static_cast<inference_model_format_e>(key->second);
-
- // Push model file information to models vector properly according to detected model format.
- switch (model_format) {
- case INFERENCE_MODEL_CAFFE:
- case INFERENCE_MODEL_TF:
- case INFERENCE_MODEL_DARKNET:
- case INFERENCE_MODEL_DLDT:
- case INFERENCE_MODEL_ONNX:
- case INFERENCE_MODEL_VIVANTE:
- models.push_back(mConfig.mWeightFilePath);
- models.push_back(mConfig.mConfigFilePath);
- break;
- case INFERENCE_MODEL_TFLITE:
- case INFERENCE_MODEL_TORCH:
- models.push_back(mConfig.mWeightFilePath);
- break;
- default:
- break;
- }
+ mCanRun = true;
+
+ LOGI("LEAVE");
- // Request model loading to backend engine.
- ret = mBackend->Load(models, model_format);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- delete mBackend;
- LOGE("Fail to load model");
- mCanRun = false;
- std::vector<std::string>().swap(models);
return ConvertEngineErrorToVisionError(ret);
}
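For reference, a minimal standalone sketch (not part of this patch) of the format detection Inference::Load() performs: the weight file extension is looked up in a map before the file paths are handed to the backend. The extension-to-format map and the path are illustrative, not the real mModelFormats contents.

// Sketch only: detect a model format from the weight file extension.
#include <cstdio>
#include <map>
#include <string>

int main()
{
	std::map<std::string, int> formats;
	formats["tflite"] = 1;       // illustrative ids, not the real enum values
	formats["pb"] = 2;
	formats["caffemodel"] = 3;

	const std::string weightPath = "/usr/share/capi-media-vision/models/model.tflite";
	std::string ext = weightPath.substr(weightPath.find_last_of(".") + 1);

	std::map<std::string, int>::iterator it = formats.find(ext);
	if (it == formats.end()) {
		printf("Invalid model file format (ext = %s)\n", ext.c_str());
		return -1;
	}

	printf("%s model file has been detected (format id %d)\n", ext.c_str(), it->second);
	return 0;
}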
- std::vector<std::string>().swap(models);
+ int Inference::Run(std::vector<mv_source_h> &mvSources,
+ std::vector<mv_rectangle_s> &rects)
+ {
+ int ret = INFERENCE_ENGINE_ERROR_NONE;
- // Prepare input and output tensor buffers.
- PrepareTenosrBuffers();
+ if (!mCanRun) {
+ LOGE("Invalid to run inference");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
- mCanRun = true;
+ /* convert mv_source to cv::Mat */
+ cv::Mat cvSource;
+ cv::Rect cvRoi;
+ unsigned int width = 0, height = 0;
+ unsigned int bufferSize = 0;
+ unsigned char *buffer = NULL;
- LOGI("LEAVE");
+ if (mvSources.empty()) {
+ LOGE("mvSources should contain only one cv source.");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
- return ConvertEngineErrorToVisionError(ret);
-}
+ // Only one input source can be requested for inference as of now.
+ if (mvSources.size() > 1) {
+ LOGE("It allows only one mv source for the inference.");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
-int Inference::Run(std::vector<mv_source_h> &mvSources, std::vector<mv_rectangle_s> &rects)
-{
- int ret = INFERENCE_ENGINE_ERROR_NONE;
+ // TODO. Consider multiple sources.
+ mv_source_h mvSource = mvSources.front();
+ mv_rectangle_s *roi = rects.empty() ? NULL : &(rects.front());
- if (!mCanRun) {
- LOGE("Invalid to run inference");
- return MEDIA_VISION_ERROR_INVALID_OPERATION;
- }
+ mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID;
- /* convert mv_source to cv::Mat */
- cv::Mat cvSource;
- cv::Rect cvRoi;
- unsigned int width = 0, height = 0;
- unsigned int bufferSize = 0;
- unsigned char *buffer = NULL;
+ if (mv_source_get_width(mvSource, &width) != MEDIA_VISION_ERROR_NONE ||
+ mv_source_get_height(mvSource, &height) !=
+ MEDIA_VISION_ERROR_NONE ||
+ mv_source_get_colorspace(mvSource, &colorspace) !=
+ MEDIA_VISION_ERROR_NONE ||
+ mv_source_get_buffer(mvSource, &buffer, &bufferSize))
+ return MEDIA_VISION_ERROR_INTERNAL;
- if (mvSources.empty()) {
- LOGE("mvSources should contain only one cv source.");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ // TODO. Let's support various color spaces.
- // We are able to request Only one input data for the inference as of now.
- if (mvSources.size() > 1) {
- LOGE("It allows only one mv source for the inference.");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ if (colorspace != MEDIA_VISION_COLORSPACE_RGB888) {
+ LOGE("Not Supported format!\n");
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
+ }
- // TODO. Consider multiple sources.
- mv_source_h mvSource = mvSources.front();
- mv_rectangle_s *roi = rects.empty() ? NULL : &(rects.front());
+ if (roi == NULL) {
+ cvSource = cv::Mat(cv::Size(width, height), CV_MAKETYPE(CV_8U, 3),
+ buffer)
+ .clone();
+ } else {
+ cvRoi.x = roi->point.x;
+ cvRoi.y = roi->point.y;
+ cvRoi.width = (roi->point.x + roi->width) >= width ?
+ width - roi->point.x :
+ roi->width;
+ cvRoi.height = (roi->point.y + roi->height) >= height ?
+ height - roi->point.y :
+ roi->height;
+ cvSource = cv::Mat(cv::Size(width, height), CV_MAKETYPE(CV_8U, 3),
+ buffer)(cvRoi)
+ .clone();
+ }
- mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID;
+ LOGE("Size: w:%u, h:%u", cvSource.size().width, cvSource.size().height);
- if (mv_source_get_width(mvSource, &width) != MEDIA_VISION_ERROR_NONE ||
- mv_source_get_height(mvSource, &height) != MEDIA_VISION_ERROR_NONE ||
- mv_source_get_colorspace(mvSource, &colorspace) != MEDIA_VISION_ERROR_NONE ||
- mv_source_get_buffer(mvSource, &buffer, &bufferSize))
- return MEDIA_VISION_ERROR_INTERNAL;
+ if (mCh != 1 && mCh != 3) {
+ LOGE("Channel not supported.");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
- // TODO. Let's support various color spaces.
+ std::vector<inference_engine_tensor_buffer>::iterator iter;
+ for (iter = mInputTensorBuffers.begin();
+ iter != mInputTensorBuffers.end(); iter++) {
+ inference_engine_tensor_buffer tensor_buffer = *iter;
- if (colorspace != MEDIA_VISION_COLORSPACE_RGB888) {
- LOGE("Not Supported format!\n");
- return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
- }
+ int data_type = ConvertToCv(tensor_buffer.data_type);
- if (roi == NULL) {
- cvSource = cv::Mat(cv::Size(width, height), CV_MAKETYPE(CV_8U, 3), buffer).clone();
- } else {
- cvRoi.x = roi->point.x;
- cvRoi.y = roi->point.y;
- cvRoi.width = (roi->point.x + roi->width) >= width ? width - roi->point.x : roi->width;
- cvRoi.height = (roi->point.y + roi->height) >= height ? height - roi->point.y : roi->height;
- cvSource = cv::Mat(cv::Size(width, height), CV_MAKETYPE(CV_8U, 3), buffer)(cvRoi).clone();
- }
+ // Convert color space of input tensor data and then normalize it.
+ ret = Preprocess(cvSource,
+ cv::Mat(mInputSize.height, mInputSize.width,
+ data_type, tensor_buffer.buffer),
+ data_type);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to preprocess input tensor data.");
+ return ret;
+ }
+ }
- LOGE("Size: w:%u, h:%u", cvSource.size().width, cvSource.size().height);
+ ret = mBackend->Run(mInputTensorBuffers, mOutputTensorBuffers);
- if (mCh != 1 && mCh != 3) {
- LOGE("Channel not supported.");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ return ConvertEngineErrorToVisionError(ret);
}
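For reference, a minimal standalone sketch (not part of this patch) of the ROI clamping Inference::Run() applies before cropping the input cv::Mat: an ROI that runs past the image edge is shrunk to fit, then a deep copy of the crop is taken. It assumes only OpenCV; the image and ROI sizes are made up.

// Sketch only: clamp a requested ROI to the source bounds, then crop.
#include <opencv2/core.hpp>
#include <cstdio>

static cv::Rect clampRoi(int x, int y, int w, int h, int imgW, int imgH)
{
	cv::Rect roi;
	roi.x = x;
	roi.y = y;
	// If the ROI runs past the right/bottom edge, shrink it to fit.
	roi.width = (x + w) >= imgW ? imgW - x : w;
	roi.height = (y + h) >= imgH ? imgH - y : h;
	return roi;
}

int main()
{
	cv::Mat source(480, 640, CV_8UC3, cv::Scalar(0, 0, 0));
	cv::Rect roi = clampRoi(600, 400, 100, 100, source.cols, source.rows);
	cv::Mat cropped = source(roi).clone(); // deep copy, as Run() does
	printf("cropped: %dx%d\n", cropped.cols, cropped.rows);
	return 0;
}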
- std::vector<inference_engine_tensor_buffer>::iterator iter;
- for (iter = mInputTensorBuffers.begin(); iter != mInputTensorBuffers.end(); iter++) {
- inference_engine_tensor_buffer tensor_buffer = *iter;
+ std::pair<std::string, bool>
+ Inference::GetSupportedInferenceBackend(int backend)
+ {
+ return mSupportedInferenceBackend[backend];
+ }
- int data_type = ConvertToCv(tensor_buffer.data_type);
+ int Inference::GetClassficationResults(
+ ImageClassificationResults *classificationResults)
+ {
+ tensor_t outputData;
- // Convert color space of input tensor data and then normalize it.
- ret = Preprocess(cvSource, cv::Mat(mInputSize.height, mInputSize.width, data_type, tensor_buffer.buffer), data_type);
+ // Get inference result and contain it to outputData.
+ int ret = FillOutputResult(outputData);
if (ret != MEDIA_VISION_ERROR_NONE) {
- LOGE("Fail to preprocess input tensor data.");
+ LOGE("Fail to get output result.");
return ret;
}
- }
-
- ret = mBackend->Run(mInputTensorBuffers, mOutputTensorBuffers);
-
- return ConvertEngineErrorToVisionError(ret);
-}
-
-std::pair<std::string, bool> Inference::GetSupportedInferenceBackend(int backend)
-{
- return mSupportedInferenceBackend[backend];
-}
-
-int Inference::GetClassficationResults(ImageClassificationResults *classificationResults)
-{
- tensor_t outputData;
- // Get inference result and contain it to outputData.
- int ret = FillOutputResult(outputData);
- if (ret != MEDIA_VISION_ERROR_NONE) {
- LOGE("Fail to get output result.");
- return ret;
- }
+ // Will contain top N results in ascending order.
+ std::vector<std::pair<float, int> > top_results;
+ std::priority_queue<std::pair<float, int>,
+ std::vector<std::pair<float, int> >,
+ std::greater<std::pair<float, int> > >
+ top_result_pq;
+ float value = 0.0f;
- // Will contain top N results in ascending order.
- std::vector<std::pair<float, int>> top_results;
- std::priority_queue<std::pair<float, int>,
- std::vector<std::pair<float, int>>,
- std::greater<std::pair<float, int>>> top_result_pq;
- float value = 0.0f;
+ std::vector<std::vector<int> > inferDimInfo(outputData.dimInfo);
+ std::vector<void *> inferResults(outputData.data.begin(),
+ outputData.data.end());
- std::vector<std::vector<int>> inferDimInfo(outputData.dimInfo);
- std::vector<void*> inferResults(outputData.data.begin(), outputData.data.end());
+ int count = inferDimInfo[0][1];
+ LOGI("count: %d", count);
- int count = inferDimInfo[0][1];
- LOGI("count: %d", count);
+ float *prediction = reinterpret_cast<float *>(inferResults[0]);
+ for (int i = 0; i < count; ++i) {
+ value = prediction[i];
- float *prediction = reinterpret_cast<float*>(inferResults[0]);
- for (int i = 0; i < count; ++i) {
- value = prediction[i];
+ // Only add it if it beats the threshold and has a chance at being in
+ // the top N.
+ top_result_pq.push(std::pair<float, int>(value, i));
- // Only add it if it beats the threshold and has a chance at being in
- // the top N.
- top_result_pq.push(std::pair<float, int>(value, i));
+ // If at capacity, kick the smallest value out.
+ if (top_result_pq.size() > mOutputNumbers) {
+ top_result_pq.pop();
+ }
+ }
- // If at capacity, kick the smallest value out.
- if (top_result_pq.size() > mOutputNumbers) {
+ // Copy to output vector and reverse into descending order.
+ while (!top_result_pq.empty()) {
+ top_results.push_back(top_result_pq.top());
top_result_pq.pop();
}
- }
-
- // Copy to output vector and reverse into descending order.
- while (!top_result_pq.empty()) {
- top_results.push_back(top_result_pq.top());
- top_result_pq.pop();
- }
- std::reverse(top_results.begin(), top_results.end());
-
- int classIdx = -1;
- ImageClassificationResults results;
- results.number_of_classes = 0;
- for (int idx = 0; idx < top_results.size(); ++idx) {
- if (top_results[idx].first < mThreshold)
- continue;
- LOGI("idx:%d", idx);
- LOGI("classIdx: %d", top_results[idx].second);
- LOGI("classProb: %f", top_results[idx].first);
-
- classIdx = top_results[idx].second;
- results.indices.push_back(classIdx);
- results.confidences.push_back(top_results[idx].first);
- results.names.push_back(mUserListName[classIdx]);
- results.number_of_classes++;
- }
-
- *classificationResults = results;
- LOGE("Inference: GetClassificationResults: %d\n", results.number_of_classes);
- return MEDIA_VISION_ERROR_NONE;
-}
-
-int Inference::GetObjectDetectionResults(ObjectDetectionResults *detectionResults)
-{
- tensor_t outputData;
+ std::reverse(top_results.begin(), top_results.end());
- // Get inference result and contain it to outputData.
- int ret = FillOutputResult(outputData);
- if (ret != MEDIA_VISION_ERROR_NONE) {
- LOGE("Fail to get output result.");
- return ret;
- }
+ int classIdx = -1;
+ ImageClassificationResults results;
+ results.number_of_classes = 0;
+ for (int idx = 0; idx < top_results.size(); ++idx) {
+ if (top_results[idx].first < mThreshold)
+ continue;
+ LOGI("idx:%d", idx);
+ LOGI("classIdx: %d", top_results[idx].second);
+ LOGI("classProb: %f", top_results[idx].first);
+
+ classIdx = top_results[idx].second;
+ results.indices.push_back(classIdx);
+ results.confidences.push_back(top_results[idx].first);
+ results.names.push_back(mUserListName[classIdx]);
+ results.number_of_classes++;
+ }
- // In case of object detection,
- // a model may apply post-process but others may not.
- // Thus, those cases should be hanlded separately.
- std::vector<std::vector<int>> inferDimInfo(outputData.dimInfo);
- LOGI("inferDimInfo size: %zu", outputData.dimInfo.size());
-
- std::vector<void*> inferResults(outputData.data.begin(), outputData.data.end());
- LOGI("inferResults size: %zu", inferResults.size());
-
- float* boxes = nullptr;
- float* classes = nullptr;
- float* scores = nullptr;
- int number_of_detections = 0;
-
- cv::Mat cvScores, cvClasses, cvBoxes;
- if (outputData.dimInfo.size() == 1) {
- // there is no way to know how many objects are detect unless the number of objects aren't
- // provided. In the case, each backend should provide the number of results manually.
- // For example, in OpenCV, MobilenetV1-SSD doesn't provide it so the number of objects are
- // written to the 1st element i.e., outputData.data[0] (the shape is 1x1xNx7 and the 1st of 7
- // indicats the image id. But it is useless if a batch mode isn't supported.
- // So, use the 1st of 7.
-
- number_of_detections = static_cast<int>(*reinterpret_cast<float*>(outputData.data[0]));
- cv::Mat cvOutputData(number_of_detections, inferDimInfo[0][3], CV_32F, outputData.data[0]);
-
- // boxes
- cv::Mat cvLeft = cvOutputData.col(3).clone();
- cv::Mat cvTop = cvOutputData.col(4).clone();
- cv::Mat cvRight = cvOutputData.col(5).clone();
- cv::Mat cvBottom = cvOutputData.col(6).clone();
-
- cv::Mat cvBoxElems[] = { cvTop, cvLeft, cvBottom, cvRight };
- cv::hconcat(cvBoxElems, 4, cvBoxes);
-
- // classes
- cvClasses = cvOutputData.col(1).clone();
-
- // scores
- cvScores = cvOutputData.col(2).clone();
-
- boxes = cvBoxes.ptr<float>(0);
- classes = cvClasses.ptr<float>(0);
- scores = cvScores.ptr<float>(0);
-
- } else {
- boxes = reinterpret_cast<float*>(inferResults[0]);
- classes = reinterpret_cast<float*>(inferResults[1]);
- scores = reinterpret_cast<float*>(inferResults[2]);
- number_of_detections = (int)(*reinterpret_cast<float*>(inferResults[3]));
+ *classificationResults = results;
+ LOGE("Inference: GetClassificationResults: %d\n",
+ results.number_of_classes);
+ return MEDIA_VISION_ERROR_NONE;
}
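For reference, a minimal standalone sketch (not part of this patch) of the top-N selection used above: a min-heap built with std::priority_queue and std::greater keeps the N best scores, which are then drained and reversed into descending order. The scores and N are illustrative.

// Sketch only: keep the top-N (score, index) pairs with a min-heap.
#include <algorithm>
#include <cstdio>
#include <queue>
#include <utility>
#include <vector>

int main()
{
	const std::vector<float> scores = { 0.05f, 0.72f, 0.11f, 0.91f, 0.33f };
	const size_t topN = 3;

	// Min-heap ordered by score so the smallest kept score sits on top.
	std::priority_queue<std::pair<float, int>,
			    std::vector<std::pair<float, int> >,
			    std::greater<std::pair<float, int> > > pq;

	for (size_t i = 0; i < scores.size(); ++i) {
		pq.push(std::make_pair(scores[i], static_cast<int>(i)));
		if (pq.size() > topN)
			pq.pop(); // at capacity, evict the current smallest
	}

	// Drain the heap (ascending), then reverse into descending order.
	std::vector<std::pair<float, int> > top;
	while (!pq.empty()) {
		top.push_back(pq.top());
		pq.pop();
	}
	std::reverse(top.begin(), top.end());

	for (size_t i = 0; i < top.size(); ++i)
		printf("rank %zu: class %d, score %.2f\n", i, top[i].second, top[i].first);

	return 0;
}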
+ int Inference::GetObjectDetectionResults(
+ ObjectDetectionResults *detectionResults)
+ {
+ tensor_t outputData;
- LOGI("number_of_detections = %d", number_of_detections);
+ // Get inference result and contain it to outputData.
+ int ret = FillOutputResult(outputData);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get output result.");
+ return ret;
+ }
- int left, top, right, bottom;
- cv::Rect loc;
+ // In case of object detection,
+ // some models apply post-processing while others may not.
+ // Thus, those cases should be handled separately.
+ std::vector<std::vector<int> > inferDimInfo(outputData.dimInfo);
+ LOGI("inferDimInfo size: %zu", outputData.dimInfo.size());
+
+ std::vector<void *> inferResults(outputData.data.begin(),
+ outputData.data.end());
+ LOGI("inferResults size: %zu", inferResults.size());
+
+ float *boxes = nullptr;
+ float *classes = nullptr;
+ float *scores = nullptr;
+ int number_of_detections = 0;
+
+ cv::Mat cvScores, cvClasses, cvBoxes;
+ if (outputData.dimInfo.size() == 1) {
+ // There is no way to know how many objects were detected unless the number of objects is
+ // provided. In that case, each backend should provide the number of results manually.
+ // For example, in OpenCV, MobilenetV1-SSD doesn't provide it, so the number of objects is
+ // written to the 1st element, i.e., outputData.data[0] (the shape is 1x1xNx7 and the 1st of
+ // the 7 indicates the image id, which is useless if batch mode isn't supported).
+ // So, use the 1st of the 7.
+
+ number_of_detections = static_cast<int>(
+ *reinterpret_cast<float *>(outputData.data[0]));
+ cv::Mat cvOutputData(number_of_detections, inferDimInfo[0][3],
+ CV_32F, outputData.data[0]);
+
+ // boxes
+ cv::Mat cvLeft = cvOutputData.col(3).clone();
+ cv::Mat cvTop = cvOutputData.col(4).clone();
+ cv::Mat cvRight = cvOutputData.col(5).clone();
+ cv::Mat cvBottom = cvOutputData.col(6).clone();
+
+ cv::Mat cvBoxElems[] = { cvTop, cvLeft, cvBottom, cvRight };
+ cv::hconcat(cvBoxElems, 4, cvBoxes);
+
+ // classes
+ cvClasses = cvOutputData.col(1).clone();
+
+ // scores
+ cvScores = cvOutputData.col(2).clone();
+
+ boxes = cvBoxes.ptr<float>(0);
+ classes = cvClasses.ptr<float>(0);
+ scores = cvScores.ptr<float>(0);
+
+ } else {
+ boxes = reinterpret_cast<float *>(inferResults[0]);
+ classes = reinterpret_cast<float *>(inferResults[1]);
+ scores = reinterpret_cast<float *>(inferResults[2]);
+ number_of_detections =
+ (int) (*reinterpret_cast<float *>(inferResults[3]));
+ }
- ObjectDetectionResults results;
- results.number_of_objects = 0;
- for (int idx = 0; idx < number_of_detections; ++idx) {
- if (scores[idx] < mThreshold)
- continue;
+ LOGI("number_of_detections = %d", number_of_detections);
- left = static_cast<int>(boxes[idx*4 + 1] * mSourceSize.width);
- top = static_cast<int>(boxes[idx*4 + 0] * mSourceSize.height);
- right = static_cast<int>(boxes[idx*4 + 3] * mSourceSize.width);
- bottom = static_cast<int>(boxes[idx*4 + 2] * mSourceSize.height);
+ int left, top, right, bottom;
+ cv::Rect loc;
- loc.x = left;
- loc.y = top;
- loc.width = right -left + 1;
- loc.height = bottom - top + 1;
+ ObjectDetectionResults results;
+ results.number_of_objects = 0;
+ for (int idx = 0; idx < number_of_detections; ++idx) {
+ if (scores[idx] < mThreshold)
+ continue;
- results.indices.push_back(static_cast<int>(classes[idx]));
- results.confidences.push_back(scores[idx]);
- results.names.push_back(mUserListName[static_cast<int>(classes[idx])]);
- results.locations.push_back(loc);
- results.number_of_objects++;
+ left = static_cast<int>(boxes[idx * 4 + 1] * mSourceSize.width);
+ top = static_cast<int>(boxes[idx * 4 + 0] * mSourceSize.height);
+ right = static_cast<int>(boxes[idx * 4 + 3] * mSourceSize.width);
+ bottom = static_cast<int>(boxes[idx * 4 + 2] * mSourceSize.height);
+
+ loc.x = left;
+ loc.y = top;
+ loc.width = right - left + 1;
+ loc.height = bottom - top + 1;
+
+ results.indices.push_back(static_cast<int>(classes[idx]));
+ results.confidences.push_back(scores[idx]);
+ results.names.push_back(
+ mUserListName[static_cast<int>(classes[idx])]);
+ results.locations.push_back(loc);
+ results.number_of_objects++;
+
+ LOGI("objectClass: %d", static_cast<int>(classes[idx]));
+ LOGI("confidence:%f", scores[idx]);
+ LOGI("left:%d, top:%d, right:%d, bottom:%d", left, top, right,
+ bottom);
+ }
- LOGI("objectClass: %d", static_cast<int>(classes[idx]));
- LOGI("confidence:%f", scores[idx]);
- LOGI("left:%d, top:%d, right:%d, bottom:%d", left, top, right, bottom);
+ *detectionResults = results;
+ LOGE("Inference: GetObjectDetectionResults: %d\n",
+ results.number_of_objects);
+ return MEDIA_VISION_ERROR_NONE;
}
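For reference, a minimal standalone sketch (not part of this patch) of the box decoding used above: one normalized [top, left, bottom, right] SSD box is scaled by the source size into a pixel cv::Rect. The box values and source size are made up.

// Sketch only: turn a normalized SSD box into a pixel cv::Rect.
#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
	const float box[4] = { 0.20f, 0.10f, 0.60f, 0.50f }; // top, left, bottom, right
	const cv::Size sourceSize(640, 480);

	int left = static_cast<int>(box[1] * sourceSize.width);
	int top = static_cast<int>(box[0] * sourceSize.height);
	int right = static_cast<int>(box[3] * sourceSize.width);
	int bottom = static_cast<int>(box[2] * sourceSize.height);

	// +1 because both edges are treated as inclusive.
	cv::Rect loc(left, top, right - left + 1, bottom - top + 1);
	printf("x:%d y:%d w:%d h:%d\n", loc.x, loc.y, loc.width, loc.height);

	return 0;
}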
- *detectionResults = results;
- LOGE("Inference: GetObjectDetectionResults: %d\n", results.number_of_objects);
- return MEDIA_VISION_ERROR_NONE;
-}
+ int
+ Inference::GetFaceDetectionResults(FaceDetectionResults *detectionResults)
+ {
+ tensor_t outputData;
-int Inference::GetFaceDetectionResults(FaceDetectionResults *detectionResults)
-{
- tensor_t outputData;
+ // Get inference result and contain it to outputData.
+ int ret = FillOutputResult(outputData);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get output result.");
+ return ret;
+ }
- // Get inference result and contain it to outputData.
- int ret = FillOutputResult(outputData);
- if (ret != MEDIA_VISION_ERROR_NONE) {
- LOGE("Fail to get output result.");
- return ret;
- }
+ // In case of object detection,
+ // some models apply post-processing while others may not.
+ // Thus, those cases should be handled separately.
+ std::vector<std::vector<int> > inferDimInfo(outputData.dimInfo);
+ LOGI("inferDimInfo size: %zu", outputData.dimInfo.size());
+
+ std::vector<void *> inferResults(outputData.data.begin(),
+ outputData.data.end());
+ LOGI("inferResults size: %zu", inferResults.size());
+
+ float *boxes = nullptr;
+ float *classes = nullptr;
+ float *scores = nullptr;
+ int number_of_detections = 0;
+
+ cv::Mat cvScores, cvClasses, cvBoxes;
+ if (outputData.dimInfo.size() == 1) {
+ // There is no way to know how many objects were detected unless the number of objects is
+ // provided. In that case, each backend should provide the number of results manually.
+ // For example, in OpenCV, MobilenetV1-SSD doesn't provide it, so the number of objects is
+ // written to the 1st element, i.e., outputData.data[0] (the shape is 1x1xNx7 and the 1st of
+ // the 7 indicates the image id, which is useless if batch mode isn't supported).
+ // So, use the 1st of the 7.
+
+ number_of_detections = static_cast<int>(
+ *reinterpret_cast<float *>(outputData.data[0]));
+ cv::Mat cvOutputData(number_of_detections, inferDimInfo[0][3],
+ CV_32F, outputData.data[0]);
+
+ // boxes
+ cv::Mat cvLeft = cvOutputData.col(3).clone();
+ cv::Mat cvTop = cvOutputData.col(4).clone();
+ cv::Mat cvRight = cvOutputData.col(5).clone();
+ cv::Mat cvBottom = cvOutputData.col(6).clone();
+
+ cv::Mat cvBoxElems[] = { cvTop, cvLeft, cvBottom, cvRight };
+ cv::hconcat(cvBoxElems, 4, cvBoxes);
+
+ // classes
+ cvClasses = cvOutputData.col(1).clone();
+
+ // scores
+ cvScores = cvOutputData.col(2).clone();
+
+ boxes = cvBoxes.ptr<float>(0);
+ classes = cvClasses.ptr<float>(0);
+ scores = cvScores.ptr<float>(0);
+
+ } else {
+ boxes = reinterpret_cast<float *>(inferResults[0]);
+ classes = reinterpret_cast<float *>(inferResults[1]);
+ scores = reinterpret_cast<float *>(inferResults[2]);
+ number_of_detections = static_cast<int>(
+ *reinterpret_cast<float *>(inferResults[3]));
+ }
- // In case of object detection,
- // a model may apply post-process but others may not.
- // Thus, those cases should be hanlded separately.
- std::vector<std::vector<int>> inferDimInfo(outputData.dimInfo);
- LOGI("inferDimInfo size: %zu", outputData.dimInfo.size());
-
- std::vector<void*> inferResults(outputData.data.begin(), outputData.data.end());
- LOGI("inferResults size: %zu", inferResults.size());
-
- float* boxes = nullptr;
- float* classes = nullptr;
- float* scores = nullptr;
- int number_of_detections = 0;
-
- cv::Mat cvScores, cvClasses, cvBoxes;
- if (outputData.dimInfo.size() == 1) {
- // there is no way to know how many objects are detect unless the number of objects aren't
- // provided. In the case, each backend should provide the number of results manually.
- // For example, in OpenCV, MobilenetV1-SSD doesn't provide it so the number of objects are
- // written to the 1st element i.e., outputData.data[0] (the shape is 1x1xNx7 and the 1st of 7
- // indicats the image id. But it is useless if a batch mode isn't supported.
- // So, use the 1st of 7.
-
- number_of_detections = static_cast<int>(*reinterpret_cast<float*>(outputData.data[0]));
- cv::Mat cvOutputData(number_of_detections, inferDimInfo[0][3], CV_32F, outputData.data[0]);
-
- // boxes
- cv::Mat cvLeft = cvOutputData.col(3).clone();
- cv::Mat cvTop = cvOutputData.col(4).clone();
- cv::Mat cvRight = cvOutputData.col(5).clone();
- cv::Mat cvBottom = cvOutputData.col(6).clone();
-
- cv::Mat cvBoxElems[] = { cvTop, cvLeft, cvBottom, cvRight };
- cv::hconcat(cvBoxElems, 4, cvBoxes);
-
- // classes
- cvClasses = cvOutputData.col(1).clone();
-
- // scores
- cvScores = cvOutputData.col(2).clone();
-
- boxes = cvBoxes.ptr<float>(0);
- classes = cvClasses.ptr<float>(0);
- scores = cvScores.ptr<float>(0);
-
- } else {
- boxes = reinterpret_cast<float*>(inferResults[0]);
- classes = reinterpret_cast<float*>(inferResults[1]);
- scores = reinterpret_cast<float*>(inferResults[2]);
- number_of_detections = static_cast<int>(*reinterpret_cast<float*>(inferResults[3]));
- }
+ int left, top, right, bottom;
+ cv::Rect loc;
+
+ FaceDetectionResults results;
+ results.number_of_faces = 0;
+ for (int idx = 0; idx < number_of_detections; ++idx) {
+ if (scores[idx] < mThreshold)
+ continue;
- int left, top, right, bottom;
- cv::Rect loc;
-
- FaceDetectionResults results;
- results.number_of_faces = 0;
- for (int idx = 0; idx < number_of_detections; ++idx) {
- if (scores[idx] < mThreshold)
- continue;
-
- left = static_cast<int>(boxes[idx*4 + 1] * mSourceSize.width);
- top = static_cast<int>(boxes[idx*4 + 0] * mSourceSize.height);
- right = static_cast<int>(boxes[idx*4 + 3] * mSourceSize.width);
- bottom = static_cast<int>(boxes[idx*4 + 2] * mSourceSize.height);
-
- loc.x = left;
- loc.y = top;
- loc.width = right -left + 1;
- loc.height = bottom - top + 1;
-
- results.confidences.push_back(scores[idx]);
- results.locations.push_back(loc);
- results.number_of_faces++;
-
- LOGI("confidence:%f", scores[idx]);
- LOGI("class: %f", classes[idx]);
- LOGI("left:%f, top:%f, right:%f, bottom:%f", boxes[idx*4 + 1], boxes[idx*4 + 0], boxes[idx*4 + 3], boxes[idx*4 + 2]);
- LOGI("left:%d, top:%d, right:%d, bottom:%d", left, top, right, bottom);
+ left = static_cast<int>(boxes[idx * 4 + 1] * mSourceSize.width);
+ top = static_cast<int>(boxes[idx * 4 + 0] * mSourceSize.height);
+ right = static_cast<int>(boxes[idx * 4 + 3] * mSourceSize.width);
+ bottom = static_cast<int>(boxes[idx * 4 + 2] * mSourceSize.height);
+
+ loc.x = left;
+ loc.y = top;
+ loc.width = right - left + 1;
+ loc.height = bottom - top + 1;
+
+ results.confidences.push_back(scores[idx]);
+ results.locations.push_back(loc);
+ results.number_of_faces++;
+
+ LOGI("confidence:%f", scores[idx]);
+ LOGI("class: %f", classes[idx]);
+ LOGI("left:%f, top:%f, right:%f, bottom:%f", boxes[idx * 4 + 1],
+ boxes[idx * 4 + 0], boxes[idx * 4 + 3], boxes[idx * 4 + 2]);
+ LOGI("left:%d, top:%d, right:%d, bottom:%d", left, top, right,
+ bottom);
+ }
+
+ *detectionResults = results;
+ LOGE("Inference: GetFaceDetectionResults: %d\n",
+ results.number_of_faces);
+ return MEDIA_VISION_ERROR_NONE;
}
- *detectionResults = results;
- LOGE("Inference: GetFaceDetectionResults: %d\n", results.number_of_faces);
- return MEDIA_VISION_ERROR_NONE;
-}
+ int Inference::GetFacialLandMarkDetectionResults(
+ FacialLandMarkDetectionResults *detectionResults)
+ {
+ tensor_t outputData;
-int Inference::GetFacialLandMarkDetectionResults(FacialLandMarkDetectionResults *detectionResults)
-{
- tensor_t outputData;
+ // Get inference result and contain it to outputData.
+ int ret = FillOutputResult(outputData);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get output result.");
+ return ret;
+ }
- // Get inference result and contain it to outputData.
- int ret = FillOutputResult(outputData);
- if (ret != MEDIA_VISION_ERROR_NONE) {
- LOGE("Fail to get output result.");
- return ret;
- }
+ std::vector<std::vector<int> > inferDimInfo(outputData.dimInfo);
+ std::vector<void *> inferResults(outputData.data.begin(),
+ outputData.data.end());
- std::vector<std::vector<int>> inferDimInfo(outputData.dimInfo);
- std::vector<void*> inferResults(outputData.data.begin(), outputData.data.end());
+ long number_of_detections = inferDimInfo[0][1];
+ float *loc = reinterpret_cast<float *>(inferResults[0]);
- long number_of_detections = inferDimInfo[0][1];
- float* loc = reinterpret_cast<float*>(inferResults[0]);
+ FacialLandMarkDetectionResults results;
+ results.number_of_landmarks = 0;
- FacialLandMarkDetectionResults results;
- results.number_of_landmarks = 0;
+ cv::Point point(0, 0);
+ results.number_of_landmarks = 0;
+ LOGI("imgW:%d, imgH:%d", mSourceSize.width, mSourceSize.height);
+ for (int idx = 0; idx < number_of_detections; idx += 2) {
+ point.x = static_cast<int>(loc[idx] * mSourceSize.width);
+ point.y = static_cast<int>(loc[idx + 1] * mSourceSize.height);
- cv::Point point(0,0);
- results.number_of_landmarks = 0;
- LOGI("imgW:%d, imgH:%d", mSourceSize.width, mSourceSize.height);
- for (int idx = 0; idx < number_of_detections; idx+=2) {
- point.x = static_cast<int>(loc[idx] * mSourceSize.width);
- point.y = static_cast<int>(loc[idx+1] * mSourceSize.height);
+ results.locations.push_back(point);
+ results.number_of_landmarks++;
- results.locations.push_back(point);
- results.number_of_landmarks++;
+ LOGI("x:%d, y:%d", point.x, point.y);
+ }
- LOGI("x:%d, y:%d", point.x, point.y);
+ *detectionResults = results;
+ LOGE("Inference: FacialLandmarkDetectionResults: %d\n",
+ results.number_of_landmarks);
+ return MEDIA_VISION_ERROR_NONE;
}
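For reference, a minimal standalone sketch (not part of this patch) of the landmark decoding used above: a flat (x, y, x, y, ...) array of normalized coordinates is scaled into pixel cv::Point values. The coordinates and source size are illustrative.

// Sketch only: convert normalized landmark coordinates to pixel points.
#include <opencv2/core.hpp>
#include <cstdio>
#include <vector>

int main()
{
	const float loc[] = { 0.30f, 0.40f, 0.70f, 0.40f, 0.50f, 0.65f };
	const int count = sizeof(loc) / sizeof(loc[0]);
	const cv::Size sourceSize(640, 480);

	std::vector<cv::Point> landmarks;
	for (int idx = 0; idx < count; idx += 2) {
		cv::Point point;
		point.x = static_cast<int>(loc[idx] * sourceSize.width);
		point.y = static_cast<int>(loc[idx + 1] * sourceSize.height);
		landmarks.push_back(point);
		printf("x:%d, y:%d\n", point.x, point.y);
	}

	return 0;
}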
- *detectionResults = results;
- LOGE("Inference: FacialLandmarkDetectionResults: %d\n", results.number_of_landmarks);
- return MEDIA_VISION_ERROR_NONE;
-}
+ int Inference::GetPoseEstimationDetectionResults(
+ PoseEstimationResults *detectionResults)
+ {
+ tensor_t outputData;
-int Inference::GetPoseEstimationDetectionResults(PoseEstimationResults *detectionResults)
-{
- tensor_t outputData;
+ // Get inference result and contain it to outputData.
+ int ret = FillOutputResult(outputData);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get output result.");
+ return ret;
+ }
- // Get inference result and contain it to outputData.
- int ret = FillOutputResult(outputData);
- if (ret != MEDIA_VISION_ERROR_NONE) {
- LOGE("Fail to get output result.");
- return ret;
- }
+ std::vector<std::vector<int> > inferDimInfo(outputData.dimInfo);
+ std::vector<void *> inferResults(outputData.data.begin(),
+ outputData.data.end());
- std::vector<std::vector<int>> inferDimInfo(outputData.dimInfo);
- std::vector<void*> inferResults(outputData.data.begin(), outputData.data.end());
+ long number_of_pose = inferDimInfo[0][3];
+ float *tmp = static_cast<float *>(inferResults[0]);
+ cv::Size heatMapSize(inferDimInfo[0][1], inferDimInfo[0][2]);
- long number_of_pose = inferDimInfo[0][3];
- float * tmp = static_cast<float*>(inferResults[0]);
- cv::Size heatMapSize(inferDimInfo[0][1], inferDimInfo[0][2]);
+ cv::Point loc;
+ double score;
+ cv::Mat blurredHeatMap;
- cv::Point loc;
- double score;
- cv::Mat blurredHeatMap;
+ cv::Mat reShapeTest(cv::Size(inferDimInfo[0][2], inferDimInfo[0][1]),
+ CV_32FC(inferDimInfo[0][3]), (void *) tmp);
- cv::Mat reShapeTest( cv::Size(inferDimInfo[0][2], inferDimInfo[0][1]), CV_32FC(inferDimInfo[0][3]), (void*)tmp);
+ cv::Mat multiChannels[inferDimInfo[0][3]];
+ split(reShapeTest, multiChannels);
- cv::Mat multiChannels[inferDimInfo[0][3]];
- split(reShapeTest, multiChannels);
+ float ratioX = static_cast<float>(mSourceSize.width) /
+ static_cast<float>(inferDimInfo[0][2]);
+ float ratioY = static_cast<float>(mSourceSize.height) /
+ static_cast<float>(inferDimInfo[0][1]);
- float ratioX = static_cast<float>(mSourceSize.width) / static_cast<float>(inferDimInfo[0][2]);
- float ratioY = static_cast<float>(mSourceSize.height) / static_cast<float>(inferDimInfo[0][1]);
+ PoseEstimationResults results;
+ results.number_of_pose_estimation = 0;
+ for (int poseIdx = 0; poseIdx < number_of_pose; poseIdx++) {
+ cv::Mat heatMap = multiChannels[poseIdx];
- PoseEstimationResults results;
- results.number_of_pose_estimation = 0;
- for (int poseIdx = 0; poseIdx < number_of_pose; poseIdx++) {
- cv::Mat heatMap = multiChannels[poseIdx];
+ cv::GaussianBlur(heatMap, blurredHeatMap, cv::Size(), 5.0, 5.0);
+ cv::minMaxLoc(heatMap, NULL, &score, NULL, &loc);
- cv::GaussianBlur(heatMap, blurredHeatMap, cv::Size(), 5.0, 5.0);
- cv::minMaxLoc(heatMap, NULL, &score, NULL, &loc);
+ LOGI("PoseIdx[%2d]: x[%2d], y[%2d], score[%.3f]", poseIdx, loc.x,
+ loc.y, score);
+ LOGI("PoseIdx[%2d]: x[%2d], y[%2d], score[%.3f]", poseIdx,
+ static_cast<int>(static_cast<float>(loc.x + 1) * ratioX),
+ static_cast<int>(static_cast<float>(loc.y + 1) * ratioY),
+ score);
- LOGI("PoseIdx[%2d]: x[%2d], y[%2d], score[%.3f]", poseIdx, loc.x, loc.y, score);
- LOGI("PoseIdx[%2d]: x[%2d], y[%2d], score[%.3f]", poseIdx, static_cast<int>(static_cast<float>(loc.x+1) * ratioX), static_cast<int>(static_cast<float>(loc.y+1) * ratioY), score);
+ loc.x = static_cast<int>(static_cast<float>(loc.x + 1) * ratioX);
+ loc.y = static_cast<int>(static_cast<float>(loc.y + 1) * ratioY);
+ results.locations.push_back(loc);
+ results.number_of_pose_estimation++;
+ }
- loc.x = static_cast<int>(static_cast<float>(loc.x+1) * ratioX);
- loc.y = static_cast<int>(static_cast<float>(loc.y+1) * ratioY);
- results.locations.push_back(loc);
- results.number_of_pose_estimation++;
+ *detectionResults = results;
+ LOGE("Inference: PoseEstimationResults: %d\n",
+ results.number_of_pose_estimation);
+ return MEDIA_VISION_ERROR_NONE;
}
- *detectionResults = results;
- LOGE("Inference: PoseEstimationResults: %d\n", results.number_of_pose_estimation);
- return MEDIA_VISION_ERROR_NONE;
-}
-
} /* Inference */
} /* MediaVision */
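For reference, a minimal standalone sketch (not part of this patch) of the per-channel peak extraction GetPoseEstimationDetectionResults() performs: cv::minMaxLoc finds the heat-map peak, which is then rescaled to source coordinates (the Gaussian smoothing step is omitted here). The heat-map and source sizes are made up, and the heat map itself is faked with a single hot pixel.

// Sketch only: find a key-point heat-map peak and map it to source coordinates.
#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
	const cv::Size heatMapSize(48, 48);  // model output resolution (illustrative)
	const cv::Size sourceSize(640, 480); // original image resolution (illustrative)

	// Fake heat map with a single hot pixel standing in for a key point.
	cv::Mat heatMap = cv::Mat::zeros(heatMapSize, CV_32FC1);
	heatMap.at<float>(30, 20) = 1.0f; // row 30, column 20

	double score = 0.0;
	cv::Point loc;
	cv::minMaxLoc(heatMap, NULL, &score, NULL, &loc);

	// Map the heat-map coordinate back to the source image.
	float ratioX = static_cast<float>(sourceSize.width) / heatMapSize.width;
	float ratioY = static_cast<float>(sourceSize.height) / heatMapSize.height;
	int x = static_cast<int>((loc.x + 1) * ratioX);
	int y = static_cast<int>((loc.y + 1) * ratioY);

	printf("peak: x=%d, y=%d, score=%.3f\n", x, y, score);
	return 0;
}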
diff --git a/mv_inference/inference/src/InferenceIni.cpp b/mv_inference/inference/src/InferenceIni.cpp
index 68b32b8a..a258cd8d 100644
--- a/mv_inference/inference/src/InferenceIni.cpp
+++ b/mv_inference/inference/src/InferenceIni.cpp
@@ -21,79 +21,84 @@
#include "mv_private.h"
#include "InferenceIni.h"
-namespace mediavision {
-namespace inference {
-
-const std::string INFERENCE_INI_FILENAME = "/inference/inference_engine.ini";
-
-static inline std::string& rtrim(std::string& s, const char* t = " \t\n\r\f\v")
-{
- s.erase(s.find_last_not_of(t) + 1);
- return s;
-}
-
-static inline std::string& ltrim(std::string& s, const char* t = " \t\n\r\f\v")
-{
- s.erase(s.find_first_not_of(t) + 1);
- return s;
-}
-
-static inline std::string& trim(std::string& s, const char* t = " \t\n\r\f\v")
+namespace mediavision
{
- return ltrim(rtrim(s,t), t);
-}
-
-InferenceInI::InferenceInI() :
- mIniDefaultPath(SYSCONFDIR),
- mDefaultBackend("OPENCV"),
- mDelimeter(",")
+namespace inference
{
- mIniDefaultPath += INFERENCE_INI_FILENAME;
-}
-
-InferenceInI::~InferenceInI()
-{
-
-}
-
-int InferenceInI::LoadInI()
-{
- LOGE("ENTER");
- dictionary *dict = iniparser_load(mIniDefaultPath.c_str());
- if (dict == NULL) {
- LOGE("Fail to load ini");
- return -1;
+ const std::string INFERENCE_INI_FILENAME =
+ "/inference/inference_engine.ini";
+
+ static inline std::string &rtrim(std::string &s,
+ const char *t = " \t\n\r\f\v")
+ {
+ s.erase(s.find_last_not_of(t) + 1);
+ return s;
}
- std::string list = std::string(iniparser_getstring(dict, "inference backend:supported backend types", (char*)mDefaultBackend.c_str()));
-
- size_t pos = 0;
- while((pos = list.find(mDelimeter)) != std::string::npos) {
- std::string tmp = list.substr(0, pos);
- mSupportedInferenceBackend.push_back(atoi(tmp.c_str()));
+ static inline std::string &ltrim(std::string &s,
+ const char *t = " \t\n\r\f\v")
+ {
+ s.erase(s.find_first_not_of(t) + 1);
+ return s;
+ }
- list.erase(0, pos + mDelimeter.length());
+ static inline std::string &trim(std::string &s,
+ const char *t = " \t\n\r\f\v")
+ {
+ return ltrim(rtrim(s, t), t);
}
- mSupportedInferenceBackend.push_back(atoi(list.c_str()));
- if(dict) {
- iniparser_freedict(dict);
- dict = NULL;
+ InferenceInI::InferenceInI()
+ : mIniDefaultPath(SYSCONFDIR)
+ , mDefaultBackend("OPENCV")
+ , mDelimeter(",")
+ {
+ mIniDefaultPath += INFERENCE_INI_FILENAME;
}
- LOGE("LEAVE");
- return 0;
-}
+ InferenceInI::~InferenceInI()
+ {}
+
+ int InferenceInI::LoadInI()
+ {
+ LOGE("ENTER");
+ dictionary *dict = iniparser_load(mIniDefaultPath.c_str());
+ if (dict == NULL) {
+ LOGE("Fail to load ini");
+ return -1;
+ }
+
+ std::string list = std::string(iniparser_getstring(
+ dict, "inference backend:supported backend types",
+ (char *) mDefaultBackend.c_str()));
+
+ size_t pos = 0;
+ while ((pos = list.find(mDelimeter)) != std::string::npos) {
+ std::string tmp = list.substr(0, pos);
+ mSupportedInferenceBackend.push_back(atoi(tmp.c_str()));
+
+ list.erase(0, pos + mDelimeter.length());
+ }
+ mSupportedInferenceBackend.push_back(atoi(list.c_str()));
+
+ if (dict) {
+ iniparser_freedict(dict);
+ dict = NULL;
+ }
+
+ LOGE("LEAVE");
+ return 0;
+ }
-void InferenceInI::UnLoadInI()
-{
- ;
-}
+ void InferenceInI::UnLoadInI()
+ {
+ ;
+ }
-std::vector<int> InferenceInI::GetSupportedInferenceEngines()
-{
- return mSupportedInferenceBackend;
-}
+ std::vector<int> InferenceInI::GetSupportedInferenceEngines()
+ {
+ return mSupportedInferenceBackend;
+ }
} /* Inference */
} /* MediaVision */
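For reference, a minimal standalone sketch (not part of this patch) of the delimiter parsing InferenceInI::LoadInI() applies to the "supported backend types" value: the comma-separated list is split token by token and each token is converted with atoi. The input string is illustrative rather than a real inference_engine.ini value.

// Sketch only: split a comma-separated list of backend ids into integers.
#include <cstdio>
#include <cstdlib>
#include <string>
#include <vector>

int main()
{
	std::string list = "0,1,2"; // stand-in for the ini value
	const std::string delimiter = ",";
	std::vector<int> backends;

	size_t pos = 0;
	while ((pos = list.find(delimiter)) != std::string::npos) {
		std::string token = list.substr(0, pos);
		backends.push_back(atoi(token.c_str()));
		list.erase(0, pos + delimiter.length());
	}
	backends.push_back(atoi(list.c_str())); // last element after the final comma

	for (size_t i = 0; i < backends.size(); ++i)
		printf("backend id: %d\n", backends[i]);

	return 0;
}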
diff --git a/mv_inference/inference/src/mv_inference.c b/mv_inference/inference/src/mv_inference.c
index 56ca9993..50e0f130 100755..100644
--- a/mv_inference/inference/src/mv_inference.c
+++ b/mv_inference/inference/src/mv_inference.c
@@ -36,7 +36,8 @@
int mv_inference_create(mv_inference_h *infer)
{
- MEDIA_VISION_SUPPORT_CHECK(__mv_inference_check_system_info_feature_supported());
+ MEDIA_VISION_SUPPORT_CHECK(
+ __mv_inference_check_system_info_feature_supported());
MEDIA_VISION_NULL_ARG_CHECK(infer);
MEDIA_VISION_FUNCTION_ENTER();
@@ -59,7 +60,8 @@ int mv_inference_create(mv_inference_h *infer)
int mv_inference_destroy(mv_inference_h infer)
{
- MEDIA_VISION_SUPPORT_CHECK(__mv_inference_check_system_info_feature_supported());
+ MEDIA_VISION_SUPPORT_CHECK(
+ __mv_inference_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(infer);
MEDIA_VISION_FUNCTION_ENTER();
@@ -80,11 +82,13 @@ int mv_inference_destroy(mv_inference_h infer)
return ret;
}
-int mv_inference_configure(mv_inference_h infer, mv_engine_config_h engine_config)
+int mv_inference_configure(mv_inference_h infer,
+ mv_engine_config_h engine_config)
{
- MEDIA_VISION_SUPPORT_CHECK(__mv_inference_check_system_info_feature_supported());
+ MEDIA_VISION_SUPPORT_CHECK(
+ __mv_inference_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(infer);
- MEDIA_VISION_INSTANCE_CHECK(engine_config);
+ MEDIA_VISION_INSTANCE_CHECK(engine_config);
MEDIA_VISION_FUNCTION_ENTER();
@@ -96,11 +100,11 @@ int mv_inference_configure(mv_inference_h infer, mv_engine_config_h engine_confi
#else
- ret = mv_inference_configure_engine_open(infer, engine_config);
- if (ret != MEDIA_VISION_ERROR_NONE){
- LOGE("Fail to configure engine and target");
- return ret;
- }
+ ret = mv_inference_configure_engine_open(infer, engine_config);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to configure engine and target");
+ return ret;
+ }
#endif /* MEDIA_VISION_INFERENCE_LICENSE_PORT */
@@ -108,17 +112,17 @@ int mv_inference_configure(mv_inference_h infer, mv_engine_config_h engine_confi
return ret;
}
-
int mv_inference_prepare(mv_inference_h infer)
{
- MEDIA_VISION_SUPPORT_CHECK(__mv_inference_check_system_info_feature_supported());
+ MEDIA_VISION_SUPPORT_CHECK(
+ __mv_inference_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(infer);
MEDIA_VISION_FUNCTION_ENTER();
int ret = MEDIA_VISION_ERROR_NONE;
- mv_engine_config_h engine_config = mv_inference_get_engine_config(infer);
+ mv_engine_config_h engine_config = mv_inference_get_engine_config(infer);
#ifdef MEDIA_VISION_INFERENCE_LICENSE_PORT
@@ -127,21 +131,21 @@ int mv_inference_prepare(mv_inference_h infer)
#else
ret = mv_inference_configure_model_open(infer, engine_config);
- if (ret != MEDIA_VISION_ERROR_NONE){
- LOGE("Fail to configure model");
- return ret;
- }
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to configure model");
+ return ret;
+ }
// input tensor, input layer
ret = mv_inference_configure_input_info_open(infer, engine_config);
- if (ret != MEDIA_VISION_ERROR_NONE){
- LOGE("Fail to configure input info");
- return ret;
- }
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to configure input info");
+ return ret;
+ }
// output layer
ret = mv_inference_configure_output_info_open(infer, engine_config);
- if (ret != MEDIA_VISION_ERROR_NONE) {
+ if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to configure output info");
return ret;
}
@@ -162,11 +166,11 @@ int mv_inference_prepare(mv_inference_h infer)
}
int mv_inference_foreach_supported_engine(
- mv_inference_h infer,
- mv_inference_supported_engine_cb callback,
- void *user_data)
+ mv_inference_h infer, mv_inference_supported_engine_cb callback,
+ void *user_data)
{
- MEDIA_VISION_SUPPORT_CHECK(__mv_inference_check_system_info_feature_supported());
+ MEDIA_VISION_SUPPORT_CHECK(
+ __mv_inference_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(infer);
MEDIA_VISION_NULL_ARG_CHECK(callback);
MEDIA_VISION_FUNCTION_ENTER();
@@ -179,7 +183,8 @@ int mv_inference_foreach_supported_engine(
#else
- ret = mv_inference_foreach_supported_engine_open(infer, callback, user_data);
+ ret = mv_inference_foreach_supported_engine_open(infer, callback,
+ user_data);
#endif
@@ -188,15 +193,13 @@ int mv_inference_foreach_supported_engine(
return ret;
}
-
-int mv_inference_image_classify(
- mv_source_h source,
- mv_inference_h infer,
- mv_rectangle_s *roi,
- mv_inference_image_classified_cb classified_cb,
- void *user_data)
+int mv_inference_image_classify(mv_source_h source, mv_inference_h infer,
+ mv_rectangle_s *roi,
+ mv_inference_image_classified_cb classified_cb,
+ void *user_data)
{
- MEDIA_VISION_SUPPORT_CHECK(__mv_inference_image_check_system_info_feature_supported());
+ MEDIA_VISION_SUPPORT_CHECK(
+ __mv_inference_image_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(source);
MEDIA_VISION_INSTANCE_CHECK(infer);
MEDIA_VISION_NULL_ARG_CHECK(classified_cb);
@@ -213,7 +216,8 @@ int mv_inference_image_classify(
#else
- ret = mv_inference_image_classify_open(source, infer, roi, classified_cb, user_data);
+ ret = mv_inference_image_classify_open(source, infer, roi, classified_cb,
+ user_data);
#endif /* MEDIA_VISION_INFERENCE_LICENSE_PORT */
@@ -222,13 +226,12 @@ int mv_inference_image_classify(
return ret;
}
-int mv_inference_object_detect(
- mv_source_h source,
- mv_inference_h infer,
- mv_inference_object_detected_cb detected_cb,
- void *user_data)
+int mv_inference_object_detect(mv_source_h source, mv_inference_h infer,
+ mv_inference_object_detected_cb detected_cb,
+ void *user_data)
{
- MEDIA_VISION_SUPPORT_CHECK(__mv_inference_image_check_system_info_feature_supported());
+ MEDIA_VISION_SUPPORT_CHECK(
+ __mv_inference_image_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(source);
MEDIA_VISION_INSTANCE_CHECK(infer);
MEDIA_VISION_NULL_ARG_CHECK(detected_cb);
@@ -245,7 +248,8 @@ int mv_inference_object_detect(
#else
- ret = mv_inference_object_detect_open(source, infer, detected_cb, user_data);
+ ret = mv_inference_object_detect_open(source, infer, detected_cb,
+ user_data);
#endif /* MEDIA_VISION_INFERENCE_LICENSE_PORT */
@@ -254,13 +258,12 @@ int mv_inference_object_detect(
return ret;
}
-int mv_inference_face_detect(
- mv_source_h source,
- mv_inference_h infer,
- mv_inference_face_detected_cb detected_cb,
- void *user_data)
+int mv_inference_face_detect(mv_source_h source, mv_inference_h infer,
+ mv_inference_face_detected_cb detected_cb,
+ void *user_data)
{
- MEDIA_VISION_SUPPORT_CHECK(__mv_inference_face_check_system_info_feature_supported());
+ MEDIA_VISION_SUPPORT_CHECK(
+ __mv_inference_face_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(source);
MEDIA_VISION_INSTANCE_CHECK(infer);
MEDIA_VISION_NULL_ARG_CHECK(detected_cb);
@@ -285,13 +288,11 @@ int mv_inference_face_detect(
}
int mv_inference_facial_landmark_detect(
- mv_source_h source,
- mv_inference_h infer,
- mv_rectangle_s *roi,
- mv_inference_facial_landmark_detected_cb detected_cb,
- void *user_data)
+ mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
+ mv_inference_facial_landmark_detected_cb detected_cb, void *user_data)
{
- MEDIA_VISION_SUPPORT_CHECK(__mv_inference_face_check_system_info_feature_supported());
+ MEDIA_VISION_SUPPORT_CHECK(
+ __mv_inference_face_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(source);
MEDIA_VISION_INSTANCE_CHECK(infer);
MEDIA_VISION_NULL_ARG_CHECK(detected_cb);
@@ -306,7 +307,8 @@ int mv_inference_facial_landmark_detect(
*/
#else
- ret = mv_inference_facial_landmark_detect_open(source, infer, roi, detected_cb, user_data);
+ ret = mv_inference_facial_landmark_detect_open(source, infer, roi,
+ detected_cb, user_data);
MEDIA_VISION_FUNCTION_LEAVE();
@@ -316,13 +318,11 @@ int mv_inference_facial_landmark_detect(
}
int mv_inference_pose_estimation_detect(
- mv_source_h source,
- mv_inference_h infer,
- mv_rectangle_s *roi,
- mv_inference_pose_estimation_detected_cb detected_cb,
- void *user_data)
+ mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
+ mv_inference_pose_estimation_detected_cb detected_cb, void *user_data)
{
- MEDIA_VISION_SUPPORT_CHECK(__mv_inference_face_check_system_info_feature_supported());
+ MEDIA_VISION_SUPPORT_CHECK(
+ __mv_inference_face_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(source);
MEDIA_VISION_INSTANCE_CHECK(infer);
MEDIA_VISION_NULL_ARG_CHECK(detected_cb);
@@ -337,7 +337,8 @@ int mv_inference_pose_estimation_detect(
*/
#else
- ret = mv_inference_pose_estimation_detect_open(source, infer, roi, detected_cb, user_data);
+ ret = mv_inference_pose_estimation_detect_open(source, infer, roi,
+ detected_cb, user_data);
MEDIA_VISION_FUNCTION_LEAVE();
diff --git a/mv_inference/inference/src/mv_inference_open.cpp b/mv_inference/inference/src/mv_inference_open.cpp
index 57cdfa92..e63d2b92 100755..100644
--- a/mv_inference/inference/src/mv_inference_open.cpp
+++ b/mv_inference/inference/src/mv_inference_open.cpp
@@ -25,19 +25,18 @@
using namespace mediavision::inference;
-static int check_mv_inference_engine_version(mv_engine_config_h engine_config, bool *is_new_version)
+static int check_mv_inference_engine_version(mv_engine_config_h engine_config,
+ bool *is_new_version)
{
int oldType = 0, newType = 0;
- int ret = mv_engine_config_get_int_attribute(engine_config,
- MV_INFERENCE_TARGET_TYPE,
- &oldType);
+ int ret = mv_engine_config_get_int_attribute(
+ engine_config, MV_INFERENCE_TARGET_TYPE, &oldType);
if (ret != MEDIA_VISION_ERROR_NONE)
oldType = -1;
- ret = mv_engine_config_get_int_attribute(engine_config,
- MV_INFERENCE_TARGET_DEVICE_TYPE,
- &newType);
+ ret = mv_engine_config_get_int_attribute(
+ engine_config, MV_INFERENCE_TARGET_DEVICE_TYPE, &newType);
if (ret != MEDIA_VISION_ERROR_NONE)
newType = -1;
@@ -48,7 +47,8 @@ static int check_mv_inference_engine_version(mv_engine_config_h engine_config, b
// If values of both types are changed then return an error.
// only one of two types should be used.
- if (oldType != MV_INFERENCE_TARGET_CPU && newType != MV_INFERENCE_TARGET_DEVICE_CPU) {
+ if (oldType != MV_INFERENCE_TARGET_CPU &&
+ newType != MV_INFERENCE_TARGET_DEVICE_CPU) {
LOGE("Please use only one of below two device types.");
LOGE("MV_INFERENCE_TARGET_TYPE(deprecated) or MV_INFERENCE_TARGET_DEVICE_TYPE(recommended).");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
@@ -62,7 +62,8 @@ static int check_mv_inference_engine_version(mv_engine_config_h engine_config, b
// (oldType == MV_INFERENCE_TARGET_CPU && newType == MV_INFERENCE_TARGET_DEVICE_CPU)
// - default value of only new type is changed.
// (oldType == MV_INFERENCE_TARGET_CPU && (newType != -1 && newType != MV_INFERENCE_TARGET_DEVICE_CPU))
- if ((oldType != -1 && oldType != MV_INFERENCE_TARGET_CPU) && newType == MV_INFERENCE_TARGET_DEVICE_CPU)
+ if ((oldType != -1 && oldType != MV_INFERENCE_TARGET_CPU) &&
+ newType == MV_INFERENCE_TARGET_DEVICE_CPU)
*is_new_version = false;
else
*is_new_version = true;
@@ -78,12 +79,12 @@ mv_engine_config_h mv_inference_get_engine_config(mv_inference_h infer)
int mv_inference_create_open(mv_inference_h *infer)
{
- if (infer == NULL ) {
+ if (infer == NULL) {
LOGE("Handle can't be created because handle pointer is NULL");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
- (*infer) = static_cast<mv_inference_h>(new (std::nothrow)Inference());
+ (*infer) = static_cast<mv_inference_h>(new (std::nothrow) Inference());
if (*infer == NULL) {
LOGE("Failed to create inference handle");
@@ -103,86 +104,85 @@ int mv_inference_destroy_open(mv_inference_h infer)
}
LOGD("Destroying inference handle [%p]", infer);
- delete static_cast<Inference*>(infer);
+ delete static_cast<Inference *>(infer);
LOGD("Inference handle has been destroyed");
return MEDIA_VISION_ERROR_NONE;
}
-
-int mv_inference_configure_model_open(mv_inference_h infer, mv_engine_config_h engine_config)
+int mv_inference_configure_model_open(mv_inference_h infer,
+ mv_engine_config_h engine_config)
{
LOGI("ENTER");
- Inference *pInfer = static_cast<Inference *>(infer);
+ Inference *pInfer = static_cast<Inference *>(infer);
- int ret = MEDIA_VISION_ERROR_NONE;
+ int ret = MEDIA_VISION_ERROR_NONE;
char *modelConfigFilePath = NULL;
char *modelWeightFilePath = NULL;
char *modelUserFilePath = NULL;
double modelMeanValue = 0.0;
- int backendType= 0;
+ int backendType = 0;
int targetTypes = 0;
size_t userFileLength = 0;
- ret = mv_engine_config_get_string_attribute(engine_config,
- MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
- &modelConfigFilePath);
+ ret = mv_engine_config_get_string_attribute(
+ engine_config, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
+ &modelConfigFilePath);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get model configuration file path");
goto _ERROR_;
}
- ret = mv_engine_config_get_string_attribute(engine_config,
- MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
- &modelWeightFilePath);
+ ret = mv_engine_config_get_string_attribute(
+ engine_config, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+ &modelWeightFilePath);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get model weight file path");
goto _ERROR_;
}
- ret = mv_engine_config_get_string_attribute(engine_config,
- MV_INFERENCE_MODEL_USER_FILE_PATH,
- &modelUserFilePath);
+ ret = mv_engine_config_get_string_attribute(
+ engine_config, MV_INFERENCE_MODEL_USER_FILE_PATH,
+ &modelUserFilePath);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get model user file path");
goto _ERROR_;
}
- ret = mv_engine_config_get_double_attribute(engine_config,
- MV_INFERENCE_MODEL_MEAN_VALUE,
- &modelMeanValue);
+ ret = mv_engine_config_get_double_attribute(
+ engine_config, MV_INFERENCE_MODEL_MEAN_VALUE, &modelMeanValue);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get model mean value");
goto _ERROR_;
}
- ret = mv_engine_config_get_int_attribute(engine_config,
- MV_INFERENCE_BACKEND_TYPE,
- &backendType);
+ ret = mv_engine_config_get_int_attribute(
+ engine_config, MV_INFERENCE_BACKEND_TYPE, &backendType);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get inference backend type");
goto _ERROR_;
}
- ret = mv_engine_config_get_int_attribute(engine_config,
- MV_INFERENCE_TARGET_DEVICE_TYPE,
- &targetTypes);
+ ret = mv_engine_config_get_int_attribute(
+ engine_config, MV_INFERENCE_TARGET_DEVICE_TYPE, &targetTypes);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get inference target type");
goto _ERROR_;
}
- if ( access(modelWeightFilePath, F_OK)) {
+ if (access(modelWeightFilePath, F_OK)) {
LOGE("weightFilePath in [%s] ", modelWeightFilePath);
ret = MEDIA_VISION_ERROR_INVALID_PATH;
goto _ERROR_;
}
- if ( (backendType > MV_INFERENCE_BACKEND_NONE && backendType < MV_INFERENCE_BACKEND_MAX)
- && (backendType != MV_INFERENCE_BACKEND_TFLITE) && (backendType != MV_INFERENCE_BACKEND_ARMNN)) {
- if ( access(modelConfigFilePath, F_OK)) {
+ if ((backendType > MV_INFERENCE_BACKEND_NONE &&
+ backendType < MV_INFERENCE_BACKEND_MAX) &&
+ (backendType != MV_INFERENCE_BACKEND_TFLITE) &&
+ (backendType != MV_INFERENCE_BACKEND_ARMNN)) {
+ if (access(modelConfigFilePath, F_OK)) {
LOGE("modelConfigFilePath in [%s] ", modelConfigFilePath);
ret = MEDIA_VISION_ERROR_INVALID_PATH;
goto _ERROR_;
@@ -210,21 +210,23 @@ int mv_inference_configure_model_open(mv_inference_h infer, mv_engine_config_h e
if (is_new_version) {
// Use new type.
- if (pInfer->ConfigureTargetDevices(targetTypes) != MEDIA_VISION_ERROR_NONE) {
+ if (pInfer->ConfigureTargetDevices(targetTypes) !=
+ MEDIA_VISION_ERROR_NONE) {
LOGE("Tried to configure invalid target types.");
goto _ERROR_;
}
} else {
// Convert old type to new one and then use it.
- if (pInfer->ConfigureTargetTypes(targetTypes) != MEDIA_VISION_ERROR_NONE) {
+ if (pInfer->ConfigureTargetTypes(targetTypes) !=
+ MEDIA_VISION_ERROR_NONE) {
LOGE("Tried to configure invalid target types.");
goto _ERROR_;
}
}
pInfer->ConfigureModelFiles(std::string(modelConfigFilePath),
- std::string(modelWeightFilePath),
- std::string(modelUserFilePath));
+ std::string(modelWeightFilePath),
+ std::string(modelUserFilePath));
_ERROR_:
if (modelConfigFilePath)
@@ -238,78 +240,76 @@ _ERROR_:
LOGI("LEAVE");
- return ret;
+ return ret;
}
-int mv_inference_configure_tensor_info_open(mv_inference_h infer, mv_engine_config_h engine_config)
+int mv_inference_configure_tensor_info_open(mv_inference_h infer,
+ mv_engine_config_h engine_config)
{
LOGI("ENTER");
- Inference *pInfer = static_cast<Inference *>(infer);
+ Inference *pInfer = static_cast<Inference *>(infer);
- int ret = MEDIA_VISION_ERROR_NONE;
+ int ret = MEDIA_VISION_ERROR_NONE;
int tensorWidth, tensorHeight, tensorDim, tensorCh;
double meanValue, stdValue;
// This should be one. only one batch is supported
tensorDim = 1;
- ret = mv_engine_config_get_int_attribute(engine_config,
- MV_INFERENCE_INPUT_TENSOR_WIDTH,
- &tensorWidth);
+ ret = mv_engine_config_get_int_attribute(
+ engine_config, MV_INFERENCE_INPUT_TENSOR_WIDTH, &tensorWidth);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get tensor width");
goto _ERROR_;
}
- ret = mv_engine_config_get_int_attribute(engine_config,
- MV_INFERENCE_INPUT_TENSOR_HEIGHT,
- &tensorHeight);
+ ret = mv_engine_config_get_int_attribute(
+ engine_config, MV_INFERENCE_INPUT_TENSOR_HEIGHT, &tensorHeight);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get tensor height");
goto _ERROR_;
}
- ret = mv_engine_config_get_int_attribute(engine_config,
- MV_INFERENCE_INPUT_TENSOR_CHANNELS,
- &tensorCh);
+ ret = mv_engine_config_get_int_attribute(
+ engine_config, MV_INFERENCE_INPUT_TENSOR_CHANNELS, &tensorCh);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get tensor channels");
goto _ERROR_;
}
- ret = mv_engine_config_get_double_attribute(engine_config,
- MV_INFERENCE_MODEL_MEAN_VALUE,
- &meanValue);
+ ret = mv_engine_config_get_double_attribute(
+ engine_config, MV_INFERENCE_MODEL_MEAN_VALUE, &meanValue);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get meanValue");
goto _ERROR_;
}
- ret = mv_engine_config_get_double_attribute(engine_config,
- MV_INFERENCE_MODEL_STD_VALUE,
- &stdValue);
+ ret = mv_engine_config_get_double_attribute(
+ engine_config, MV_INFERENCE_MODEL_STD_VALUE, &stdValue);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get stdValue");
goto _ERROR_;
}
- pInfer->ConfigureTensorInfo(tensorWidth, tensorHeight, tensorDim, tensorCh, stdValue, meanValue);
+ pInfer->ConfigureTensorInfo(tensorWidth, tensorHeight, tensorDim, tensorCh,
+ stdValue, meanValue);
-_ERROR_ :
+_ERROR_:
LOGI("LEAVE");
- return ret;
+ return ret;
}
-int mv_inference_configure_input_info_open(mv_inference_h infer, mv_engine_config_h engine_config)
+int mv_inference_configure_input_info_open(mv_inference_h infer,
+ mv_engine_config_h engine_config)
{
LOGI("ENTER");
- Inference *pInfer = static_cast<Inference *>(infer);
+ Inference *pInfer = static_cast<Inference *>(infer);
- int ret = MEDIA_VISION_ERROR_NONE;
+ int ret = MEDIA_VISION_ERROR_NONE;
int tensorWidth, tensorHeight, tensorDim, tensorCh;
double meanValue, stdValue;
@@ -318,72 +318,60 @@ int mv_inference_configure_input_info_open(mv_inference_h infer, mv_engine_confi
// This should be one. only one batch is supported
tensorDim = 1;
- ret = mv_engine_config_get_int_attribute(engine_config,
- MV_INFERENCE_INPUT_TENSOR_WIDTH,
- &tensorWidth);
+ ret = mv_engine_config_get_int_attribute(
+ engine_config, MV_INFERENCE_INPUT_TENSOR_WIDTH, &tensorWidth);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get tensor width");
goto _ERROR_;
}
- ret = mv_engine_config_get_int_attribute(engine_config,
- MV_INFERENCE_INPUT_TENSOR_HEIGHT,
- &tensorHeight);
+ ret = mv_engine_config_get_int_attribute(
+ engine_config, MV_INFERENCE_INPUT_TENSOR_HEIGHT, &tensorHeight);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get tensor height");
goto _ERROR_;
}
- ret = mv_engine_config_get_int_attribute(engine_config,
- MV_INFERENCE_INPUT_TENSOR_CHANNELS,
- &tensorCh);
+ ret = mv_engine_config_get_int_attribute(
+ engine_config, MV_INFERENCE_INPUT_TENSOR_CHANNELS, &tensorCh);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get tensor channels");
goto _ERROR_;
}
- ret = mv_engine_config_get_double_attribute(engine_config,
- MV_INFERENCE_MODEL_MEAN_VALUE,
- &meanValue);
+ ret = mv_engine_config_get_double_attribute(
+ engine_config, MV_INFERENCE_MODEL_MEAN_VALUE, &meanValue);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get meanValue");
goto _ERROR_;
}
- ret = mv_engine_config_get_double_attribute(engine_config,
- MV_INFERENCE_MODEL_STD_VALUE,
- &stdValue);
+ ret = mv_engine_config_get_double_attribute(
+ engine_config, MV_INFERENCE_MODEL_STD_VALUE, &stdValue);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get stdValue");
goto _ERROR_;
}
- ret = mv_engine_config_get_int_attribute(engine_config,
- MV_INFERENCE_INPUT_DATA_TYPE,
- &dataType);
+ ret = mv_engine_config_get_int_attribute(
+ engine_config, MV_INFERENCE_INPUT_DATA_TYPE, &dataType);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get a input tensor data type");
goto _ERROR_;
}
- ret = mv_engine_config_get_string_attribute(engine_config,
- MV_INFERENCE_INPUT_NODE_NAME,
- &node_name);
+ ret = mv_engine_config_get_string_attribute(
+ engine_config, MV_INFERENCE_INPUT_NODE_NAME, &node_name);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get tensor width");
goto _ERROR_;
}
- pInfer->ConfigureInputInfo(tensorWidth,
- tensorHeight,
- tensorDim,
- tensorCh,
- stdValue,
- meanValue,
- dataType,
- std::vector<std::string>(1, std::string(node_name)));
+ pInfer->ConfigureInputInfo(
+ tensorWidth, tensorHeight, tensorDim, tensorCh, stdValue, meanValue,
+ dataType, std::vector<std::string>(1, std::string(node_name)));
-_ERROR_ :
+_ERROR_:
if (node_name) {
free(node_name);
@@ -392,32 +380,33 @@ _ERROR_ :
LOGI("LEAVE");
- return ret;
+ return ret;
}
-int mv_inference_configure_engine_open(mv_inference_h infer, mv_engine_config_h engine_config)
+int mv_inference_configure_engine_open(mv_inference_h infer,
+ mv_engine_config_h engine_config)
{
LOGI("ENTER");
- Inference *pInfer = static_cast<Inference *>(infer);
+ Inference *pInfer = static_cast<Inference *>(infer);
int backendType = 0;
- int ret = MEDIA_VISION_ERROR_NONE;
+ int ret = MEDIA_VISION_ERROR_NONE;
pInfer->SetEngineConfig(engine_config);
- ret = mv_engine_config_get_int_attribute(engine_config,
- MV_INFERENCE_BACKEND_TYPE,
- &backendType);
+ ret = mv_engine_config_get_int_attribute(
+ engine_config, MV_INFERENCE_BACKEND_TYPE, &backendType);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get inference backend type");
goto _ERROR_;
}
- ret = pInfer->ConfigureBackendType((mv_inference_backend_type_e)backendType);
- if (ret != MEDIA_VISION_ERROR_NONE) {
+ ret = pInfer->ConfigureBackendType(
+ (mv_inference_backend_type_e) backendType);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to configure a backend type.");
goto _ERROR_;
- }
+ }
// Create a inference-engine-common class object and load its corresponding library.
ret = pInfer->Bind();
@@ -430,7 +419,8 @@ _ERROR_:
return ret;
}
-int mv_inference_configure_output_open(mv_inference_h infer, mv_engine_config_h engine_config)
+int mv_inference_configure_output_open(mv_inference_h infer,
+ mv_engine_config_h engine_config)
{
LOGI("ENTER");
@@ -439,9 +429,8 @@ int mv_inference_configure_output_open(mv_inference_h infer, mv_engine_config_h
int maxOutput = 0;
int ret = MEDIA_VISION_ERROR_NONE;
- ret = mv_engine_config_get_int_attribute(engine_config,
- MV_INFERENCE_OUTPUT_MAX_NUMBER,
- &maxOutput);
+ ret = mv_engine_config_get_int_attribute(
+ engine_config, MV_INFERENCE_OUTPUT_MAX_NUMBER, &maxOutput);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get inference output maximum numbers");
goto _ERROR_;
@@ -454,7 +443,8 @@ _ERROR_:
return ret;
}
-int mv_inference_configure_confidence_threshold_open(mv_inference_h infer, mv_engine_config_h engine_config)
+int mv_inference_configure_confidence_threshold_open(
+ mv_inference_h infer, mv_engine_config_h engine_config)
{
LOGI("ENTER");
@@ -463,9 +453,8 @@ int mv_inference_configure_confidence_threshold_open(mv_inference_h infer, mv_en
double threshold = 0;
int ret = MEDIA_VISION_ERROR_NONE;
- ret = mv_engine_config_get_double_attribute(engine_config,
- MV_INFERENCE_CONFIDENCE_THRESHOLD,
- &threshold);
+ ret = mv_engine_config_get_double_attribute(
+ engine_config, MV_INFERENCE_CONFIDENCE_THRESHOLD, &threshold);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get inference confidence threshold value");
goto _ERROR_;
@@ -478,7 +467,8 @@ _ERROR_:
return ret;
}
-int mv_inference_configure_post_process_info_open(mv_inference_h infer, mv_engine_config_h engine_config)
+int mv_inference_configure_post_process_info_open(
+ mv_inference_h infer, mv_engine_config_h engine_config)
{
LOGI("ENTER");
@@ -488,9 +478,8 @@ int mv_inference_configure_post_process_info_open(mv_inference_h infer, mv_engin
double threshold = 0;
int ret = MEDIA_VISION_ERROR_NONE;
- ret = mv_engine_config_get_int_attribute(engine_config,
- MV_INFERENCE_OUTPUT_MAX_NUMBER,
- &maxOutput);
+ ret = mv_engine_config_get_int_attribute(
+ engine_config, MV_INFERENCE_OUTPUT_MAX_NUMBER, &maxOutput);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get inference output maximum numbers");
goto _ERROR_;
@@ -498,9 +487,8 @@ int mv_inference_configure_post_process_info_open(mv_inference_h infer, mv_engin
pInfer->ConfigureOutput(maxOutput);
- ret = mv_engine_config_get_double_attribute(engine_config,
- MV_INFERENCE_CONFIDENCE_THRESHOLD,
- &threshold);
+ ret = mv_engine_config_get_double_attribute(
+ engine_config, MV_INFERENCE_CONFIDENCE_THRESHOLD, &threshold);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get inference confidence threshold value");
goto _ERROR_;
@@ -513,27 +501,26 @@ _ERROR_:
return ret;
}
-int mv_inference_configure_output_info_open(mv_inference_h infer, mv_engine_config_h engine_config)
+int mv_inference_configure_output_info_open(mv_inference_h infer,
+ mv_engine_config_h engine_config)
{
LOGI("ENTER");
- Inference *pInfer = static_cast<Inference *>(infer);
+ Inference *pInfer = static_cast<Inference *>(infer);
- int ret = MEDIA_VISION_ERROR_NONE;
+ int ret = MEDIA_VISION_ERROR_NONE;
int idx = 0;
char **node_names = NULL;
int size = 0;
std::vector<std::string> names;
- ret = mv_engine_config_get_array_string_attribute(engine_config,
- MV_INFERENCE_OUTPUT_NODE_NAMES,
- &node_names,
- &size);
+ ret = mv_engine_config_get_array_string_attribute(
+ engine_config, MV_INFERENCE_OUTPUT_NODE_NAMES, &node_names, &size);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get _output_node_names");
goto _ERROR_;
}
- for (idx = 0 ; idx < size; ++idx)
+ for (idx = 0; idx < size; ++idx)
names.push_back(std::string(node_names[idx]));
pInfer->ConfigureOutputInfo(names);
@@ -561,17 +548,17 @@ int mv_inference_prepare_open(mv_inference_h infer)
int ret = MEDIA_VISION_ERROR_NONE;
- // Pass parameters needed to load model files to a backend engine.
+ // Pass parameters needed to load model files to a backend engine.
ret = pInfer->Prepare();
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to prepare inference");
- return ret;
- }
+ return ret;
+ }
- // Request to load model files to a backend engine.
- ret = pInfer->Load();
- if (ret != MEDIA_VISION_ERROR_NONE)
- LOGE("Fail to load model files.");
+ // Request to load model files to a backend engine.
+ ret = pInfer->Load();
+ if (ret != MEDIA_VISION_ERROR_NONE)
+ LOGE("Fail to load model files.");
LOGI("LEAVE");
@@ -579,9 +566,8 @@ int mv_inference_prepare_open(mv_inference_h infer)
}
int mv_inference_foreach_supported_engine_open(
- mv_inference_h infer,
- mv_inference_supported_engine_cb callback,
- void *user_data)
+ mv_inference_h infer, mv_inference_supported_engine_cb callback,
+ void *user_data)
{
LOGI("ENTER");
@@ -603,11 +589,8 @@ int mv_inference_foreach_supported_engine_open(
}
int mv_inference_image_classify_open(
- mv_source_h source,
- mv_inference_h infer,
- mv_rectangle_s *roi,
- mv_inference_image_classified_cb classified_cb,
- void *user_data)
+ mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
+ mv_inference_image_classified_cb classified_cb, void *user_data)
{
Inference *pInfer = static_cast<Inference *>(infer);
@@ -637,33 +620,31 @@ int mv_inference_image_classify_open(
numberOfOutputs = classificationResults.number_of_classes;
-
int *indices = classificationResults.indices.data();
float *confidences = classificationResults.confidences.data();
static const int START_CLASS_NUMBER = 10;
- static std::vector<const char*> names(START_CLASS_NUMBER);
+ static std::vector<const char *> names(START_CLASS_NUMBER);
if (numberOfOutputs > START_CLASS_NUMBER)
names.resize(numberOfOutputs);
- LOGE("mv_inference_open: number_of_classes: %d\n", classificationResults.number_of_classes);
+ LOGE("mv_inference_open: number_of_classes: %d\n",
+ classificationResults.number_of_classes);
for (int n = 0; n < numberOfOutputs; ++n) {
LOGE("names: %s", classificationResults.names[n].c_str());
names[n] = classificationResults.names[n].c_str();
}
- classified_cb(source, numberOfOutputs, indices, names.data(), confidences, user_data);
+ classified_cb(source, numberOfOutputs, indices, names.data(), confidences,
+ user_data);
return ret;
}
-
-int mv_inference_object_detect_open(
- mv_source_h source,
- mv_inference_h infer,
- mv_inference_object_detected_cb detected_cb,
- void *user_data)
+int mv_inference_object_detect_open(mv_source_h source, mv_inference_h infer,
+ mv_inference_object_detected_cb detected_cb,
+ void *user_data)
{
Inference *pInfer = static_cast<Inference *>(infer);
@@ -692,7 +673,7 @@ int mv_inference_object_detect_open(
int *indices = objectDetectionResults.indices.data();
float *confidences = objectDetectionResults.confidences.data();
static const int START_OBJECT_NUMBER = 20;
- static std::vector<const char*> names(START_OBJECT_NUMBER);
+ static std::vector<const char *> names(START_OBJECT_NUMBER);
static std::vector<mv_rectangle_s> locations(START_OBJECT_NUMBER);
if (numberOfOutputs > START_OBJECT_NUMBER) {
@@ -710,16 +691,15 @@ int mv_inference_object_detect_open(
locations[n].height = objectDetectionResults.locations[n].height;
}
- detected_cb(source, numberOfOutputs, indices, names.data(), confidences, locations.data(), user_data);
+ detected_cb(source, numberOfOutputs, indices, names.data(), confidences,
+ locations.data(), user_data);
return ret;
}
-int mv_inference_face_detect_open(
- mv_source_h source,
- mv_inference_h infer,
- mv_inference_face_detected_cb detected_cb,
- void *user_data)
+int mv_inference_face_detect_open(mv_source_h source, mv_inference_h infer,
+ mv_inference_face_detected_cb detected_cb,
+ void *user_data)
{
Inference *pInfer = static_cast<Inference *>(infer);
@@ -755,17 +735,15 @@ int mv_inference_face_detect_open(
locations[n].height = faceDetectionResults.locations[n].height;
}
- detected_cb(source, numberOfOutputs, confidences, locations.data(), user_data);
+ detected_cb(source, numberOfOutputs, confidences, locations.data(),
+ user_data);
return ret;
}
int mv_inference_facial_landmark_detect_open(
- mv_source_h source,
- mv_inference_h infer,
- mv_rectangle_s *roi,
- mv_inference_facial_landmark_detected_cb detected_cb,
- void *user_data)
+ mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
+ mv_inference_facial_landmark_detected_cb detected_cb, void *user_data)
{
Inference *pInfer = static_cast<Inference *>(infer);
@@ -786,7 +764,8 @@ int mv_inference_facial_landmark_detect_open(
}
FacialLandMarkDetectionResults facialLandMarkDetectionResults;
- ret = pInfer->GetFacialLandMarkDetectionResults(&facialLandMarkDetectionResults);
+ ret = pInfer->GetFacialLandMarkDetectionResults(
+ &facialLandMarkDetectionResults);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get inference results");
return ret;
@@ -797,7 +776,6 @@ int mv_inference_facial_landmark_detect_open(
std::vector<mv_point_s> locations(numberOfLandmarks);
for (int n = 0; n < numberOfLandmarks; ++n) {
-
locations[n].x = facialLandMarkDetectionResults.locations[n].x;
locations[n].y = facialLandMarkDetectionResults.locations[n].y;
}
@@ -808,11 +786,8 @@ int mv_inference_facial_landmark_detect_open(
}
int mv_inference_pose_estimation_detect_open(
- mv_source_h source,
- mv_inference_h infer,
- mv_rectangle_s *roi,
- mv_inference_pose_estimation_detected_cb detected_cb,
- void *user_data)
+ mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
+ mv_inference_pose_estimation_detected_cb detected_cb, void *user_data)
{
Inference *pInfer = static_cast<Inference *>(infer);
@@ -844,7 +819,6 @@ int mv_inference_pose_estimation_detect_open(
std::vector<mv_point_s> locations(numberOfPoseEstimation);
for (int n = 0; n < numberOfPoseEstimation; ++n) {
-
locations[n].x = poseEstimationResults.locations[n].x;
locations[n].y = poseEstimationResults.locations[n].y;
}
diff --git a/test/testsuites/inference/inference_test_suite.c b/test/testsuites/inference/inference_test_suite.c
index 11761256..3b4d52f8 100644
--- a/test/testsuites/inference/inference_test_suite.c
+++ b/test/testsuites/inference/inference_test_suite.c
@@ -31,43 +31,63 @@
#include <limits.h>
#include <time.h>
-#define ARRAY_SIZE(x) (sizeof((x))/sizeof((x)[0]))
+#define ARRAY_SIZE(x) (sizeof((x)) / sizeof((x)[0]))
#define FILE_PATH_SIZE 1024
//Image Classification
-#define IC_LABEL_PATH "/usr/share/capi-media-vision/models/IC/tflite/ic_label.txt"
-#define IC_TFLITE_WEIGHT_PATH "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite"
-
-#define IC_VIVANTE_LABEL_PATH "/usr/share/capi-media-vision/models/IC/vivante/ic_label.txt"
-#define IC_VIVANTE_WEIGHT_PATH "/usr/share/capi-media-vision/models/IC/vivante/ic_vivante_model.nb"
-#define IC_VIVANTE_CONFIG_PATH "/usr/share/capi-media-vision/models/IC/vivante/ic_vivante_model.so"
-
-#define IC_OPENCV_LABEL_CAFFE_PATH "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_label_squeezenet.txt"
-#define IC_OPENCV_WEIGHT_CAFFE_PATH "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel"
-#define IC_OPENCV_CONFIG_CAFFE_PATH "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt"
+#define IC_LABEL_PATH \
+ "/usr/share/capi-media-vision/models/IC/tflite/ic_label.txt"
+#define IC_TFLITE_WEIGHT_PATH \
+ "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite"
+
+#define IC_VIVANTE_LABEL_PATH \
+ "/usr/share/capi-media-vision/models/IC/vivante/ic_label.txt"
+#define IC_VIVANTE_WEIGHT_PATH \
+ "/usr/share/capi-media-vision/models/IC/vivante/ic_vivante_model.nb"
+#define IC_VIVANTE_CONFIG_PATH \
+ "/usr/share/capi-media-vision/models/IC/vivante/ic_vivante_model.so"
+
+#define IC_OPENCV_LABEL_CAFFE_PATH \
+ "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_label_squeezenet.txt"
+#define IC_OPENCV_WEIGHT_CAFFE_PATH \
+ "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel"
+#define IC_OPENCV_CONFIG_CAFFE_PATH \
+ "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt"
//Object Detection
-#define OD_LABEL_PATH "/usr/share/capi-media-vision/models/OD/tflite/od_label.txt"
-#define OD_TFLITE_WEIGHT_PATH "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite"
-
-#define OD_OPENCV_LABEL_CAFFE_PATH "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_label_mobilenetv1ssd.txt"
-#define OD_OPENCV_WEIGHT_CAFFE_PATH "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.caffemodel"
-#define OD_OPENCV_CONFIG_CAFFE_PATH "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.prototxt"
+#define OD_LABEL_PATH \
+ "/usr/share/capi-media-vision/models/OD/tflite/od_label.txt"
+#define OD_TFLITE_WEIGHT_PATH \
+ "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite"
+
+#define OD_OPENCV_LABEL_CAFFE_PATH \
+ "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_label_mobilenetv1ssd.txt"
+#define OD_OPENCV_WEIGHT_CAFFE_PATH \
+ "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.caffemodel"
+#define OD_OPENCV_CONFIG_CAFFE_PATH \
+ "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.prototxt"
//Face Detection
-#define FD_TFLITE_WEIGHT_PATH "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite"
+#define FD_TFLITE_WEIGHT_PATH \
+ "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite"
-#define FD_OPENCV_WEIGHT_CAFFE_PATH "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.caffemodel"
-#define FD_OPENCV_CONFIG_CAFFE_PATH "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.prototxt"
+#define FD_OPENCV_WEIGHT_CAFFE_PATH \
+ "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.caffemodel"
+#define FD_OPENCV_CONFIG_CAFFE_PATH \
+ "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.prototxt"
//Facial Landmark Detection
-#define FLD_TFLITE_WEIGHT_PATH "/usr/share/capi-media-vision/models/FLD/tflite/fld_tflite_model1.tflite"
+#define FLD_TFLITE_WEIGHT_PATH \
+ "/usr/share/capi-media-vision/models/FLD/tflite/fld_tflite_model1.tflite"
-#define FLD_OPENCV_WEIGHT_CAFFE_PATH "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.caffemodel"
-#define FLD_OPENCV_CONFIG_CAFFE_PATH "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.prototxt"
+#define FLD_OPENCV_WEIGHT_CAFFE_PATH \
+ "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.caffemodel"
+#define FLD_OPENCV_CONFIG_CAFFE_PATH \
+ "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.prototxt"
//Pose Estimation
-#define PE_TFLITE_WEIGHT_PATH "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite"
+#define PE_TFLITE_WEIGHT_PATH \
+ "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite"
/******
* Public model:
@@ -81,1069 +101,929 @@
*/
#define NANO_PER_SEC ((__clock_t) 1000000000)
-#define NANO_PER_MILLI ((__clock_t) 1000000)
-#define MILLI_PER_SEC ((__clock_t) 1000)
+#define NANO_PER_MILLI ((__clock_t) 1000000)
+#define MILLI_PER_SEC ((__clock_t) 1000)
struct timespec diff(struct timespec start, struct timespec end)
{
- struct timespec temp;
- if ((end.tv_nsec - start.tv_nsec) < 0) {
- temp.tv_sec = end.tv_sec - start.tv_sec - 1;
- temp.tv_nsec = NANO_PER_SEC + end.tv_nsec - start.tv_nsec;
- }
- else {
- temp.tv_sec = end.tv_sec - start.tv_sec;
- temp.tv_nsec = end.tv_nsec - start.tv_nsec;
- }
- return temp;
+ struct timespec temp;
+ if ((end.tv_nsec - start.tv_nsec) < 0) {
+ temp.tv_sec = end.tv_sec - start.tv_sec - 1;
+ temp.tv_nsec = NANO_PER_SEC + end.tv_nsec - start.tv_nsec;
+ } else {
+ temp.tv_sec = end.tv_sec - start.tv_sec;
+ temp.tv_nsec = end.tv_nsec - start.tv_nsec;
+ }
+ return temp;
}
unsigned long gettotalmillisec(const struct timespec time)
{
- return time.tv_sec * MILLI_PER_SEC + time.tv_nsec / NANO_PER_MILLI;
+ return time.tv_sec * MILLI_PER_SEC + time.tv_nsec / NANO_PER_MILLI;
}
-void _object_detected_cb (
- mv_source_h source,
- const int number_of_objects,
- const int *indices,
- const char **names,
- const float *confidences,
- const mv_rectangle_s *locations,
- void *user_data)
+void _object_detected_cb(mv_source_h source, const int number_of_objects,
+ const int *indices, const char **names,
+ const float *confidences,
+ const mv_rectangle_s *locations, void *user_data)
{
- printf("In callback: %d objects\n", number_of_objects);
-
- for (int n = 0; n < number_of_objects; n++) {
- printf("%2d\n", indices[n]);
- printf("%s\n", names[n]);
- printf("%.3f\n", confidences[n]);
- printf("%d,%d,%d,%d\n", locations[n].point.x,
- locations[n].point.y,
- locations[n].width,
- locations[n].height);
- }
+ printf("In callback: %d objects\n", number_of_objects);
+
+ for (int n = 0; n < number_of_objects; n++) {
+ printf("%2d\n", indices[n]);
+ printf("%s\n", names[n]);
+ printf("%.3f\n", confidences[n]);
+ printf("%d,%d,%d,%d\n", locations[n].point.x, locations[n].point.y,
+ locations[n].width, locations[n].height);
+ }
}
-void _face_detected_cb (
- mv_source_h source,
- const int number_of_faces,
- const float *confidences,
- const mv_rectangle_s *locations,
- void *user_data)
+void _face_detected_cb(mv_source_h source, const int number_of_faces,
+ const float *confidences,
+ const mv_rectangle_s *locations, void *user_data)
{
- printf("In callback: %d faces\n", number_of_faces);
-
- for (int n = 0; n < number_of_faces; n++) {
- printf("%.3f\n", confidences[n]);
- printf("%d,%d,%d,%d\n", locations[n].point.x,
- locations[n].point.y,
- locations[n].width,
- locations[n].height);
- }
+ printf("In callback: %d faces\n", number_of_faces);
+ for (int n = 0; n < number_of_faces; n++) {
+ printf("%.3f\n", confidences[n]);
+ printf("%d,%d,%d,%d\n", locations[n].point.x, locations[n].point.y,
+ locations[n].width, locations[n].height);
+ }
}
-void _facial_landmark_detected_cb (
- mv_source_h source,
- const int number_of_landmarks,
- const mv_point_s *locations,
- void *user_data)
+void _facial_landmark_detected_cb(mv_source_h source,
+ const int number_of_landmarks,
+ const mv_point_s *locations, void *user_data)
{
- printf("In callback, %d landmarks\n", number_of_landmarks);
- for (int n = 0; n < number_of_landmarks; n++) {
- printf("%d: x[%d], y[%d]\n", n, locations[n].x, locations[n].y);
- }
+ printf("In callback, %d landmarks\n", number_of_landmarks);
+ for (int n = 0; n < number_of_landmarks; n++) {
+ printf("%d: x[%d], y[%d]\n", n, locations[n].x, locations[n].y);
+ }
}
-void _pose_estimation_detected_cb (
- mv_source_h source,
- const int number_of_pose_estimation,
- const mv_point_s *locations,
- void *user_data)
+void _pose_estimation_detected_cb(mv_source_h source,
+ const int number_of_pose_estimation,
+ const mv_point_s *locations, void *user_data)
{
- printf("In callback, %d pose estimation\n", number_of_pose_estimation);
- for (int n = 0; n < number_of_pose_estimation; n++) {
- printf("%d: x[%d], y[%d]\n", n, locations[n].x, locations[n].y);
- }
+ printf("In callback, %d pose estimation\n", number_of_pose_estimation);
+ for (int n = 0; n < number_of_pose_estimation; n++) {
+ printf("%d: x[%d], y[%d]\n", n, locations[n].x, locations[n].y);
+ }
}
-void _image_classified_cb (
- mv_source_h source,
- const int number_of_classes,
- const int *indices,
- const char **names,
- const float *confidences,
- void *user_data)
+void _image_classified_cb(mv_source_h source, const int number_of_classes,
+ const int *indices, const char **names,
+ const float *confidences, void *user_data)
{
- printf("In callback: %d classes\n", number_of_classes);
+ printf("In callback: %d classes\n", number_of_classes);
- for (int n=0; n<number_of_classes; ++n) {
- printf("%2d\n", indices[n]);
- printf("%s\n", names[n]);
- printf("%.3f\n", confidences[n]);
- }
+ for (int n = 0; n < number_of_classes; ++n) {
+ printf("%2d\n", indices[n]);
+ printf("%s\n", names[n]);
+ printf("%.3f\n", confidences[n]);
+ }
}
-int show_menu(const char *title, const int *options, const char **names, int cnt)
+int show_menu(const char *title, const int *options, const char **names,
+ int cnt)
{
- printf("*********************************************\n");
- printf("* %38s *\n", title);
- printf("*-------------------------------------------*\n");
- int i = 0;
- for (i = 0; i < cnt; ++i)
- printf("* %2i. %34s *\n", options[i], names[i]);
-
- printf("*********************************************\n\n");
- int selection = 0;
- printf("Your choice: ");
- if (scanf("%20i", &selection) == 0) {
- if (scanf("%*[^\n]%*c") != 0) {
- printf("ERROR: Reading the input line error.\n");
- return -1;
- }
- printf("ERROR: Incorrect input.\n");
- }
-
- return selection;
+ printf("*********************************************\n");
+ printf("* %38s *\n", title);
+ printf("*-------------------------------------------*\n");
+ int i = 0;
+ for (i = 0; i < cnt; ++i)
+ printf("* %2i. %34s *\n", options[i], names[i]);
+
+ printf("*********************************************\n\n");
+ int selection = 0;
+ printf("Your choice: ");
+ if (scanf("%20i", &selection) == 0) {
+ if (scanf("%*[^\n]%*c") != 0) {
+ printf("ERROR: Reading the input line error.\n");
+ return -1;
+ }
+ printf("ERROR: Incorrect input.\n");
+ }
+
+ return selection;
}
int perform_configure_set_model_config_path(mv_engine_config_h engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
- char *filePath = NULL;
- while (-1 == input_string(
- "Model configuration file path:",
- FILE_PATH_SIZE,
- &(filePath))) {
- printf("Incorrect file path! Try again.\n");
- }
-
- err = mv_engine_config_set_string_attribute(engine_cfg,
- MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
- filePath);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to set model configuration file path: %s\n", filePath);
- }
-
- free(filePath);
- filePath = NULL;
-
- return err;
+ int err = MEDIA_VISION_ERROR_NONE;
+ char *filePath = NULL;
+ while (-1 == input_string("Model configuration file path:", FILE_PATH_SIZE,
+ &(filePath))) {
+ printf("Incorrect file path! Try again.\n");
+ }
+
+ err = mv_engine_config_set_string_attribute(
+ engine_cfg, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH, filePath);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to set model configuration file path: %s\n", filePath);
+ }
+
+ free(filePath);
+ filePath = NULL;
+
+ return err;
}
int perform_configure_set_model_weights_path(mv_engine_config_h engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
- char *filePath = NULL;
- while (-1 == input_string(
- "Model weights file path:",
- FILE_PATH_SIZE,
- &(filePath))) {
- printf("Incorrect file path! Try again.\n");
- }
-
- err = mv_engine_config_set_string_attribute(engine_cfg,
- MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
- filePath);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to set model weights file path: %s\n", filePath);
- }
-
- free(filePath);
- filePath = NULL;
-
- return err;
+ int err = MEDIA_VISION_ERROR_NONE;
+ char *filePath = NULL;
+ while (-1 == input_string("Model weights file path:", FILE_PATH_SIZE,
+ &(filePath))) {
+ printf("Incorrect file path! Try again.\n");
+ }
+
+ err = mv_engine_config_set_string_attribute(
+ engine_cfg, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, filePath);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to set model weights file path: %s\n", filePath);
+ }
+
+ free(filePath);
+ filePath = NULL;
+
+ return err;
}
int perform_configure_set_input_data_type(mv_engine_config_h engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
- int dataType = 0;
- while (-1 == input_int(
- "Input Tensor Data Type:",
- 0,
- 4,
- &dataType)) {
- printf("Invalid type! Try again.\n");
- }
-
- err = mv_engine_config_set_int_attribute(engine_cfg,
- MV_INFERENCE_INPUT_DATA_TYPE,
- (mv_inference_data_type_e)dataType);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to set input tensor data type: %d\n", dataType);
- }
-
- return err;
+ int err = MEDIA_VISION_ERROR_NONE;
+ int dataType = 0;
+ while (-1 == input_int("Input Tensor Data Type:", 0, 4, &dataType)) {
+ printf("Invalid type! Try again.\n");
+ }
+
+ err = mv_engine_config_set_int_attribute(
+ engine_cfg, MV_INFERENCE_INPUT_DATA_TYPE,
+ (mv_inference_data_type_e) dataType);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to set input tensor data type: %d\n", dataType);
+ }
+
+ return err;
}
int perform_configure_set_model_userfile_path(mv_engine_config_h engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
- char *filePath = NULL;
- while (-1 == input_string(
- "Model user file (category list) path:",
- FILE_PATH_SIZE,
- &(filePath))) {
- printf("Incorrect file path! Try again.\n");
- }
-
- err = mv_engine_config_set_string_attribute(engine_cfg,
- MV_INFERENCE_MODEL_USER_FILE_PATH,
- filePath);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to set model user file path: %s\n", filePath);
- }
-
- free(filePath);
- filePath = NULL;
-
- return err;
+ int err = MEDIA_VISION_ERROR_NONE;
+ char *filePath = NULL;
+ while (-1 == input_string("Model user file (category list) path:",
+ FILE_PATH_SIZE, &(filePath))) {
+ printf("Incorrect file path! Try again.\n");
+ }
+
+ err = mv_engine_config_set_string_attribute(
+ engine_cfg, MV_INFERENCE_MODEL_USER_FILE_PATH, filePath);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to set model user file path: %s\n", filePath);
+ }
+
+ free(filePath);
+ filePath = NULL;
+
+ return err;
}
int perform_configure_set_model_mean_value(mv_engine_config_h engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
- double meanValue = 0.0;
- while (-1 == input_double(
- "mean value:",
- 0.0,
- 255.0,
- &meanValue)) {
- printf("Invalid value! Try again.\n");
- }
-
- err = mv_engine_config_set_double_attribute(engine_cfg,
- MV_INFERENCE_MODEL_MEAN_VALUE,
- meanValue);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to set model mean value: %f\n", meanValue);
- }
-
- return err;
+ int err = MEDIA_VISION_ERROR_NONE;
+ double meanValue = 0.0;
+ while (-1 == input_double("mean value:", 0.0, 255.0, &meanValue)) {
+ printf("Invalid value! Try again.\n");
+ }
+
+ err = mv_engine_config_set_double_attribute(
+ engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, meanValue);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to set model mean value: %f\n", meanValue);
+ }
+
+ return err;
}
int perform_configure_set_image_scale(mv_engine_config_h engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
- double stdValue = 0.0;
- while (-1 == input_double(
- "Image scale factor:",
- 1.0,
- 255.0,
- &stdValue)) {
- printf("Invalid value! Try again.\n");
- }
-
- err = mv_engine_config_set_double_attribute(engine_cfg,
- MV_INFERENCE_MODEL_STD_VALUE,
- stdValue);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to set std value: %lf\n", stdValue);
- }
-
- return err;
+ int err = MEDIA_VISION_ERROR_NONE;
+ double stdValue = 0.0;
+ while (-1 == input_double("Image scale factor:", 1.0, 255.0, &stdValue)) {
+ printf("Invalid value! Try again.\n");
+ }
+
+ err = mv_engine_config_set_double_attribute(
+ engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, stdValue);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to set std value: %lf\n", stdValue);
+ }
+
+ return err;
}
int perform_configure_set_confidence_threshold(mv_engine_config_h engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
- double threshold = 0.0;
- while (-1 == input_double(
- "threshold:",
- 0.0,
- 1.0,
- &threshold)) {
- printf("Invalid value! Try again.\n");
- }
-
- err = mv_engine_config_set_double_attribute(engine_cfg,
- MV_INFERENCE_CONFIDENCE_THRESHOLD,
- threshold);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to set image scale value: %lf\n", threshold);
- }
-
- return err;
+ int err = MEDIA_VISION_ERROR_NONE;
+ double threshold = 0.0;
+ while (-1 == input_double("threshold:", 0.0, 1.0, &threshold)) {
+ printf("Invalid value! Try again.\n");
+ }
+
+ err = mv_engine_config_set_double_attribute(
+ engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, threshold);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+		printf("Fail to set confidence threshold value: %lf\n", threshold);
+ }
+
+ return err;
}
int perform_configure_set_backend(mv_engine_config_h engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
- int backendType = 0;
- while (-1 == input_int(
- "Backend Type:",
- 1,
- 3,
- &backendType)) {
- printf("Invalid type! Try again.\n");
- }
-
- err = mv_engine_config_set_int_attribute(engine_cfg,
- MV_INFERENCE_BACKEND_TYPE,
- (mv_inference_backend_type_e)backendType);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to set backend type: %d\n", backendType);
- }
-
- return err;
+ int err = MEDIA_VISION_ERROR_NONE;
+ int backendType = 0;
+ while (-1 == input_int("Backend Type:", 1, 3, &backendType)) {
+ printf("Invalid type! Try again.\n");
+ }
+
+ err = mv_engine_config_set_int_attribute(
+ engine_cfg, MV_INFERENCE_BACKEND_TYPE,
+ (mv_inference_backend_type_e) backendType);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to set backend type: %d\n", backendType);
+ }
+
+ return err;
}
int perform_configure_set_target(mv_engine_config_h engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
- int targetType = 0;
- while (-1 == input_int(
- "Backend Type:",
- 1,
- 2,
- &targetType)) {
- printf("Invalid type! Try again.\n");
- }
-
- err = mv_engine_config_set_int_attribute(engine_cfg,
- MV_INFERENCE_TARGET_TYPE,
- (mv_inference_target_type_e)targetType);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to set target type: %d\n", targetType);
- }
-
- return err;
+ int err = MEDIA_VISION_ERROR_NONE;
+ int targetType = 0;
+	while (-1 == input_int("Target Type:", 1, 2, &targetType)) {
+ printf("Invalid type! Try again.\n");
+ }
+
+ err = mv_engine_config_set_int_attribute(
+ engine_cfg, MV_INFERENCE_TARGET_TYPE,
+ (mv_inference_target_type_e) targetType);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to set target type: %d\n", targetType);
+ }
+
+ return err;
}
int perform_configure_set_tensor_width(mv_engine_config_h engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
- int tensorW = 0;
- while (-1 == input_int(
- "Tensor Width:",
- INT_MIN,
- INT_MAX,
- &tensorW)) {
- printf("Invalid value! Try again.\n");
- }
-
- err = mv_engine_config_set_int_attribute(engine_cfg,
- MV_INFERENCE_INPUT_TENSOR_WIDTH,
- tensorW);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to set tensor width: %d\n", tensorW);
- }
-
- return err;
+ int err = MEDIA_VISION_ERROR_NONE;
+ int tensorW = 0;
+ while (-1 == input_int("Tensor Width:", INT_MIN, INT_MAX, &tensorW)) {
+ printf("Invalid value! Try again.\n");
+ }
+
+ err = mv_engine_config_set_int_attribute(
+ engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, tensorW);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to set tensor width: %d\n", tensorW);
+ }
+
+ return err;
}
int perform_configure_set_tensor_height(mv_engine_config_h engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
- int tensorH = 0;
- while (-1 == input_int(
- "Tensor Height:",
- INT_MIN,
- INT_MAX,
- &tensorH)) {
- printf("Invalid value! Try again.\n");
- }
-
- err = mv_engine_config_set_int_attribute(engine_cfg,
- MV_INFERENCE_INPUT_TENSOR_HEIGHT,
- tensorH);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to set tensor height: %d\n", tensorH);
- }
-
- return err;
+ int err = MEDIA_VISION_ERROR_NONE;
+ int tensorH = 0;
+ while (-1 == input_int("Tensor Height:", INT_MIN, INT_MAX, &tensorH)) {
+ printf("Invalid value! Try again.\n");
+ }
+
+ err = mv_engine_config_set_int_attribute(
+ engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, tensorH);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to set tensor height: %d\n", tensorH);
+ }
+
+ return err;
}
int perform_configure_set_tensor_channels(mv_engine_config_h engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
- int tensorC = 0;
- while (-1 == input_int(
- "Tensor Channels:",
- INT_MIN,
- INT_MAX,
- &tensorC)) {
- printf("Invalid value! Try again.\n");
- }
-
- err = mv_engine_config_set_int_attribute(engine_cfg,
- MV_INFERENCE_INPUT_TENSOR_CHANNELS,
- tensorC);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to set tensor channels: %d\n", tensorC);
- }
-
- return err;
+ int err = MEDIA_VISION_ERROR_NONE;
+ int tensorC = 0;
+ while (-1 == input_int("Tensor Channels:", INT_MIN, INT_MAX, &tensorC)) {
+ printf("Invalid value! Try again.\n");
+ }
+
+ err = mv_engine_config_set_int_attribute(
+ engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, tensorC);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to set tensor channels: %d\n", tensorC);
+ }
+
+ return err;
}
int perform_configuration(mv_engine_config_h *engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
-
- int sel_opt = 0;
- const int options[13] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 ,12, 13};
- const char *names[13] = { "Set Model Configuration",
- "Set Model Weights",
- "Set Model Data Type",
- "Set Model UserFile",
- "Set Model MeanFile",
- "Set Image Scale",
- "Set Confidence Threshold",
- "Set Backend",
- "Set Target",
- "Set InputTensor Width",
- "Set InputTensor Height",
- "Set InputTensor Channels",
- "Back" };
-
- mv_engine_config_h handle = NULL;
- err = mv_create_engine_config(&handle);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to create engine configuration handle.\n");
- if (handle) {
- int err2 = mv_destroy_engine_config(handle);
- if (err2 != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to destroy engine cofniguration.\n");
- }
- }
- return err;
- }
-
- while (sel_opt == 0) {
- sel_opt = show_menu("Select Actions: ", options, names, ARRAY_SIZE(options));
- switch (sel_opt) {
- case 1:
- err = perform_configure_set_model_config_path(handle);
- sel_opt = 0;
- break;
- case 2:
- err = perform_configure_set_model_weights_path(handle);
- sel_opt = 0;
- break;
- case 3:
- err = perform_configure_set_input_data_type(handle);
- sel_opt = 0;
- break;
- case 4:
- err = perform_configure_set_model_userfile_path(handle);
- sel_opt = 0;
- break;
- case 5:
- err = perform_configure_set_model_mean_value(handle);
- sel_opt = 0;
- break;
- case 6:
- err = perform_configure_set_image_scale(handle);
- sel_opt = 0;
- break;
+ int err = MEDIA_VISION_ERROR_NONE;
+
+ int sel_opt = 0;
+ const int options[13] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 };
+ const char *names[13] = { "Set Model Configuration",
+ "Set Model Weights",
+ "Set Model Data Type",
+ "Set Model UserFile",
+ "Set Model MeanFile",
+ "Set Image Scale",
+ "Set Confidence Threshold",
+ "Set Backend",
+ "Set Target",
+ "Set InputTensor Width",
+ "Set InputTensor Height",
+ "Set InputTensor Channels",
+ "Back" };
+
+ mv_engine_config_h handle = NULL;
+ err = mv_create_engine_config(&handle);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create engine configuration handle.\n");
+ if (handle) {
+ int err2 = mv_destroy_engine_config(handle);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+				printf("Fail to destroy engine configuration.\n");
+ }
+ }
+ return err;
+ }
+
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Select Actions: ", options, names,
+ ARRAY_SIZE(options));
+ switch (sel_opt) {
+ case 1:
+ err = perform_configure_set_model_config_path(handle);
+ sel_opt = 0;
+ break;
+ case 2:
+ err = perform_configure_set_model_weights_path(handle);
+ sel_opt = 0;
+ break;
+ case 3:
+ err = perform_configure_set_input_data_type(handle);
+ sel_opt = 0;
+ break;
+ case 4:
+ err = perform_configure_set_model_userfile_path(handle);
+ sel_opt = 0;
+ break;
+ case 5:
+ err = perform_configure_set_model_mean_value(handle);
+ sel_opt = 0;
+ break;
+ case 6:
+ err = perform_configure_set_image_scale(handle);
+ sel_opt = 0;
+ break;
case 7:
err = perform_configure_set_confidence_threshold(handle);
sel_opt = 0;
- break;
- case 8:
- err = perform_configure_set_backend(handle);
- sel_opt = 0;
- break;
- case 9:
- err = perform_configure_set_target(handle);
- sel_opt = 0;
- break;
- case 10:
- err = perform_configure_set_tensor_width(handle);
- sel_opt = 0;
- break;
- case 11:
- err = perform_configure_set_tensor_height(handle);
- sel_opt = 0;
- break;
- case 12:
- err = perform_configure_set_tensor_channels(handle);
- sel_opt = 0;
- break;
- case 13:
- err = MEDIA_VISION_ERROR_NONE;
- break;
- default:
- printf("Invalid option.\n");
- sel_opt = 0;
- }
- }
-
-
- *engine_cfg = handle;
- return err;
+ break;
+ case 8:
+ err = perform_configure_set_backend(handle);
+ sel_opt = 0;
+ break;
+ case 9:
+ err = perform_configure_set_target(handle);
+ sel_opt = 0;
+ break;
+ case 10:
+ err = perform_configure_set_tensor_width(handle);
+ sel_opt = 0;
+ break;
+ case 11:
+ err = perform_configure_set_tensor_height(handle);
+ sel_opt = 0;
+ break;
+ case 12:
+ err = perform_configure_set_tensor_channels(handle);
+ sel_opt = 0;
+ break;
+ case 13:
+ err = MEDIA_VISION_ERROR_NONE;
+ break;
+ default:
+ printf("Invalid option.\n");
+ sel_opt = 0;
+ }
+ }
+
+ *engine_cfg = handle;
+ return err;
}
int perform_tflite_mobilenetv1_config(mv_engine_config_h *engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
-
- mv_engine_config_h handle = NULL;
- err = mv_create_engine_config(&handle);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to create engine configuration handle.\n");
- if (handle) {
- int err2 = mv_destroy_engine_config(handle);
- if (err2 != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to destroy engine cofniguration.\n");
- }
- }
- return err;
- }
-
- char *inputNodeName = "input_2";
- char *outputNodeName[1] = {"dense_3/Softmax"};
-
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
- IC_TFLITE_WEIGHT_PATH);
+ int err = MEDIA_VISION_ERROR_NONE;
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_DATA_TYPE,
- MV_INFERENCE_DATA_FLOAT32);
+ mv_engine_config_h handle = NULL;
+ err = mv_create_engine_config(&handle);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create engine configuration handle.\n");
+ if (handle) {
+ int err2 = mv_destroy_engine_config(handle);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+				printf("Fail to destroy engine configuration.\n");
+ }
+ }
+ return err;
+ }
+
+ char *inputNodeName = "input_2";
+ char *outputNodeName[1] = { "dense_3/Softmax" };
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_USER_FILE_PATH,
- IC_LABEL_PATH);
+ mv_engine_config_set_string_attribute(
+ handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, IC_TFLITE_WEIGHT_PATH);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_MEAN_VALUE,
- 127.0);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE,
+ MV_INFERENCE_DATA_FLOAT32);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_STD_VALUE,
- 127.0);
+ mv_engine_config_set_string_attribute(
+ handle, MV_INFERENCE_MODEL_USER_FILE_PATH, IC_LABEL_PATH);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_CONFIDENCE_THRESHOLD,
- 0.6);
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE,
+ 127.0);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_BACKEND_TYPE,
- MV_INFERENCE_BACKEND_TFLITE);
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE,
+ 127.0);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_TARGET_TYPE,
- MV_INFERENCE_TARGET_CPU);
+ mv_engine_config_set_double_attribute(
+ handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.6);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_WIDTH,
- 224);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE,
+ MV_INFERENCE_BACKEND_TFLITE);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_HEIGHT,
- 224);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE,
+ MV_INFERENCE_TARGET_CPU);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_CHANNELS,
- 3);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH,
+ 224);
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_INPUT_NODE_NAME,
- inputNodeName);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+ 224);
- mv_engine_config_set_array_string_attribute(handle,
- MV_INFERENCE_OUTPUT_NODE_NAMES,
- outputNodeName,
- 1);
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3);
+
+ mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME,
+ inputNodeName);
+ mv_engine_config_set_array_string_attribute(
+ handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1);
- *engine_cfg = handle;
- return err;
+ *engine_cfg = handle;
+ return err;
}
int perform_armnn_mobilenetv1_config(mv_engine_config_h *engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
-
- mv_engine_config_h handle = NULL;
- err = mv_create_engine_config(&handle);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to create engine configuration handle.\n");
- if (handle) {
- int err2 = mv_destroy_engine_config(handle);
- if (err2 != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to destroy engine cofniguration.\n");
- }
- }
- return err;
- }
-
- char *inputNodeName = "input_2";
- char *outputNodeName[1] = {"dense_3/Softmax"};
-
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
- IC_TFLITE_WEIGHT_PATH);
+ int err = MEDIA_VISION_ERROR_NONE;
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_DATA_TYPE,
- MV_INFERENCE_DATA_FLOAT32);
+ mv_engine_config_h handle = NULL;
+ err = mv_create_engine_config(&handle);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create engine configuration handle.\n");
+ if (handle) {
+ int err2 = mv_destroy_engine_config(handle);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+				printf("Fail to destroy engine configuration.\n");
+ }
+ }
+ return err;
+ }
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_USER_FILE_PATH,
- IC_LABEL_PATH);
+ char *inputNodeName = "input_2";
+ char *outputNodeName[1] = { "dense_3/Softmax" };
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_MEAN_VALUE,
- 127.0);
+ mv_engine_config_set_string_attribute(
+ handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, IC_TFLITE_WEIGHT_PATH);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_STD_VALUE,
- 127.0);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE,
+ MV_INFERENCE_DATA_FLOAT32);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_CONFIDENCE_THRESHOLD,
- 0.6);
+ mv_engine_config_set_string_attribute(
+ handle, MV_INFERENCE_MODEL_USER_FILE_PATH, IC_LABEL_PATH);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_BACKEND_TYPE,
- MV_INFERENCE_BACKEND_ARMNN);
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE,
+ 127.0);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_TARGET_TYPE,
- MV_INFERENCE_TARGET_CPU);
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE,
+ 127.0);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_WIDTH,
- 224);
+ mv_engine_config_set_double_attribute(
+ handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.6);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_HEIGHT,
- 224);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE,
+ MV_INFERENCE_BACKEND_ARMNN);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_CHANNELS,
- 3);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE,
+ MV_INFERENCE_TARGET_CPU);
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_INPUT_NODE_NAME,
- inputNodeName);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH,
+ 224);
+
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+ 224);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3);
- mv_engine_config_set_array_string_attribute(handle,
- MV_INFERENCE_OUTPUT_NODE_NAMES,
- outputNodeName,
- 1);
+ mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME,
+ inputNodeName);
+ mv_engine_config_set_array_string_attribute(
+ handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1);
- *engine_cfg = handle;
- return err;
+ *engine_cfg = handle;
+ return err;
}
int perform_vivante_inceptionv3_config(mv_engine_config_h *engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
-
- mv_engine_config_h handle = NULL;
- err = mv_create_engine_config(&handle);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to create engine configuration handle.\n");
- if (handle) {
- int err2 = mv_destroy_engine_config(handle);
- if (err2 != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to destroy engine cofniguration.\n");
- }
- }
- return err;
- }
-
- char *inputNodeName = "input";
- char *outputNodeName[1] = {"InceptionV3/Predictions/Peshape_1"};
-
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
- IC_VIVANTE_WEIGHT_PATH);
-
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
- IC_VIVANTE_CONFIG_PATH);
+ int err = MEDIA_VISION_ERROR_NONE;
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_DATA_TYPE,
- MV_INFERENCE_DATA_UINT8);
+ mv_engine_config_h handle = NULL;
+ err = mv_create_engine_config(&handle);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create engine configuration handle.\n");
+ if (handle) {
+ int err2 = mv_destroy_engine_config(handle);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+				printf("Fail to destroy engine configuration.\n");
+ }
+ }
+ return err;
+ }
+
+ char *inputNodeName = "input";
+ char *outputNodeName[1] = { "InceptionV3/Predictions/Peshape_1" };
+
+ mv_engine_config_set_string_attribute(handle,
+ MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+ IC_VIVANTE_WEIGHT_PATH);
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_USER_FILE_PATH,
- IC_VIVANTE_LABEL_PATH);
+ mv_engine_config_set_string_attribute(
+ handle, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
+ IC_VIVANTE_CONFIG_PATH);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_MEAN_VALUE,
- 0.0);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE,
+ MV_INFERENCE_DATA_UINT8);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_STD_VALUE,
- 1.0);
+ mv_engine_config_set_string_attribute(
+ handle, MV_INFERENCE_MODEL_USER_FILE_PATH, IC_VIVANTE_LABEL_PATH);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_CONFIDENCE_THRESHOLD,
- 0.6);
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE,
+ 0.0);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_BACKEND_TYPE,
- MV_INFERENCE_BACKEND_MLAPI);
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE,
+ 1.0);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_TARGET_DEVICE_TYPE,
- MV_INFERENCE_TARGET_DEVICE_CUSTOM);
+ mv_engine_config_set_double_attribute(
+ handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.6);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_WIDTH,
- 299);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE,
+ MV_INFERENCE_BACKEND_MLAPI);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_HEIGHT,
- 299);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_DEVICE_TYPE,
+ MV_INFERENCE_TARGET_DEVICE_CUSTOM);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_CHANNELS,
- 3);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH,
+ 299);
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_INPUT_NODE_NAME,
- inputNodeName);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+ 299);
- mv_engine_config_set_array_string_attribute(handle,
- MV_INFERENCE_OUTPUT_NODE_NAMES,
- outputNodeName,
- 1);
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3);
+
+ mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME,
+ inputNodeName);
+ mv_engine_config_set_array_string_attribute(
+ handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1);
- *engine_cfg = handle;
- return err;
+ *engine_cfg = handle;
+ return err;
}
int perform_opencv_caffe_squeezenet_config(mv_engine_config_h *engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
-
- mv_engine_config_h handle = NULL;
- err = mv_create_engine_config(&handle);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to create engine configuration handle.\n");
- if (handle) {
- int err2 = mv_destroy_engine_config(handle);
- if (err2 != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to destroy engine cofniguration.\n");
- }
- }
- return err;
- }
-
- char *inputNodeName = "data";
- char *outputNodeName[1] = {"prob"};
-
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
- IC_OPENCV_WEIGHT_CAFFE_PATH);
+ int err = MEDIA_VISION_ERROR_NONE;
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_DATA_TYPE,
- MV_INFERENCE_DATA_FLOAT32);
+ mv_engine_config_h handle = NULL;
+ err = mv_create_engine_config(&handle);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create engine configuration handle.\n");
+ if (handle) {
+ int err2 = mv_destroy_engine_config(handle);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+				printf("Fail to destroy engine configuration.\n");
+ }
+ }
+ return err;
+ }
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
- IC_OPENCV_CONFIG_CAFFE_PATH);
+ char *inputNodeName = "data";
+ char *outputNodeName[1] = { "prob" };
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_USER_FILE_PATH,
- IC_OPENCV_LABEL_CAFFE_PATH);
+ mv_engine_config_set_string_attribute(handle,
+ MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+ IC_OPENCV_WEIGHT_CAFFE_PATH);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_MEAN_VALUE,
- 0.0);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE,
+ MV_INFERENCE_DATA_FLOAT32);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_STD_VALUE,
- 1.0);
+ mv_engine_config_set_string_attribute(
+ handle, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
+ IC_OPENCV_CONFIG_CAFFE_PATH);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_CONFIDENCE_THRESHOLD,
- 0.3);
+ mv_engine_config_set_string_attribute(handle,
+ MV_INFERENCE_MODEL_USER_FILE_PATH,
+ IC_OPENCV_LABEL_CAFFE_PATH);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_BACKEND_TYPE,
- MV_INFERENCE_BACKEND_OPENCV);
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE,
+ 0.0);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_TARGET_TYPE,
- MV_INFERENCE_TARGET_CPU);
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE,
+ 1.0);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_WIDTH,
- 227);
+ mv_engine_config_set_double_attribute(
+ handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_HEIGHT,
- 227);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE,
+ MV_INFERENCE_BACKEND_OPENCV);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_CHANNELS,
- 3);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE,
+ MV_INFERENCE_TARGET_CPU);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH,
+ 227);
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_INPUT_NODE_NAME,
- inputNodeName);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+ 227);
- mv_engine_config_set_array_string_attribute(handle,
- MV_INFERENCE_OUTPUT_NODE_NAMES,
- outputNodeName,
- 1);
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3);
- *engine_cfg = handle;
- return err;
-}
+ mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME,
+ inputNodeName);
+
+ mv_engine_config_set_array_string_attribute(
+ handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1);
+ *engine_cfg = handle;
+ return err;
+}
int perform_image_classification()
{
- int err = MEDIA_VISION_ERROR_NONE;
-
- int sel_opt = 0;
- const int options[8] = { 1, 2, 3, 4, 5, 6, 7, 8};
- const char *names[8] = { "Configuration",
- "TFLite(cpu + Mobilenet)",
- "OpenCV(cpu + Squeezenet)",
- "ARMNN(cpu + Mobilenet)",
- "Vivante(NPU + Inceptionv3)",
- "Prepare",
- "Run",
- "Back"};
-
- mv_engine_config_h engine_cfg = NULL;
- mv_inference_h infer = NULL;
- mv_source_h mvSource = NULL;
-
- while(sel_opt == 0) {
- sel_opt = show_menu("Select Action:", options, names, ARRAY_SIZE(options));
- switch (sel_opt) {
- case 1:
- {
- //perform configuration
- if (engine_cfg) {
- int err2 = mv_destroy_engine_config(engine_cfg);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err2);
- engine_cfg = NULL;
- }
-
- err = perform_configuration(&engine_cfg);
- }
- break;
- case 2:
- {
- // perform TFLite
- if (engine_cfg) {
- int err2 = mv_destroy_engine_config(engine_cfg);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err2);
- engine_cfg = NULL;
- }
-
- err = perform_tflite_mobilenetv1_config(&engine_cfg);
- }
- break;
-
- case 3:
- {
- // perform OpenCV
- if (engine_cfg) {
- int err2 = mv_destroy_engine_config(engine_cfg);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err2);
- engine_cfg = NULL;
- }
-
- err = perform_opencv_caffe_squeezenet_config(&engine_cfg);
- }
- break;
- case 4:
- {
- // perform ARMNN
- if (engine_cfg) {
- int err2 = mv_destroy_engine_config(engine_cfg);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err2);
- }
-
- err = perform_armnn_mobilenetv1_config(&engine_cfg);
- }
- break;
- case 5:
- {
- // perform Vivante
- if (engine_cfg) {
- int err2 = mv_destroy_engine_config(engine_cfg);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err2);
- }
-
- err = perform_vivante_inceptionv3_config(&engine_cfg);
- }
- break;
- case 6:
- {
- //create - configure - prepare
- if (infer) {
- int err2 = mv_inference_destroy(infer);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy inference handle [err:%i]\n", err2);
- infer = NULL;
- }
-
- // inference
- // create handle
- err = mv_inference_create(&infer);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to create inference handle [err:%i]\n", err);
- break;
- }
-
- // configure
- err = mv_inference_configure(infer, engine_cfg);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to configure inference handle\n");
- break;
- }
-
- // prepare
- err = mv_inference_prepare(infer);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to prepare inference handle.");
- break;
- }
- }
- break;
- case 7:
- {
- if (mvSource) {
- int err2 = mv_destroy_source(mvSource);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy mvSource.\n");
- mvSource = NULL;
- }
-
- char *in_file_name = NULL;
- /* Load media source */
- while (input_string("Input file name to be inferred:", 1024, &(in_file_name)) == -1)
- printf("Incorrect input! Try again.\n");
-
- err = mv_create_source(&mvSource);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to create mvSource.\n");
- free(in_file_name);
- break;
- }
-
- err = load_mv_source_from_file(in_file_name, mvSource);
- if (MEDIA_VISION_ERROR_NONE != err) {
- int err2 = mv_destroy_source(mvSource);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy mvSource.\n", err2);
- mvSource = NULL;
- free(in_file_name);
- break;
- }
- free(in_file_name);
-
- struct timespec s_tspec;
- struct timespec e_tspec;
-
- clock_gettime(CLOCK_MONOTONIC, &s_tspec);
-
- // Run
- err = mv_inference_image_classify(mvSource, infer, NULL, _image_classified_cb, NULL);
-
- clock_gettime(CLOCK_MONOTONIC, &e_tspec);
-
- struct timespec diffspec = diff(s_tspec, e_tspec);
- unsigned long timeDiff = gettotalmillisec(diffspec);
- printf("elased time : %lu(ms)\n", timeDiff);
-
- }
- break;
- case 8:
- {
- //perform destroy
- if (engine_cfg) {
- err = mv_destroy_engine_config(engine_cfg);
- if (err != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err);
- engine_cfg = NULL;
- }
-
- if (infer) {
- err = mv_inference_destroy(infer);
- if (err != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy inference handle [err:%i]\n", err);
- infer = NULL;
- }
-
- if (mvSource) {
- err = mv_destroy_source(mvSource);
- if (err != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy mvSource [err:%i]\n", err);
- mvSource = NULL;
- }
- }
- break;
- default:
- printf("Invalid option.\n");
- sel_opt = 0;
- continue;
- }
-
- int do_another = 0;
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("ERROR: Action is finished with error code: %i\n");
- }
-
- sel_opt = 0;
- const int options_last[2] = { 1, 2 };
- const char *names_last[2] = { "Yes", "No" };
-
- while (sel_opt == 0) {
- sel_opt = show_menu("Run Image Classification again?: ", options_last, names_last, ARRAY_SIZE(options_last));
- switch (sel_opt) {
- case 1:
- do_another = 1;
- break;
- case 2:
- do_another = 0;
- break;
- default:
- printf("Invalid option.\n");
- sel_opt = 0;
- }
- }
-
- sel_opt = (do_another == 1) ? 0 : 1;
- }
-
- if (engine_cfg) {
- err = mv_destroy_engine_config(engine_cfg);
- if (err != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err);
- engine_cfg = NULL;
- }
-
- if (infer) {
- err = mv_inference_destroy(infer);
- if (err != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy inference handle [err:%i]\n", err);
- infer = NULL;
- }
-
- if (mvSource) {
- err = mv_destroy_source(mvSource);
- if (err != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy mvSource [err:%i]\n", err);
- mvSource = NULL;
- }
-
- return MEDIA_VISION_ERROR_NONE;
+ int err = MEDIA_VISION_ERROR_NONE;
+
+ int sel_opt = 0;
+ const int options[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
+ const char *names[8] = { "Configuration",
+ "TFLite(cpu + Mobilenet)",
+ "OpenCV(cpu + Squeezenet)",
+ "ARMNN(cpu + Mobilenet)",
+ "Vivante(NPU + Inceptionv3)",
+ "Prepare",
+ "Run",
+ "Back" };
+
+ mv_engine_config_h engine_cfg = NULL;
+ mv_inference_h infer = NULL;
+ mv_source_h mvSource = NULL;
+
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Select Action:", options, names,
+ ARRAY_SIZE(options));
+ switch (sel_opt) {
+ case 1: {
+ //perform configuration
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ engine_cfg = NULL;
+ }
+
+ err = perform_configuration(&engine_cfg);
+ } break;
+ case 2: {
+ // perform TFLite
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ engine_cfg = NULL;
+ }
+
+ err = perform_tflite_mobilenetv1_config(&engine_cfg);
+ } break;
+
+ case 3: {
+ // perform OpenCV
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ engine_cfg = NULL;
+ }
+
+ err = perform_opencv_caffe_squeezenet_config(&engine_cfg);
+ } break;
+ case 4: {
+ // perform ARMNN
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ }
+
+ err = perform_armnn_mobilenetv1_config(&engine_cfg);
+ } break;
+ case 5: {
+ // perform Vivante
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ }
+
+ err = perform_vivante_inceptionv3_config(&engine_cfg);
+ } break;
+ case 6: {
+ //create - configure - prepare
+ if (infer) {
+ int err2 = mv_inference_destroy(infer);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy inference handle [err:%i]\n", err2);
+ infer = NULL;
+ }
+
+ // inference
+ // create handle
+ err = mv_inference_create(&infer);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create inference handle [err:%i]\n", err);
+ break;
+ }
+
+ // configure
+ err = mv_inference_configure(infer, engine_cfg);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to configure inference handle\n");
+ break;
+ }
+
+ // prepare
+ err = mv_inference_prepare(infer);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+				printf("Fail to prepare inference handle.\n");
+ break;
+ }
+ } break;
+ case 7: {
+ if (mvSource) {
+ int err2 = mv_destroy_source(mvSource);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy mvSource.\n");
+ mvSource = NULL;
+ }
+
+ char *in_file_name = NULL;
+ /* Load media source */
+ while (input_string("Input file name to be inferred:", 1024,
+ &(in_file_name)) == -1)
+ printf("Incorrect input! Try again.\n");
+
+ err = mv_create_source(&mvSource);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create mvSource.\n");
+ free(in_file_name);
+ break;
+ }
+
+ err = load_mv_source_from_file(in_file_name, mvSource);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ int err2 = mv_destroy_source(mvSource);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+					printf("Fail to destroy mvSource [err:%i]\n", err2);
+ mvSource = NULL;
+ free(in_file_name);
+ break;
+ }
+ free(in_file_name);
+
+ struct timespec s_tspec;
+ struct timespec e_tspec;
+
+ clock_gettime(CLOCK_MONOTONIC, &s_tspec);
+
+ // Run
+ err = mv_inference_image_classify(mvSource, infer, NULL,
+ _image_classified_cb, NULL);
+
+ clock_gettime(CLOCK_MONOTONIC, &e_tspec);
+
+ struct timespec diffspec = diff(s_tspec, e_tspec);
+ unsigned long timeDiff = gettotalmillisec(diffspec);
+			printf("elapsed time : %lu(ms)\n", timeDiff);
+
+ } break;
+ case 8: {
+ //perform destroy
+ if (engine_cfg) {
+ err = mv_destroy_engine_config(engine_cfg);
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err);
+ engine_cfg = NULL;
+ }
+
+ if (infer) {
+ err = mv_inference_destroy(infer);
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy inference handle [err:%i]\n", err);
+ infer = NULL;
+ }
+
+ if (mvSource) {
+ err = mv_destroy_source(mvSource);
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy mvSource [err:%i]\n", err);
+ mvSource = NULL;
+ }
+ } break;
+ default:
+ printf("Invalid option.\n");
+ sel_opt = 0;
+ continue;
+ }
+
+ int do_another = 0;
+ if (err != MEDIA_VISION_ERROR_NONE) {
+			printf("ERROR: Action is finished with error code: %i\n", err);
+ }
+
+ sel_opt = 0;
+ const int options_last[2] = { 1, 2 };
+ const char *names_last[2] = { "Yes", "No" };
+
+ while (sel_opt == 0) {
+ sel_opt =
+ show_menu("Run Image Classification again?: ", options_last,
+ names_last, ARRAY_SIZE(options_last));
+ switch (sel_opt) {
+ case 1:
+ do_another = 1;
+ break;
+ case 2:
+ do_another = 0;
+ break;
+ default:
+ printf("Invalid option.\n");
+ sel_opt = 0;
+ }
+ }
+
+ sel_opt = (do_another == 1) ? 0 : 1;
+ }
+
+ if (engine_cfg) {
+ err = mv_destroy_engine_config(engine_cfg);
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err);
+ engine_cfg = NULL;
+ }
+
+ if (infer) {
+ err = mv_inference_destroy(infer);
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy inference handle [err:%i]\n", err);
+ infer = NULL;
+ }
+
+ if (mvSource) {
+ err = mv_destroy_source(mvSource);
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy mvSource [err:%i]\n", err);
+ mvSource = NULL;
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
}
/*
@@ -1153,1640 +1033,1485 @@ int perform_image_classification()
*/
int perform_tflite_mobilenetv1ssd_config(mv_engine_config_h *engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
-
- mv_engine_config_h handle = NULL;
- err = mv_create_engine_config(&handle);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to create engine configuration handle.\n");
- if (handle) {
- int err2 = mv_destroy_engine_config(handle);
- if (err2 != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to destroy engine cofniguration.\n");
- }
- }
- return err;
- }
+ int err = MEDIA_VISION_ERROR_NONE;
+ mv_engine_config_h handle = NULL;
+ err = mv_create_engine_config(&handle);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create engine configuration handle.\n");
+ if (handle) {
+ int err2 = mv_destroy_engine_config(handle);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+				printf("Fail to destroy engine configuration.\n");
+ }
+ }
+ return err;
+ }
- char *inputNodeName = "normalized_input_image_tensor";
- char *outputNodeName[4] = {"TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3"};
+ char *inputNodeName = "normalized_input_image_tensor";
+ char *outputNodeName[4] = { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" };
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
- OD_TFLITE_WEIGHT_PATH);
+ mv_engine_config_set_string_attribute(
+ handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, OD_TFLITE_WEIGHT_PATH);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_DATA_TYPE,
- MV_INFERENCE_DATA_FLOAT32);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE,
+ MV_INFERENCE_DATA_FLOAT32);
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_USER_FILE_PATH,
- OD_LABEL_PATH);
+ mv_engine_config_set_string_attribute(
+ handle, MV_INFERENCE_MODEL_USER_FILE_PATH, OD_LABEL_PATH);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_MEAN_VALUE,
- 127.5);
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE,
+ 127.5);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_STD_VALUE,
- 127.5);
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE,
+ 127.5);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_CONFIDENCE_THRESHOLD,
- 0.3);
+ mv_engine_config_set_double_attribute(
+ handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_BACKEND_TYPE,
- MV_INFERENCE_BACKEND_TFLITE);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE,
+ MV_INFERENCE_BACKEND_TFLITE);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_TARGET_TYPE,
- MV_INFERENCE_TARGET_CPU);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE,
+ MV_INFERENCE_TARGET_CPU);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_WIDTH,
- 300);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH,
+ 300);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_HEIGHT,
- 300);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+ 300);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_CHANNELS,
- 3);
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3);
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_INPUT_NODE_NAME,
- inputNodeName);
+ mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME,
+ inputNodeName);
- mv_engine_config_set_array_string_attribute(handle,
- MV_INFERENCE_OUTPUT_NODE_NAMES,
- outputNodeName,
- 4);
+ mv_engine_config_set_array_string_attribute(
+ handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 4);
- *engine_cfg = handle;
- return err;
+ *engine_cfg = handle;
+ return err;
}
int perform_opencv_mobilenetv1ssd_config(mv_engine_config_h *engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
-
- mv_engine_config_h handle = NULL;
- err = mv_create_engine_config(&handle);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to create engine configuration handle.\n");
- if (handle) {
- int err2 = mv_destroy_engine_config(handle);
- if (err2 != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to destroy engine cofniguration.\n");
- }
- }
- return err;
- }
-
-
- char *inputNodeName = "data";
- char *outputNodeName[1] = {"detection_out"};
-
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
- OD_OPENCV_WEIGHT_CAFFE_PATH);
-
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
- OD_OPENCV_CONFIG_CAFFE_PATH);
-
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_DATA_TYPE,
- MV_INFERENCE_DATA_FLOAT32);
-
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_USER_FILE_PATH,
- OD_OPENCV_LABEL_CAFFE_PATH);
-
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_MEAN_VALUE,
- 127.5);
-
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_STD_VALUE,
- 127.5);
-
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_CONFIDENCE_THRESHOLD,
- 0.3);
-
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_BACKEND_TYPE,
- MV_INFERENCE_BACKEND_OPENCV);
-
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_TARGET_TYPE,
- MV_INFERENCE_TARGET_CPU);
-
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_WIDTH,
- 300);
-
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_HEIGHT,
- 300);
-
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_CHANNELS,
- 3);
-
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_INPUT_NODE_NAME,
- inputNodeName);
-
- mv_engine_config_set_array_string_attribute(handle,
- MV_INFERENCE_OUTPUT_NODE_NAMES,
- outputNodeName,
- 1);
-
- *engine_cfg = handle;
- return err;
-}
+ int err = MEDIA_VISION_ERROR_NONE;
+ mv_engine_config_h handle = NULL;
+ err = mv_create_engine_config(&handle);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create engine configuration handle.\n");
+ if (handle) {
+ int err2 = mv_destroy_engine_config(handle);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+				printf("Fail to destroy engine configuration.\n");
+ }
+ }
+ return err;
+ }
-int perform_armnn_mobilenetv1ssd_config(mv_engine_config_h *engine_cfg)
-{
- int err = MEDIA_VISION_ERROR_NONE;
+ char *inputNodeName = "data";
+ char *outputNodeName[1] = { "detection_out" };
+
+ mv_engine_config_set_string_attribute(handle,
+ MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+ OD_OPENCV_WEIGHT_CAFFE_PATH);
+
+ mv_engine_config_set_string_attribute(
+ handle, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
+ OD_OPENCV_CONFIG_CAFFE_PATH);
+
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE,
+ MV_INFERENCE_DATA_FLOAT32);
+
+ mv_engine_config_set_string_attribute(handle,
+ MV_INFERENCE_MODEL_USER_FILE_PATH,
+ OD_OPENCV_LABEL_CAFFE_PATH);
+
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE,
+ 127.5);
+
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE,
+ 127.5);
+
+ mv_engine_config_set_double_attribute(
+ handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3);
- mv_engine_config_h handle = NULL;
- err = mv_create_engine_config(&handle);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to create engine configuration handle.\n");
- if (handle) {
- int err2 = mv_destroy_engine_config(handle);
- if (err2 != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to destroy engine cofniguration.\n");
- }
- }
- return err;
- }
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE,
+ MV_INFERENCE_BACKEND_OPENCV);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE,
+ MV_INFERENCE_TARGET_CPU);
- char *inputNodeName = "normalized_input_image_tensor";
- char *outputNodeName[4] = {"TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3"};
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH,
+ 300);
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
- OD_TFLITE_WEIGHT_PATH);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+ 300);
mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_DATA_TYPE,
- MV_INFERENCE_DATA_FLOAT32);
+ MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3);
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_USER_FILE_PATH,
- OD_LABEL_PATH);
+ mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME,
+ inputNodeName);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_MEAN_VALUE,
- 127.5);
+ mv_engine_config_set_array_string_attribute(
+ handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_STD_VALUE,
- 127.5);
+ *engine_cfg = handle;
+ return err;
+}
+
+int perform_armnn_mobilenetv1ssd_config(mv_engine_config_h *engine_cfg)
+{
+ int err = MEDIA_VISION_ERROR_NONE;
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_CONFIDENCE_THRESHOLD,
- 0.3);
+ mv_engine_config_h handle = NULL;
+ err = mv_create_engine_config(&handle);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create engine configuration handle.\n");
+ if (handle) {
+ int err2 = mv_destroy_engine_config(handle);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+				printf("Fail to destroy engine configuration.\n");
+ }
+ }
+ return err;
+ }
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_BACKEND_TYPE,
- MV_INFERENCE_BACKEND_ARMNN);
+ char *inputNodeName = "normalized_input_image_tensor";
+ char *outputNodeName[4] = { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" };
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_TARGET_TYPE,
- MV_INFERENCE_TARGET_CPU);
+ mv_engine_config_set_string_attribute(
+ handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, OD_TFLITE_WEIGHT_PATH);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_WIDTH,
- 300);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE,
+ MV_INFERENCE_DATA_FLOAT32);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_HEIGHT,
- 300);
+ mv_engine_config_set_string_attribute(
+ handle, MV_INFERENCE_MODEL_USER_FILE_PATH, OD_LABEL_PATH);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_CHANNELS,
- 3);
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE,
+ 127.5);
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_INPUT_NODE_NAME,
- inputNodeName);
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE,
+ 127.5);
- mv_engine_config_set_array_string_attribute(handle,
- MV_INFERENCE_OUTPUT_NODE_NAMES,
- outputNodeName,
- 4);
+ mv_engine_config_set_double_attribute(
+ handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3);
- *engine_cfg = handle;
- return err;
-}
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE,
+ MV_INFERENCE_BACKEND_ARMNN);
+
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE,
+ MV_INFERENCE_TARGET_CPU);
+
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH,
+ 300);
+
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+ 300);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3);
+
+ mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME,
+ inputNodeName);
+
+ mv_engine_config_set_array_string_attribute(
+ handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 4);
+ *engine_cfg = handle;
+ return err;
+}
int perform_object_detection()
{
- int err = MEDIA_VISION_ERROR_NONE;
-
- int sel_opt = 0;
- const int options[7] = {1, 2, 3, 4, 5, 6, 7};
- const char *names[7] = { "Configuration",
- "TFLITE(CPU) + MobileNetV1+SSD",
- "OPENCV(CPU) + MobileNetV1+SSD",
- "ARMNN(CPU) + MobileNetV1+SSD",
- "Prepare",
- "Run",
- "Back"};
-
- mv_engine_config_h engine_cfg = NULL;
- mv_inference_h infer = NULL;
- mv_source_h mvSource = NULL;
-
- while(sel_opt == 0) {
- sel_opt = show_menu("Select Action:", options, names, ARRAY_SIZE(options));
- switch (sel_opt) {
- case 1:
- {
- //perform configuration
- if (engine_cfg) {
- int err2 = mv_destroy_engine_config(engine_cfg);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err2);
- engine_cfg = NULL;
- }
-
- err = perform_configuration(&engine_cfg);
- }
- break;
- case 2:
- {
- //perform TFlite MobileSSD config
- if (engine_cfg) {
- int err2 = mv_destroy_engine_config(engine_cfg);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err2);
- engine_cfg = NULL;
- }
-
- err = perform_tflite_mobilenetv1ssd_config(&engine_cfg);
- }
- break;
-
- case 3:
- {
- //perform OpenCV MobileSSD config
- if (engine_cfg) {
- int err2 = mv_destroy_engine_config(engine_cfg);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err2);
- }
-
- err = perform_opencv_mobilenetv1ssd_config(&engine_cfg);
- }
- break;
-
- case 4:
- {
- //perform ARMNN MobileSSD config
- if (engine_cfg) {
- int err2 = mv_destroy_engine_config(engine_cfg);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err2);
- }
-
- err = perform_armnn_mobilenetv1ssd_config(&engine_cfg);
- }
- break;
- case 5:
- {
- // create - configure - prepare
- if (infer) {
- int err2 = mv_inference_destroy(infer);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy inference handle [err:%i]\n", err2);
- infer = NULL;
- }
-
- // inference
- // create handle
- err = mv_inference_create(&infer);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to create inference handle [err:%i]\n", err);
- break;
- }
-
- //configure
- err = mv_inference_configure(infer, engine_cfg);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to configure inference handle [err:%i]\n", err);
- break;
- }
-
- //prepare
- err = mv_inference_prepare(infer);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to prepare inference handle");
- break;
- }
- }
- break;
- case 6:
- {
- if (mvSource) {
- int err2 = mv_destroy_source(mvSource);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy mvSource\n");
- mvSource = NULL;
- }
-
- char *in_file_name = NULL;
- /* Load media source */
- while (input_string("Input file name to be inferred:", 1024, &(in_file_name)) == -1)
- printf("Incorrect input! Try again.\n");
-
- err = mv_create_source(&mvSource);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to create mvSource.\n");
- free(in_file_name);
- break;
- }
-
- err = load_mv_source_from_file(in_file_name, mvSource);
- if (err != MEDIA_VISION_ERROR_NONE) {
- int err2 = mv_destroy_source(mvSource);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy mvSource.\n");
- mvSource = NULL;
- free(in_file_name);
- break;
- }
- free(in_file_name);
-
- struct timespec s_tspec;
- struct timespec e_tspec;
-
- clock_gettime(CLOCK_MONOTONIC, &s_tspec);
-
- // Object Detect
- err = mv_inference_object_detect(mvSource, infer, _object_detected_cb, NULL);
-
- clock_gettime(CLOCK_MONOTONIC, &e_tspec);
-
- struct timespec diffspec = diff(s_tspec, e_tspec);
- unsigned long timeDiff = gettotalmillisec(diffspec);
- printf("elased time : %lu(ms)\n", timeDiff);
-
- }
- break;
- case 7:
- {
- //perform destroy
- if (engine_cfg) {
- err = mv_destroy_engine_config(engine_cfg);
- if (err != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err);
- engine_cfg = NULL;
- }
-
- if (infer) {
- err = mv_inference_destroy(infer);
- if (err != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy inference handle [err:%i]\n", err);
- infer = NULL;
- }
-
- if (mvSource) {
- err = mv_destroy_source(mvSource);
- if (err != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy mvSource.\n");
- mvSource = NULL;
- }
- }
- break;
- default:
- printf("Invalid option.\n");
- sel_opt = 0;
- continue;
- }
-
- int do_another = 0;
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("ERROR: Action is finished with error code: %i\n");
- }
-
- sel_opt = 0;
- const int options_last[2] = {1, 2};
- const char *names_last[2] = { "Yes", "No" };
-
- while (sel_opt == 0) {
- sel_opt = show_menu("Run Object Detection again?:", options_last, names_last, ARRAY_SIZE(options_last));
- switch(sel_opt) {
- case 1:
- do_another = 1;
- break;
- case 2:
- do_another = 0;
- break;
- default:
- printf("Invalid option.\n");
- sel_opt = 0;
- }
- }
-
- sel_opt = (do_another == 1) ? 0 : 1;
- }
-
- if (engine_cfg) {
- err = mv_destroy_engine_config(engine_cfg);
- if (err != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err);
- engine_cfg = NULL;
- }
-
- if (infer) {
- err = mv_inference_destroy(infer);
- if (err != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy inference handle [err:%i]\n", err);
- infer = NULL;
- }
-
- if (mvSource) {
- err = mv_destroy_source(mvSource);
- if (err != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy mvSource\n");
- mvSource = NULL;
- }
-
- return MEDIA_VISION_ERROR_NONE;
+ int err = MEDIA_VISION_ERROR_NONE;
+
+ int sel_opt = 0;
+ const int options[7] = { 1, 2, 3, 4, 5, 6, 7 };
+ const char *names[7] = { "Configuration",
+ "TFLITE(CPU) + MobileNetV1+SSD",
+ "OPENCV(CPU) + MobileNetV1+SSD",
+ "ARMNN(CPU) + MobileNetV1+SSD",
+ "Prepare",
+ "Run",
+ "Back" };
+
+ mv_engine_config_h engine_cfg = NULL;
+ mv_inference_h infer = NULL;
+ mv_source_h mvSource = NULL;
+
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Select Action:", options, names,
+ ARRAY_SIZE(options));
+ switch (sel_opt) {
+ case 1: {
+ //perform configuration
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ engine_cfg = NULL;
+ }
+
+ err = perform_configuration(&engine_cfg);
+ } break;
+ case 2: {
+ //perform TFlite MobileSSD config
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ engine_cfg = NULL;
+ }
+
+ err = perform_tflite_mobilenetv1ssd_config(&engine_cfg);
+ } break;
+
+ case 3: {
+ //perform OpenCV MobileSSD config
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ }
+
+ err = perform_opencv_mobilenetv1ssd_config(&engine_cfg);
+ } break;
+
+ case 4: {
+ //perform ARMNN MobileSSD config
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ }
+
+ err = perform_armnn_mobilenetv1ssd_config(&engine_cfg);
+ } break;
+ case 5: {
+ // create - configure - prepare
+ if (infer) {
+ int err2 = mv_inference_destroy(infer);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy inference handle [err:%i]\n", err2);
+ infer = NULL;
+ }
+
+ // inference
+ // create handle
+ err = mv_inference_create(&infer);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create inference handle [err:%i]\n", err);
+ break;
+ }
+
+ //configure
+ err = mv_inference_configure(infer, engine_cfg);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to configure inference handle [err:%i]\n", err);
+ break;
+ }
+
+ //prepare
+ err = mv_inference_prepare(infer);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+				printf("Fail to prepare inference handle.\n");
+ break;
+ }
+ } break;
+ case 6: {
+ if (mvSource) {
+ int err2 = mv_destroy_source(mvSource);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy mvSource\n");
+ mvSource = NULL;
+ }
+
+ char *in_file_name = NULL;
+ /* Load media source */
+ while (input_string("Input file name to be inferred:", 1024,
+ &(in_file_name)) == -1)
+ printf("Incorrect input! Try again.\n");
+
+ err = mv_create_source(&mvSource);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create mvSource.\n");
+ free(in_file_name);
+ break;
+ }
+
+ err = load_mv_source_from_file(in_file_name, mvSource);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ int err2 = mv_destroy_source(mvSource);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy mvSource.\n");
+ mvSource = NULL;
+ free(in_file_name);
+ break;
+ }
+ free(in_file_name);
+
+ struct timespec s_tspec;
+ struct timespec e_tspec;
+
+ clock_gettime(CLOCK_MONOTONIC, &s_tspec);
+
+ // Object Detect
+ err = mv_inference_object_detect(mvSource, infer,
+ _object_detected_cb, NULL);
+
+ clock_gettime(CLOCK_MONOTONIC, &e_tspec);
+
+ struct timespec diffspec = diff(s_tspec, e_tspec);
+ unsigned long timeDiff = gettotalmillisec(diffspec);
+			printf("elapsed time : %lu(ms)\n", timeDiff);
+
+ } break;
+ case 7: {
+ //perform destroy
+ if (engine_cfg) {
+ err = mv_destroy_engine_config(engine_cfg);
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err);
+ engine_cfg = NULL;
+ }
+
+ if (infer) {
+ err = mv_inference_destroy(infer);
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy inference handle [err:%i]\n", err);
+ infer = NULL;
+ }
+
+ if (mvSource) {
+ err = mv_destroy_source(mvSource);
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy mvSource.\n");
+ mvSource = NULL;
+ }
+ } break;
+ default:
+ printf("Invalid option.\n");
+ sel_opt = 0;
+ continue;
+ }
+
+ int do_another = 0;
+ if (err != MEDIA_VISION_ERROR_NONE) {
+			printf("ERROR: Action is finished with error code: %i\n", err);
+ }
+
+ sel_opt = 0;
+ const int options_last[2] = { 1, 2 };
+ const char *names_last[2] = { "Yes", "No" };
+
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Run Object Detection again?:", options_last,
+ names_last, ARRAY_SIZE(options_last));
+ switch (sel_opt) {
+ case 1:
+ do_another = 1;
+ break;
+ case 2:
+ do_another = 0;
+ break;
+ default:
+ printf("Invalid option.\n");
+ sel_opt = 0;
+ }
+ }
+
+ sel_opt = (do_another == 1) ? 0 : 1;
+ }
+
+ if (engine_cfg) {
+ err = mv_destroy_engine_config(engine_cfg);
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err);
+ engine_cfg = NULL;
+ }
+
+ if (infer) {
+ err = mv_inference_destroy(infer);
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy inference handle [err:%i]\n", err);
+ infer = NULL;
+ }
+
+ if (mvSource) {
+ err = mv_destroy_source(mvSource);
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy mvSource\n");
+ mvSource = NULL;
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
}
int perform_tflite_mobilenetv1ssd_face(mv_engine_config_h *engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
-
- mv_engine_config_h handle = NULL;
- err = mv_create_engine_config(&handle);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to create engine configuration handle.\n");
- if (handle) {
- int err2 = mv_destroy_engine_config(handle);
- if (err2 != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to destroy engine cofniguration.\n");
- }
- }
- return err;
- }
-
- char *inputNodeName = "normalized_input_image_tensor";
- char *outputNodeName[4] = {"TFLite_Detection_PostProcess",
- "TFLite_Detection_PostProcess:1",
- "TFLite_Detection_PostProcess:2",
- "TFLite_Detection_PostProcess:3"};
-
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
- FD_TFLITE_WEIGHT_PATH);
+ int err = MEDIA_VISION_ERROR_NONE;
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_DATA_TYPE,
- MV_INFERENCE_DATA_FLOAT32);
+ mv_engine_config_h handle = NULL;
+ err = mv_create_engine_config(&handle);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create engine configuration handle.\n");
+ if (handle) {
+ int err2 = mv_destroy_engine_config(handle);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+				printf("Fail to destroy engine configuration.\n");
+ }
+ }
+ return err;
+ }
+
+ char *inputNodeName = "normalized_input_image_tensor";
+ char *outputNodeName[4] = { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" };
+
+ mv_engine_config_set_string_attribute(
+ handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, FD_TFLITE_WEIGHT_PATH);
+
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE,
+ MV_INFERENCE_DATA_FLOAT32);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_MEAN_VALUE,
- 127.5);
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE,
+ 127.5);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_STD_VALUE,
- 127.5);
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE,
+ 127.5);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_CONFIDENCE_THRESHOLD,
- 0.3);
+ mv_engine_config_set_double_attribute(
+ handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_BACKEND_TYPE,
- MV_INFERENCE_BACKEND_TFLITE);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE,
+ MV_INFERENCE_BACKEND_TFLITE);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_TARGET_TYPE,
- MV_INFERENCE_TARGET_CPU);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE,
+ MV_INFERENCE_TARGET_CPU);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_WIDTH,
- 300);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH,
+ 300);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_HEIGHT,
- 300);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+ 300);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_CHANNELS,
- 3);
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3);
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_INPUT_NODE_NAME,
- inputNodeName);
+ mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME,
+ inputNodeName);
- mv_engine_config_set_array_string_attribute(handle,
- MV_INFERENCE_OUTPUT_NODE_NAMES,
- outputNodeName,
- 4);
+ mv_engine_config_set_array_string_attribute(
+ handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 4);
- *engine_cfg = handle;
- return err;
+ *engine_cfg = handle;
+ return err;
}
int perform_opencv_resnet10ssd_face(mv_engine_config_h *engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
-
- mv_engine_config_h handle = NULL;
- err = mv_create_engine_config(&handle);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to create engine configuration handle.\n");
- if (handle) {
- int err2 = mv_destroy_engine_config(handle);
- if (err2 != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to destroy engine cofniguration.\n");
- }
- }
- return err;
- }
-
- char *inputNodeName = "data";
- char *outputNodeName[1] = {"detection_out"};
-
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
- FD_OPENCV_WEIGHT_CAFFE_PATH);
+ int err = MEDIA_VISION_ERROR_NONE;
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_DATA_TYPE,
- MV_INFERENCE_DATA_FLOAT32);
+ mv_engine_config_h handle = NULL;
+ err = mv_create_engine_config(&handle);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create engine configuration handle.\n");
+ if (handle) {
+ int err2 = mv_destroy_engine_config(handle);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+				printf("Fail to destroy engine configuration.\n");
+ }
+ }
+ return err;
+ }
+
+ char *inputNodeName = "data";
+ char *outputNodeName[1] = { "detection_out" };
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
- FD_OPENCV_CONFIG_CAFFE_PATH);
+ mv_engine_config_set_string_attribute(handle,
+ MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+ FD_OPENCV_WEIGHT_CAFFE_PATH);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_MEAN_VALUE,
- 135.7);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE,
+ MV_INFERENCE_DATA_FLOAT32);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_STD_VALUE,
- 1.0);
+ mv_engine_config_set_string_attribute(
+ handle, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
+ FD_OPENCV_CONFIG_CAFFE_PATH);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_CONFIDENCE_THRESHOLD,
- 0.3);
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE,
+ 135.7);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_BACKEND_TYPE,
- MV_INFERENCE_BACKEND_OPENCV);
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE,
+ 1.0);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_TARGET_TYPE,
- MV_INFERENCE_TARGET_CPU);
+ mv_engine_config_set_double_attribute(
+ handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_WIDTH,
- 300);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE,
+ MV_INFERENCE_BACKEND_OPENCV);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_HEIGHT,
- 300);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE,
+ MV_INFERENCE_TARGET_CPU);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_CHANNELS,
- 3);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH,
+ 300);
+
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+ 300);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3);
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_INPUT_NODE_NAME,
- inputNodeName);
+ mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME,
+ inputNodeName);
- mv_engine_config_set_array_string_attribute(handle,
- MV_INFERENCE_OUTPUT_NODE_NAMES,
- outputNodeName,
- 1);
+ mv_engine_config_set_array_string_attribute(
+ handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1);
- *engine_cfg = handle;
- return err;
+ *engine_cfg = handle;
+ return err;
}
int perform_armnn_mobilenetv1ssd_face(mv_engine_config_h *engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
-
- mv_engine_config_h handle = NULL;
- err = mv_create_engine_config(&handle);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to create engine configuration handle.\n");
- if (handle) {
- int err2 = mv_destroy_engine_config(handle);
- if (err2 != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to destroy engine cofniguration.\n");
- }
- }
- return err;
- }
-
- char *inputNodeName = "normalized_input_image_tensor";
- char *outputNodeName[4] = {"TFLite_Detection_PostProcess",
- "TFLite_Detection_PostProcess:1",
- "TFLite_Detection_PostProcess:2",
- "TFLite_Detection_PostProcess:3"};
-
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
- FD_TFLITE_WEIGHT_PATH);
+ int err = MEDIA_VISION_ERROR_NONE;
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_DATA_TYPE,
- MV_INFERENCE_DATA_FLOAT32);
+ mv_engine_config_h handle = NULL;
+ err = mv_create_engine_config(&handle);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create engine configuration handle.\n");
+ if (handle) {
+ int err2 = mv_destroy_engine_config(handle);
+				printf("Fail to destroy engine configuration.\n");
+ printf("Fail to destroy engine cofniguration.\n");
+ }
+ }
+ return err;
+ }
+
+ char *inputNodeName = "normalized_input_image_tensor";
+ char *outputNodeName[4] = { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" };
+
+ mv_engine_config_set_string_attribute(
+ handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, FD_TFLITE_WEIGHT_PATH);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_MEAN_VALUE,
- 127.5);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE,
+ MV_INFERENCE_DATA_FLOAT32);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_STD_VALUE,
- 127.5);
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE,
+ 127.5);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_CONFIDENCE_THRESHOLD,
- 0.3);
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE,
+ 127.5);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_BACKEND_TYPE,
- MV_INFERENCE_BACKEND_ARMNN);
+ mv_engine_config_set_double_attribute(
+ handle, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_TARGET_TYPE,
- MV_INFERENCE_TARGET_CPU);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE,
+ MV_INFERENCE_BACKEND_ARMNN);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_WIDTH,
- 300);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE,
+ MV_INFERENCE_TARGET_CPU);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_HEIGHT,
- 300);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH,
+ 300);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_CHANNELS,
- 3);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+ 300);
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_INPUT_NODE_NAME,
- inputNodeName);
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3);
+
+ mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME,
+ inputNodeName);
- mv_engine_config_set_array_string_attribute(handle,
- MV_INFERENCE_OUTPUT_NODE_NAMES,
- outputNodeName,
- 4);
+ mv_engine_config_set_array_string_attribute(
+ handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 4);
- *engine_cfg = handle;
- return err;
+ *engine_cfg = handle;
+ return err;
}
int perform_face_detection()
{
- int err = MEDIA_VISION_ERROR_NONE;
-
- int sel_opt = 0;
- const int options[7] = {1, 2, 3, 4, 5, 6, 7};
- const char *names[7] = { "Configuration",
- "TFLite(CPU) + MobileNetV1 + SSD",
- "OPENCV(CPU) + Resnet10 + SSD",
- "ARMNN(CPU) + MobileNetV1 + SSD",
- "Prepare",
- "Run",
- "Back"};
-
- mv_engine_config_h engine_cfg = NULL;
- mv_inference_h infer = NULL;
- mv_source_h mvSource = NULL;
-
- while(sel_opt == 0) {
- sel_opt = show_menu("Select Action:", options, names, ARRAY_SIZE(options));
- switch (sel_opt) {
- case 1:
- {
- //perform configuration
- if (engine_cfg) {
- int err2 = mv_destroy_engine_config(engine_cfg);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err2);
- engine_cfg = NULL;
- }
-
- err = perform_configuration(&engine_cfg);
- }
- break;
- case 2:
- {
- //perform TF Mobilenetssd config
- if (engine_cfg) {
- int err2 = mv_destroy_engine_config(engine_cfg);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err2);
- engine_cfg = NULL;
- }
-
- err = perform_tflite_mobilenetv1ssd_face(&engine_cfg);
- }
- break;
- case 3:
- {
- //perform TF Lite Mobilenetssd config
- if (engine_cfg) {
- int err2 = mv_destroy_engine_config(engine_cfg);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err2);
- }
-
- err = perform_opencv_resnet10ssd_face(&engine_cfg);
- }
- break;
- case 4:
- {
- //perform TF Lite Mobilenetssd config
- if (engine_cfg) {
- int err2 = mv_destroy_engine_config(engine_cfg);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err2);
- }
-
- err = perform_armnn_mobilenetv1ssd_face(&engine_cfg);
- }
- break;
- case 5:
- {
- // create - configure - prepare
- if (infer) {
- int err2 = mv_inference_destroy(infer);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy inference handle [err:%i]\n", err2);
- infer = NULL;
- }
-
- // inference
- // create handle
- err = mv_inference_create(&infer);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to create inference handle [err:%i]\n", err);
- break;
- }
-
- //configure
- err = mv_inference_configure(infer, engine_cfg);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to configure inference handle [err:%i]\n", err);
- break;
- }
-
- //prepare
- err = mv_inference_prepare(infer);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to prepare inference handle");
- break;
- }
- }
- break;
- case 6:
- {
- if (mvSource) {
- int err2 = mv_destroy_source(mvSource);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy mvSource\n");
- mvSource = NULL;
- }
-
- char *in_file_name = NULL;
- /* Load media source */
- while (input_string("Input file name to be inferred:", 1024, &(in_file_name)) == -1)
- printf("Incorrect input! Try again.\n");
-
- err = mv_create_source(&mvSource);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to create mvSource.\n");
- free(in_file_name);
- break;
- }
-
- err = load_mv_source_from_file(in_file_name, mvSource);
- if (err != MEDIA_VISION_ERROR_NONE) {
- int err2 = mv_destroy_source(mvSource);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy mvSource.\n", err2);
- mvSource = NULL;
- free(in_file_name);
- break;
- }
- free(in_file_name);
-
- struct timespec s_tspec;
- struct timespec e_tspec;
-
- clock_gettime(CLOCK_MONOTONIC, &s_tspec);
-
- // Object Detect
- err = mv_inference_face_detect(mvSource, infer, _face_detected_cb, NULL);
-
- clock_gettime(CLOCK_MONOTONIC, &e_tspec);
-
- struct timespec diffspec = diff(s_tspec, e_tspec);
- unsigned long timeDiff = gettotalmillisec(diffspec);
- printf("elased time : %lu(ms)\n", timeDiff);
- }
- break;
- case 7:
- {
- //perform destroy
- if (engine_cfg) {
- err = mv_destroy_engine_config(engine_cfg);
- if (err != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err);
- engine_cfg = NULL;
- }
-
- if (infer) {
- err = mv_inference_destroy(infer);
- if (err != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy inference handle [err:%i]\n", err);
- infer = NULL;
- }
-
- if (mvSource) {
- err = mv_destroy_source(mvSource);
- if (err != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy mvSource [err:%i]\n", err);
- mvSource = NULL;
- }
- }
- break;
- default:
- printf("Invalid option.\n");
- sel_opt = 0;
- continue;
- }
-
- int do_another = 0;
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("ERROR: Action is finished with error code: %i\n");
- }
-
- sel_opt = 0;
- const int options_last[2] = {1, 2};
- const char *names_last[2] = { "Yes", "No" };
-
- while (sel_opt == 0) {
- sel_opt = show_menu("Run Face Detection again?:", options_last, names_last, ARRAY_SIZE(options_last));
- switch(sel_opt) {
- case 1:
- do_another = 1;
- break;
- case 2:
- do_another = 0;
- break;
- default:
- printf("Invalid option.\n");
- sel_opt = 0;
- }
- }
-
- sel_opt = (do_another == 1) ? 0 : 1;
- }
-
- if (engine_cfg) {
- err = mv_destroy_engine_config(engine_cfg);
- if (err != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err);
- engine_cfg = NULL;
- }
-
- if (infer) {
- err = mv_inference_destroy(infer);
- if (err != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy inference handle [err:%i]\n", err);
- infer = NULL;
- }
-
- if (mvSource) {
- err = mv_destroy_source(mvSource);
- if (err != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy mvSource [err:%i]\n", err);
- mvSource = NULL;
- }
-
- return MEDIA_VISION_ERROR_NONE;
+ int err = MEDIA_VISION_ERROR_NONE;
+
+ int sel_opt = 0;
+ const int options[7] = { 1, 2, 3, 4, 5, 6, 7 };
+ const char *names[7] = { "Configuration",
+ "TFLite(CPU) + MobileNetV1 + SSD",
+ "OPENCV(CPU) + Resnet10 + SSD",
+ "ARMNN(CPU) + MobileNetV1 + SSD",
+ "Prepare",
+ "Run",
+ "Back" };
+
+ mv_engine_config_h engine_cfg = NULL;
+ mv_inference_h infer = NULL;
+ mv_source_h mvSource = NULL;
+
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Select Action:", options, names,
+ ARRAY_SIZE(options));
+ switch (sel_opt) {
+ case 1: {
+ //perform configuration
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ engine_cfg = NULL;
+ }
+
+ err = perform_configuration(&engine_cfg);
+ } break;
+ case 2: {
+ //perform TF Mobilenetssd config
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ engine_cfg = NULL;
+ }
+
+ err = perform_tflite_mobilenetv1ssd_face(&engine_cfg);
+ } break;
+ case 3: {
+			//perform OpenCV Resnet10+SSD config
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ }
+
+ err = perform_opencv_resnet10ssd_face(&engine_cfg);
+ } break;
+ case 4: {
+			//perform ARMNN MobileNetV1+SSD config
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ }
+
+ err = perform_armnn_mobilenetv1ssd_face(&engine_cfg);
+ } break;
+ case 5: {
+ // create - configure - prepare
+ if (infer) {
+ int err2 = mv_inference_destroy(infer);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy inference handle [err:%i]\n", err2);
+ infer = NULL;
+ }
+
+ // inference
+ // create handle
+ err = mv_inference_create(&infer);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create inference handle [err:%i]\n", err);
+ break;
+ }
+
+ //configure
+ err = mv_inference_configure(infer, engine_cfg);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to configure inference handle [err:%i]\n", err);
+ break;
+ }
+
+ //prepare
+ err = mv_inference_prepare(infer);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+				printf("Fail to prepare inference handle.\n");
+ break;
+ }
+ } break;
+ case 6: {
+ if (mvSource) {
+ int err2 = mv_destroy_source(mvSource);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy mvSource\n");
+ mvSource = NULL;
+ }
+
+ char *in_file_name = NULL;
+ /* Load media source */
+ while (input_string("Input file name to be inferred:", 1024,
+ &(in_file_name)) == -1)
+ printf("Incorrect input! Try again.\n");
+
+ err = mv_create_source(&mvSource);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create mvSource.\n");
+ free(in_file_name);
+ break;
+ }
+
+ err = load_mv_source_from_file(in_file_name, mvSource);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ int err2 = mv_destroy_source(mvSource);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+					printf("Fail to destroy mvSource [err:%i]\n", err2);
+ mvSource = NULL;
+ free(in_file_name);
+ break;
+ }
+ free(in_file_name);
+
+ struct timespec s_tspec;
+ struct timespec e_tspec;
+
+ clock_gettime(CLOCK_MONOTONIC, &s_tspec);
+
+			// Face Detect
+ err = mv_inference_face_detect(mvSource, infer, _face_detected_cb,
+ NULL);
+
+ clock_gettime(CLOCK_MONOTONIC, &e_tspec);
+
+ struct timespec diffspec = diff(s_tspec, e_tspec);
+ unsigned long timeDiff = gettotalmillisec(diffspec);
+			printf("elapsed time : %lu(ms)\n", timeDiff);
+ } break;
+ case 7: {
+ //perform destroy
+ if (engine_cfg) {
+ err = mv_destroy_engine_config(engine_cfg);
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err);
+ engine_cfg = NULL;
+ }
+
+ if (infer) {
+ err = mv_inference_destroy(infer);
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy inference handle [err:%i]\n", err);
+ infer = NULL;
+ }
+
+ if (mvSource) {
+ err = mv_destroy_source(mvSource);
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy mvSource [err:%i]\n", err);
+ mvSource = NULL;
+ }
+ } break;
+ default:
+ printf("Invalid option.\n");
+ sel_opt = 0;
+ continue;
+ }
+
+ int do_another = 0;
+ if (err != MEDIA_VISION_ERROR_NONE) {
+			printf("ERROR: Action is finished with error code: %i\n", err);
+ }
+
+ sel_opt = 0;
+ const int options_last[2] = { 1, 2 };
+ const char *names_last[2] = { "Yes", "No" };
+
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Run Face Detection again?:", options_last,
+ names_last, ARRAY_SIZE(options_last));
+ switch (sel_opt) {
+ case 1:
+ do_another = 1;
+ break;
+ case 2:
+ do_another = 0;
+ break;
+ default:
+ printf("Invalid option.\n");
+ sel_opt = 0;
+ }
+ }
+
+ sel_opt = (do_another == 1) ? 0 : 1;
+ }
+
+ if (engine_cfg) {
+ err = mv_destroy_engine_config(engine_cfg);
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err);
+ engine_cfg = NULL;
+ }
+
+ if (infer) {
+ err = mv_inference_destroy(infer);
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy inference handle [err:%i]\n", err);
+ infer = NULL;
+ }
+
+ if (mvSource) {
+ err = mv_destroy_source(mvSource);
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy mvSource [err:%i]\n", err);
+ mvSource = NULL;
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
}
int perform_tflite_TweakCNN(mv_engine_config_h *engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
-
- mv_engine_config_h handle = NULL;
- err = mv_create_engine_config(&handle);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to create engine configuration handle.\n");
- if (handle) {
- int err2 = mv_destroy_engine_config(handle);
- if (err2 != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to destroy engine cofniguration.\n");
- }
- }
- return err;
- }
-
- char *inputNodeName = "INPUT_TENSOR_NAME";
- char *outputNodeName[1] = {"OUTPUT_TENSOR_NAME"};
-
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
- FLD_TFLITE_WEIGHT_PATH);
+ int err = MEDIA_VISION_ERROR_NONE;
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_DATA_TYPE,
- MV_INFERENCE_DATA_FLOAT32);
+ mv_engine_config_h handle = NULL;
+ err = mv_create_engine_config(&handle);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create engine configuration handle.\n");
+ if (handle) {
+ int err2 = mv_destroy_engine_config(handle);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+				printf("Fail to destroy engine configuration.\n");
+ }
+ }
+ return err;
+ }
+
+ char *inputNodeName = "INPUT_TENSOR_NAME";
+ char *outputNodeName[1] = { "OUTPUT_TENSOR_NAME" };
+
+ mv_engine_config_set_string_attribute(handle,
+ MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+ FLD_TFLITE_WEIGHT_PATH);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_MEAN_VALUE,
- 0.0);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE,
+ MV_INFERENCE_DATA_FLOAT32);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_STD_VALUE,
- 1.0);
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE,
+ 0.0);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_BACKEND_TYPE,
- MV_INFERENCE_BACKEND_TFLITE);
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE,
+ 1.0);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_TARGET_TYPE,
- MV_INFERENCE_TARGET_CPU);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE,
+ MV_INFERENCE_BACKEND_TFLITE);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_WIDTH,
- 128);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE,
+ MV_INFERENCE_TARGET_CPU);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_HEIGHT,
- 128);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH,
+ 128);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_CHANNELS,
- 3);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+ 128);
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_INPUT_NODE_NAME,
- inputNodeName);
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3);
+
+ mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME,
+ inputNodeName);
- mv_engine_config_set_array_string_attribute(handle,
- MV_INFERENCE_OUTPUT_NODE_NAMES,
- outputNodeName,
- 1);
+ mv_engine_config_set_array_string_attribute(
+ handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1);
- *engine_cfg = handle;
- return err;
+ *engine_cfg = handle;
+ return err;
}
int perform_opencv_cnncascade(mv_engine_config_h *engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
-
- mv_engine_config_h handle = NULL;
- err = mv_create_engine_config(&handle);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to create engine configuration handle.\n");
- if (handle) {
- int err2 = mv_destroy_engine_config(handle);
- if (err2 != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to destroy engine cofniguration.\n");
- }
- }
- return err;
- }
-
- char *inputNodeName = "data";
- char *outputNodeName[1] = {"Sigmoid_fc2"};
-
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
- FLD_OPENCV_WEIGHT_CAFFE_PATH);
+ int err = MEDIA_VISION_ERROR_NONE;
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_DATA_TYPE,
- MV_INFERENCE_DATA_FLOAT32);
+ mv_engine_config_h handle = NULL;
+ err = mv_create_engine_config(&handle);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create engine configuration handle.\n");
+ if (handle) {
+ int err2 = mv_destroy_engine_config(handle);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+				printf("Fail to destroy engine configuration.\n");
+ }
+ }
+ return err;
+ }
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
- FLD_OPENCV_CONFIG_CAFFE_PATH);
+ char *inputNodeName = "data";
+ char *outputNodeName[1] = { "Sigmoid_fc2" };
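+	// CNN-cascade facial landmark setup: OpenCV backend with Caffe weights
+	// and config on CPU, FLOAT32 input tensor of 128x128x3, mean/std 127.5.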
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_MEAN_VALUE,
- 127.5);
+ mv_engine_config_set_string_attribute(handle,
+ MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+ FLD_OPENCV_WEIGHT_CAFFE_PATH);
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_STD_VALUE,
- 127.5);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE,
+ MV_INFERENCE_DATA_FLOAT32);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_BACKEND_TYPE,
- MV_INFERENCE_BACKEND_OPENCV);
+ mv_engine_config_set_string_attribute(
+ handle, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
+ FLD_OPENCV_CONFIG_CAFFE_PATH);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_TARGET_TYPE,
- MV_INFERENCE_TARGET_CPU);
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE,
+ 127.5);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_WIDTH,
- 128);
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE,
+ 127.5);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_HEIGHT,
- 128);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE,
+ MV_INFERENCE_BACKEND_OPENCV);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_CHANNELS,
- 3);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE,
+ MV_INFERENCE_TARGET_CPU);
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_INPUT_NODE_NAME,
- inputNodeName);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH,
+ 128);
- mv_engine_config_set_array_string_attribute(handle,
- MV_INFERENCE_OUTPUT_NODE_NAMES,
- outputNodeName,
- 1);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+ 128);
- *engine_cfg = handle;
- return err;
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3);
+
+ mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME,
+ inputNodeName);
+
+ mv_engine_config_set_array_string_attribute(
+ handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1);
+
+ *engine_cfg = handle;
+ return err;
}
int perform_facial_landmark_detection()
{
- int err = MEDIA_VISION_ERROR_NONE;
-
- int sel_opt = 0;
- const int options[6] = {1, 2, 3, 4, 5, 6};
- const *names[6] = { "Configuration",
- "Tflite(CPU) + TweakCNN",
- "OPENCV(CPU) + TweakCNN",
- "Prepare",
- "Run",
- "Back"};
-
- mv_engine_config_h engine_cfg = NULL;
- mv_inference_h infer = NULL;
- mv_source_h mvSource = NULL;
-
- while(sel_opt == 0) {
- sel_opt = show_menu("Select Action:", options, names, ARRAY_SIZE(options));
- switch (sel_opt) {
- case 1:
- {
- //perform configuration
- if (engine_cfg) {
- int err2 = mv_destroy_engine_config(engine_cfg);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err2);
- engine_cfg = NULL;
- }
-
- err = perform_configuration(&engine_cfg);
- }
- break;
- case 2:
- {
- //perform SRID TweakCNN config
- if (engine_cfg) {
- int err2 = mv_destroy_engine_config(engine_cfg);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err2);
- engine_cfg = NULL;
- }
- err = perform_tflite_TweakCNN(&engine_cfg);
- }
- break;
- case 3:
- {
- //perform CNN cascade
- if (engine_cfg) {
- int err2 = mv_destroy_engine_config(engine_cfg);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err2);
- }
- err = perform_opencv_cnncascade(&engine_cfg);
- }
- break;
- case 4:
- {
- // create - configure - prepare
- if (infer) {
- int err2 = mv_inference_destroy(infer);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy inference handle [err:%i]\n", err2);
- infer = NULL;
- }
-
- // inference
- // create handle
- err = mv_inference_create(&infer);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to create inference handle [err:%i]\n", err);
- break;
- }
-
- //configure
- err = mv_inference_configure(infer, engine_cfg);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to configure inference handle [err:%i]\n", err);
- break;
- }
-
- //prepare
- err = mv_inference_prepare(infer);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to prepare inference handle");
- break;
- }
- }
- break;
- case 5:
- {
- if (mvSource) {
- int err2 = mv_destroy_source(mvSource);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy mvSource\n");
- mvSource = NULL;
- }
-
- char *in_file_name = NULL;
- /* Load media source */
- while (input_string("Input file name to be inferred:", 1024, &(in_file_name)) == -1)
- printf("Incorrect input! Try again.\n");
-
- err = mv_create_source(&mvSource);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to create mvSource.\n");
- free(in_file_name);
- break;
- }
-
- err = load_mv_source_from_file(in_file_name, mvSource);
- if (err != MEDIA_VISION_ERROR_NONE) {
- int err2 = mv_destroy_source(mvSource);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy mvSource.\n", err2);
- mvSource = NULL;
- free(in_file_name);
- break;
- }
- free(in_file_name);
-
- struct timespec s_tspec;
- struct timespec e_tspec;
-
- clock_gettime(CLOCK_MONOTONIC, &s_tspec);
-
- // Object Detect
- err = mv_inference_facial_landmark_detect(mvSource, infer, NULL, _facial_landmark_detected_cb, NULL);
-
- clock_gettime(CLOCK_MONOTONIC, &e_tspec);
-
- struct timespec diffspec = diff(s_tspec, e_tspec);
- unsigned long timeDiff = gettotalmillisec(diffspec);
- printf("elased time : %lu(ms)\n", timeDiff);
- }
- break;
- case 6:
- {
- //perform destroy
- if (engine_cfg) {
- err = mv_destroy_engine_config(engine_cfg);
- if (err != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err);
- engine_cfg = NULL;
- }
-
- if (infer) {
- err = mv_inference_destroy(infer);
- if (err != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy inference handle [err:%i]\n", err);
- infer = NULL;
- }
-
- if (mvSource) {
- err = mv_destroy_source(mvSource);
- if (err != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy mvSource [err:%i]\n", err);
- mvSource = NULL;
- }
- }
- break;
- default:
- printf("Invalid option.\n");
- sel_opt = 0;
- continue;
- }
-
- int do_another = 0;
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("ERROR: Action is finished with error code: %i\n");
- }
-
- sel_opt = 0;
- const int options_last[2] = {1, 2};
- const char *names_last[2] = { "Yes", "No" };
-
- while (sel_opt == 0) {
- sel_opt = show_menu("Run Facial Landmark Detection again?:", options_last, names_last, ARRAY_SIZE(options_last));
- switch(sel_opt) {
- case 1:
- do_another = 1;
- break;
- case 2:
- do_another = 0;
- break;
- default:
- printf("Invalid option.\n");
- sel_opt = 0;
- }
- }
-
- sel_opt = (do_another == 1) ? 0 : 1;
- }
-
- if (engine_cfg) {
- err = mv_destroy_engine_config(engine_cfg);
- if (err != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err);
- engine_cfg = NULL;
- }
-
- if (infer) {
- err = mv_inference_destroy(infer);
- if (err != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy inference handle [err:%i]\n", err);
- infer = NULL;
- }
-
- if (mvSource) {
- err = mv_destroy_source(mvSource);
- if (err != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy mvSource [err:%i]\n", err);
- mvSource = NULL;
- }
-
- return MEDIA_VISION_ERROR_NONE;
+ int err = MEDIA_VISION_ERROR_NONE;
+
+ int sel_opt = 0;
+ const int options[6] = { 1, 2, 3, 4, 5, 6 };
+	const char *names[6] = { "Configuration",
+				 "Tflite(CPU) + TweakCNN",
+				 "OPENCV(CPU) + TweakCNN",
+				 "Prepare",
+				 "Run",
+				 "Back" };
+
+ mv_engine_config_h engine_cfg = NULL;
+ mv_inference_h infer = NULL;
+ mv_source_h mvSource = NULL;
+
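+	// Menu flow: pick a model configuration (1-3), Prepare (4) to
+	// create/configure/prepare the inference handle, Run (5) to load an
+	// image and call mv_inference_facial_landmark_detect(), Back (6) to
+	// release every handle.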
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Select Action:", options, names,
+ ARRAY_SIZE(options));
+ switch (sel_opt) {
+ case 1: {
+ //perform configuration
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ engine_cfg = NULL;
+ }
+
+ err = perform_configuration(&engine_cfg);
+ } break;
+ case 2: {
+ //perform SRID TweakCNN config
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ engine_cfg = NULL;
+ }
+ err = perform_tflite_TweakCNN(&engine_cfg);
+ } break;
+ case 3: {
+ //perform CNN cascade
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ }
+ err = perform_opencv_cnncascade(&engine_cfg);
+ } break;
+ case 4: {
+ // create - configure - prepare
+ if (infer) {
+ int err2 = mv_inference_destroy(infer);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy inference handle [err:%i]\n", err2);
+ infer = NULL;
+ }
+
+ // inference
+ // create handle
+ err = mv_inference_create(&infer);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create inference handle [err:%i]\n", err);
+ break;
+ }
+
+ //configure
+ err = mv_inference_configure(infer, engine_cfg);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to configure inference handle [err:%i]\n", err);
+ break;
+ }
+
+ //prepare
+ err = mv_inference_prepare(infer);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+				printf("Fail to prepare inference handle\n");
+ break;
+ }
+ } break;
+ case 5: {
+ if (mvSource) {
+ int err2 = mv_destroy_source(mvSource);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy mvSource\n");
+ mvSource = NULL;
+ }
+
+ char *in_file_name = NULL;
+ /* Load media source */
+ while (input_string("Input file name to be inferred:", 1024,
+ &(in_file_name)) == -1)
+ printf("Incorrect input! Try again.\n");
+
+ err = mv_create_source(&mvSource);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create mvSource.\n");
+ free(in_file_name);
+ break;
+ }
+
+ err = load_mv_source_from_file(in_file_name, mvSource);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ int err2 = mv_destroy_source(mvSource);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+					printf("Fail to destroy mvSource [err:%i]\n", err2);
+ mvSource = NULL;
+ free(in_file_name);
+ break;
+ }
+ free(in_file_name);
+
+ struct timespec s_tspec;
+ struct timespec e_tspec;
+
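+			// Measure wall-clock detection time with CLOCK_MONOTONIC.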
+ clock_gettime(CLOCK_MONOTONIC, &s_tspec);
+
+			// Facial landmark detection
+ err = mv_inference_facial_landmark_detect(
+ mvSource, infer, NULL, _facial_landmark_detected_cb, NULL);
+
+ clock_gettime(CLOCK_MONOTONIC, &e_tspec);
+
+ struct timespec diffspec = diff(s_tspec, e_tspec);
+ unsigned long timeDiff = gettotalmillisec(diffspec);
+			printf("elapsed time : %lu(ms)\n", timeDiff);
+ } break;
+ case 6: {
+ //perform destroy
+ if (engine_cfg) {
+ err = mv_destroy_engine_config(engine_cfg);
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err);
+ engine_cfg = NULL;
+ }
+
+ if (infer) {
+ err = mv_inference_destroy(infer);
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy inference handle [err:%i]\n", err);
+ infer = NULL;
+ }
+
+ if (mvSource) {
+ err = mv_destroy_source(mvSource);
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy mvSource [err:%i]\n", err);
+ mvSource = NULL;
+ }
+ } break;
+ default:
+ printf("Invalid option.\n");
+ sel_opt = 0;
+ continue;
+ }
+
+ int do_another = 0;
+ if (err != MEDIA_VISION_ERROR_NONE) {
+			printf("ERROR: Action is finished with error code: %i\n", err);
+ }
+
+ sel_opt = 0;
+ const int options_last[2] = { 1, 2 };
+ const char *names_last[2] = { "Yes", "No" };
+
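+		// Errors above are only reported; the user may rerun the scenario
+		// or go back.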
+ while (sel_opt == 0) {
+ sel_opt = show_menu(
+ "Run Facial Landmark Detection again?:", options_last,
+ names_last, ARRAY_SIZE(options_last));
+ switch (sel_opt) {
+ case 1:
+ do_another = 1;
+ break;
+ case 2:
+ do_another = 0;
+ break;
+ default:
+ printf("Invalid option.\n");
+ sel_opt = 0;
+ }
+ }
+
+ sel_opt = (do_another == 1) ? 0 : 1;
+ }
+
+ if (engine_cfg) {
+ err = mv_destroy_engine_config(engine_cfg);
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err);
+ engine_cfg = NULL;
+ }
+
+ if (infer) {
+ err = mv_inference_destroy(infer);
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy inference handle [err:%i]\n", err);
+ infer = NULL;
+ }
+
+ if (mvSource) {
+ err = mv_destroy_source(mvSource);
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy mvSource [err:%i]\n", err);
+ mvSource = NULL;
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
}
int perform_armnn_pose_estimation_detection(mv_engine_config_h *engine_cfg)
{
- int err = MEDIA_VISION_ERROR_NONE;
-
- mv_engine_config_h handle = NULL;
- err = mv_create_engine_config(&handle);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to create engine configuration handle.\n");
- if (handle) {
- int err2 = mv_destroy_engine_config(handle);
- if (err2 != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to destroy engine cofniguration.\n");
- }
- }
- return err;
- }
-
- char *inputNodeName = "image";
- char *outputNodeName[1] = {"Convolutional_Pose_Machine/stage_5_out"};
-
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
- PE_TFLITE_WEIGHT_PATH);
+ int err = MEDIA_VISION_ERROR_NONE;
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_DATA_TYPE,
- MV_INFERENCE_DATA_FLOAT32);
+ mv_engine_config_h handle = NULL;
+ err = mv_create_engine_config(&handle);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create engine configuration handle.\n");
+ if (handle) {
+ int err2 = mv_destroy_engine_config(handle);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+				printf("Fail to destroy engine configuration.\n");
+ }
+ }
+ return err;
+ }
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_MEAN_VALUE,
- 0.0);
+ char *inputNodeName = "image";
+ char *outputNodeName[1] = { "Convolutional_Pose_Machine/stage_5_out" };
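+	// Pose estimation (Convolutional Pose Machine) setup: ARMNN backend on
+	// CPU, FLOAT32 input tensor of 192x192x3, mean 0.0 / std 1.0.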
- mv_engine_config_set_double_attribute(handle,
- MV_INFERENCE_MODEL_STD_VALUE,
- 1.0);
+ mv_engine_config_set_string_attribute(
+ handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, PE_TFLITE_WEIGHT_PATH);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_BACKEND_TYPE,
- MV_INFERENCE_BACKEND_ARMNN);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_DATA_TYPE,
+ MV_INFERENCE_DATA_FLOAT32);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_TARGET_TYPE,
- MV_INFERENCE_TARGET_CPU);
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_MEAN_VALUE,
+ 0.0);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_WIDTH,
- 192);
+ mv_engine_config_set_double_attribute(handle, MV_INFERENCE_MODEL_STD_VALUE,
+ 1.0);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_HEIGHT,
- 192);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE,
+ MV_INFERENCE_BACKEND_ARMNN);
- mv_engine_config_set_int_attribute(handle,
- MV_INFERENCE_INPUT_TENSOR_CHANNELS,
- 3);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_TARGET_TYPE,
+ MV_INFERENCE_TARGET_CPU);
- mv_engine_config_set_string_attribute(handle,
- MV_INFERENCE_INPUT_NODE_NAME,
- inputNodeName);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_WIDTH,
+ 192);
- mv_engine_config_set_array_string_attribute(handle,
- MV_INFERENCE_OUTPUT_NODE_NAMES,
- outputNodeName,
- 1);
+ mv_engine_config_set_int_attribute(handle, MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+ 192);
- *engine_cfg = handle;
- return err;
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3);
+
+ mv_engine_config_set_string_attribute(handle, MV_INFERENCE_INPUT_NODE_NAME,
+ inputNodeName);
+
+ mv_engine_config_set_array_string_attribute(
+ handle, MV_INFERENCE_OUTPUT_NODE_NAMES, outputNodeName, 1);
+
+ *engine_cfg = handle;
+ return err;
}
int perform_pose_estimation_detection()
{
- int err = MEDIA_VISION_ERROR_NONE;
-
- int sel_opt = 0;
- const int options[5] = {1, 2, 3, 4, 5};
- const *names[5] = { "Configuration",
- "ARMNN(CPU) + PoseEstimation",
- "Prepare",
- "Run",
- "Back"};
-
- mv_engine_config_h engine_cfg = NULL;
- mv_inference_h infer = NULL;
- mv_source_h mvSource = NULL;
-
- while(sel_opt == 0) {
- sel_opt = show_menu("Select Action:", options, names, ARRAY_SIZE(options));
- switch (sel_opt) {
- case 1:
- {
- //perform configuration
- if (engine_cfg) {
- int err2 = mv_destroy_engine_config(engine_cfg);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err2);
- }
-
- err = perform_configuration(&engine_cfg);
- }
- break;
- case 2:
- {
- //perform pose estimation config
- if (engine_cfg) {
- int err2 = mv_destroy_engine_config(engine_cfg);
- if (err2 != MEDIA_VISION_ERROR_NONE)
- printf("Fail to destroy engine_cfg [err:%i]\n", err2);
- }
- err = perform_armnn_pose_estimation_detection(&engine_cfg);
- }
- break;
- case 3:
- {
- // create - configure - prepare
- if (infer) {
- int err2 = mv_inference_destroy(infer);
- if (err2 != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to destroy inference handle [err:%i]\n", err2);
- }
- }
-
- // inference
- // create handle
- err = mv_inference_create(&infer);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to create inference handle [err:%i]\n", err);
- break;
- }
-
- //configure
- err = mv_inference_configure(infer, engine_cfg);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to configure inference handle [err:%i]\n", err);
- break;
- }
-
- //prepare
- err = mv_inference_prepare(infer);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to prepare inference handle");
- break;
- }
- }
- break;
- case 4:
- {
- if (mvSource) {
- int err2 = mv_destroy_source(mvSource);
- if (err2 != MEDIA_VISION_ERROR_NONE);
- printf("Fail to destroy mvSource\n");
- }
-
- char *in_file_name = NULL;
- /* Load media source */
- while (input_string("Input file name to be inferred:", 1024, &(in_file_name)) == -1)
- printf("Incorrect input! Try again.\n");
-
- err = mv_create_source(&mvSource);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to create mvSource.\n");
- free(in_file_name);
- break;
- }
-
- err = load_mv_source_from_file(in_file_name, mvSource);
- if (err != MEDIA_VISION_ERROR_NONE) {
- int err2 = mv_destroy_source(mvSource);
- if (err2 != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to destroy mvSource.\n", err2);
- }
- free(in_file_name);
- break;
- }
- free(in_file_name);
-
- struct timespec s_tspec;
- struct timespec e_tspec;
-
- clock_gettime(CLOCK_MONOTONIC, &s_tspec);
-
- // Pose estimation
- err = mv_inference_pose_estimation_detect(mvSource, infer, NULL, _pose_estimation_detected_cb, NULL);
-
- clock_gettime(CLOCK_MONOTONIC, &e_tspec);
-
- struct timespec diffspec = diff(s_tspec, e_tspec);
- unsigned long timeDiff = gettotalmillisec(diffspec);
- printf("elased time : %lu(ms)\n", timeDiff);
-
- break;
- }
- case 5:
- {
- //perform destroy
- if (engine_cfg) {
- err = mv_destroy_engine_config(engine_cfg);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to destroy engine_cfg [err:%i]\n", err);
- }
- }
-
- if (infer) {
- err = mv_inference_destroy(infer);
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to destroy inference handle [err:%i]\n", err);
- }
- }
- }
- break;
- default:
- printf("Invalid option.\n");
- sel_opt = 0;
- continue;
- }
-
- int do_another = 0;
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("ERROR: Action is finished with error code: %i\n");
- }
-
- sel_opt = 0;
- const int options_last[2] = {1, 2};
- const char *names_last[2] = { "Yes", "No" };
-
- while (sel_opt == 0) {
- sel_opt = show_menu("Run Pose Estimation Detection again?:", options_last, names_last, ARRAY_SIZE(options_last));
- switch(sel_opt) {
- case 1:
- do_another = 1;
- break;
- case 2:
- do_another = 0;
- break;
- default:
- printf("Invalid option.\n");
- sel_opt = 0;
- }
- }
-
- sel_opt = (do_another == 1) ? 0 : 1;
- }
-
- return MEDIA_VISION_ERROR_NONE;
+ int err = MEDIA_VISION_ERROR_NONE;
+
+ int sel_opt = 0;
+ const int options[5] = { 1, 2, 3, 4, 5 };
+	const char *names[5] = { "Configuration", "ARMNN(CPU) + PoseEstimation",
+				 "Prepare", "Run", "Back" };
+
+ mv_engine_config_h engine_cfg = NULL;
+ mv_inference_h infer = NULL;
+ mv_source_h mvSource = NULL;
+
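+	// Menu flow: pick a model configuration (1-2), Prepare (3) to
+	// create/configure/prepare the inference handle, Run (4) to load an
+	// image and call mv_inference_pose_estimation_detect(), Back (5) to
+	// release the engine configuration and inference handles.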
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Select Action:", options, names,
+ ARRAY_SIZE(options));
+ switch (sel_opt) {
+ case 1: {
+ //perform configuration
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ }
+
+ err = perform_configuration(&engine_cfg);
+ } break;
+ case 2: {
+ //perform pose estimation config
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ }
+ err = perform_armnn_pose_estimation_detection(&engine_cfg);
+ } break;
+ case 3: {
+ // create - configure - prepare
+ if (infer) {
+ int err2 = mv_inference_destroy(infer);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to destroy inference handle [err:%i]\n", err2);
+ }
+ }
+
+ // inference
+ // create handle
+ err = mv_inference_create(&infer);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create inference handle [err:%i]\n", err);
+ break;
+ }
+
+ //configure
+ err = mv_inference_configure(infer, engine_cfg);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to configure inference handle [err:%i]\n", err);
+ break;
+ }
+
+ //prepare
+ err = mv_inference_prepare(infer);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+				printf("Fail to prepare inference handle\n");
+ break;
+ }
+ } break;
+ case 4: {
+ if (mvSource) {
+ int err2 = mv_destroy_source(mvSource);
+				if (err2 != MEDIA_VISION_ERROR_NONE)
+					printf("Fail to destroy mvSource\n");
+				mvSource = NULL;
+ }
+
+ char *in_file_name = NULL;
+ /* Load media source */
+ while (input_string("Input file name to be inferred:", 1024,
+ &(in_file_name)) == -1)
+ printf("Incorrect input! Try again.\n");
+
+ err = mv_create_source(&mvSource);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create mvSource.\n");
+ free(in_file_name);
+ break;
+ }
+
+ err = load_mv_source_from_file(in_file_name, mvSource);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ int err2 = mv_destroy_source(mvSource);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+					printf("Fail to destroy mvSource [err:%i]\n", err2);
+ }
+ free(in_file_name);
+ break;
+ }
+ free(in_file_name);
+
+ struct timespec s_tspec;
+ struct timespec e_tspec;
+
+ clock_gettime(CLOCK_MONOTONIC, &s_tspec);
+
+ // Pose estimation
+ err = mv_inference_pose_estimation_detect(
+ mvSource, infer, NULL, _pose_estimation_detected_cb, NULL);
+
+ clock_gettime(CLOCK_MONOTONIC, &e_tspec);
+
+ struct timespec diffspec = diff(s_tspec, e_tspec);
+ unsigned long timeDiff = gettotalmillisec(diffspec);
+			printf("elapsed time : %lu(ms)\n", timeDiff);
+
+ break;
+ }
+ case 5: {
+ //perform destroy
+ if (engine_cfg) {
+ err = mv_destroy_engine_config(engine_cfg);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to destroy engine_cfg [err:%i]\n", err);
+ }
+ }
+
+ if (infer) {
+ err = mv_inference_destroy(infer);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to destroy inference handle [err:%i]\n", err);
+ }
+ }
+ } break;
+ default:
+ printf("Invalid option.\n");
+ sel_opt = 0;
+ continue;
+ }
+
+ int do_another = 0;
+ if (err != MEDIA_VISION_ERROR_NONE) {
+			printf("ERROR: Action is finished with error code: %i\n", err);
+ }
+
+ sel_opt = 0;
+ const int options_last[2] = { 1, 2 };
+ const char *names_last[2] = { "Yes", "No" };
+
+ while (sel_opt == 0) {
+ sel_opt = show_menu(
+ "Run Pose Estimation Detection again?:", options_last,
+ names_last, ARRAY_SIZE(options_last));
+ switch (sel_opt) {
+ case 1:
+ do_another = 1;
+ break;
+ case 2:
+ do_another = 0;
+ break;
+ default:
+ printf("Invalid option.\n");
+ sel_opt = 0;
+ }
+ }
+
+ sel_opt = (do_another == 1) ? 0 : 1;
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
}
int main()
{
- int sel_opt = 0;
-
- const int options[6] = {1, 2, 3, 4, 5, 6};
- const char *names[6] = { "Image Classification",
- "Object Detection",
- "Face Detection",
- "Facial LandmarkDetection",
- "Pose Estimation",
- "Exit"};
-
- int err = MEDIA_VISION_ERROR_NONE;
- while (sel_opt == 0) {
- sel_opt = show_menu("Select Action:", options, names, ARRAY_SIZE(options));
- switch (sel_opt) {
- case 1:
- {
- err = perform_image_classification();
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to perform image classification\n");
- }
- }
- break;
- case 2:
- {
- err = perform_object_detection();
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to perform object detection\n");
- }
- }
- break;
- case 3:
- {
- err = perform_face_detection();
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to perform face detection\n");
- }
- }
- break;
- case 4:
- {
- err = perform_facial_landmark_detection();
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to perform facial landmark detection");
- }
- break;
- }
- case 5:
- {
- err = perform_pose_estimation_detection();
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("Fail to perform pose estimation");
- }
- break;
- }
- case 6:
- {
- printf("Exit");
- }
- break;
- default:
- printf("Invalid option");
- sel_opt = 0;
- continue;
- }
-
- int do_another = 0;
- if (err != MEDIA_VISION_ERROR_NONE) {
- printf("ERROR: Action is finished with error code: %i\n");
- }
-
- sel_opt = 0;
- const int options_last[2] = { 1, 2 };
- const char *names_last[2] = { "Yes", "No" };
-
- while (sel_opt == 0) {
- sel_opt = show_menu("Another action?: ", options_last, names_last, 2);
- switch (sel_opt) {
- case 1:
- do_another = 1;
- break;
- case 2:
- do_another = 0;
- break;
- default:
- printf("Invalid option.\n");
- sel_opt = 0;
- }
- }
-
- sel_opt = (do_another == 1) ? 0 : 1;
- }
-
- return 0;
+ int sel_opt = 0;
+
+ const int options[6] = { 1, 2, 3, 4, 5, 6 };
+ const char *names[6] = { "Image Classification", "Object Detection",
+				 "Face Detection", "Facial Landmark Detection",
+ "Pose Estimation", "Exit" };
+
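+	// Top-level menu: each entry dispatches to the matching perform_*()
+	// scenario and then asks whether to run another action.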
+ int err = MEDIA_VISION_ERROR_NONE;
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Select Action:", options, names,
+ ARRAY_SIZE(options));
+ switch (sel_opt) {
+ case 1: {
+ err = perform_image_classification();
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to perform image classification\n");
+ }
+ } break;
+ case 2: {
+ err = perform_object_detection();
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to perform object detection\n");
+ }
+ } break;
+ case 3: {
+ err = perform_face_detection();
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to perform face detection\n");
+ }
+ } break;
+ case 4: {
+ err = perform_facial_landmark_detection();
+ if (err != MEDIA_VISION_ERROR_NONE) {
+				printf("Fail to perform facial landmark detection\n");
+ }
+ break;
+ }
+ case 5: {
+ err = perform_pose_estimation_detection();
+ if (err != MEDIA_VISION_ERROR_NONE) {
+				printf("Fail to perform pose estimation\n");
+ }
+ break;
+ }
+ case 6: {
+ printf("Exit");
+ } break;
+ default:
+			printf("Invalid option.\n");
+ sel_opt = 0;
+ continue;
+ }
+
+ int do_another = 0;
+ if (err != MEDIA_VISION_ERROR_NONE) {
+			printf("ERROR: Action is finished with error code: %i\n", err);
+ }
+
+ sel_opt = 0;
+ const int options_last[2] = { 1, 2 };
+ const char *names_last[2] = { "Yes", "No" };
+
+ while (sel_opt == 0) {
+ sel_opt =
+ show_menu("Another action?: ", options_last, names_last, 2);
+ switch (sel_opt) {
+ case 1:
+ do_another = 1;
+ break;
+ case 2:
+ do_another = 0;
+ break;
+ default:
+ printf("Invalid option.\n");
+ sel_opt = 0;
+ }
+ }
+
+ sel_opt = (do_another == 1) ? 0 : 1;
+ }
+
+ return 0;
}