author     Inki Dae <inki.dae@samsung.com>   2020-03-10 15:50:33 +0900
committer  Inki Dae <inki.dae@samsung.com>   2020-04-14 09:42:19 +0900
commit     49e2a24f07a06a5148b58608228f6e5557c51a79
tree       93f4e811ced3c452cd98bf4e33fc6bfbc587d041
parent     c45b075e46f0b68dd77710d93c4f8ab87a5b584e
mv_inference: Add inference data type support
This patch adds inference input data type support for Mediavision.
With this, we can use a user-given input tensor data type for the
OpenCV backend.

Change-Id: I681f203f4cbe9b3cc7b59a746cfab650009f65c1
Signed-off-by: Inki Dae <inki.dae@samsung.com>
-rw-r--r--  include/mv_inference.h                            | 10
-rw-r--r--  include/mv_inference_type.h                       | 14
-rw-r--r--  media-vision-config.json                          |  5
-rwxr-xr-x  mv_inference/inference/include/Inference.h        |  6
-rwxr-xr-x  mv_inference/inference/src/Inference.cpp          | 37
-rwxr-xr-x  mv_inference/inference/src/mv_inference_open.cpp  | 10
6 files changed, 78 insertions(+), 4 deletions(-)
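For context, a minimal sketch of how an application would opt into the new
attribute. Everything other than MV_INFERENCE_INPUT_DATA_TYPE and
mv_inference_data_type_e is the pre-existing MediaVision C API; error
handling is omitted for brevity:

    #include <mv_common.h>
    #include <mv_inference.h>

    static void configure_uint8_input(void)
    {
        mv_engine_config_h cfg = NULL;
        mv_inference_h infer = NULL;

        mv_create_engine_config(&cfg);

        /* Request an unsigned 8-bit input tensor instead of the previously
         * hard-coded FLOAT32; the value comes from mv_inference_data_type_e. */
        mv_engine_config_set_int_attribute(cfg, MV_INFERENCE_INPUT_DATA_TYPE,
                                           MV_INFERENCE_DATA_UINT8);

        mv_inference_create(&infer);
        mv_inference_configure(infer, cfg); /* reaches mv_inference_configure_input_info_open() */
        mv_inference_prepare(infer);
    }

If the attribute is left unset (the -1 default in media-vision-config.json
below), ConvertToIE() falls back to FLOAT32, preserving the old behavior.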
diff --git a/include/mv_inference.h b/include/mv_inference.h
index 4e8ddb17..397944dc 100644
--- a/include/mv_inference.h
+++ b/include/mv_inference.h
@@ -150,6 +150,16 @@ extern "C" {
#define MV_INFERENCE_INPUT_TENSOR_CHANNELS "MV_INFERENCE_INPUT_TENSOR_CHANNELS"
/**
+ * @brief Defines #MV_INFERENCE_INPUT_DATA_TYPE to set the data type of the input tensor.
+ * @details The data type of the input tensor can be changed according to a given weight file.
+ *
+ * @since_tizen 6.0
+ * @see mv_engine_config_set_int_attribute()
+ * @see mv_engine_config_get_int_attribute()
+ */
+#define MV_INFERENCE_INPUT_DATA_TYPE "MV_INFERENCE_INPUT_DATA_TYPE"
+
+/**
* @brief Defines #MV_INFERENCE_INPUT_NODE_NAME to set the input node name.
*
* @since_tizen 5.5
diff --git a/include/mv_inference_type.h b/include/mv_inference_type.h
index 11ca8c2e..f31a900a 100644
--- a/include/mv_inference_type.h
+++ b/include/mv_inference_type.h
@@ -62,6 +62,20 @@ typedef enum {
} mv_inference_target_type_e;
/**
+ * @brief Enumeration for input data type.
+ *
+ * @since_tizen 6.0
+ *
+ */
+typedef enum {
+ MV_INFERENCE_DATA_FLOAT16 = 0,
+ MV_INFERENCE_DATA_FLOAT32,
+ MV_INFERENCE_DATA_UINT8,
+ MV_INFERENCE_DATA_UINT16,
+ MV_INFERENCE_DATA_UINT32
+} mv_inference_data_type_e;
+
+/**
* @brief The inference handle.
*
* @since_tizen 5.5
diff --git a/media-vision-config.json b/media-vision-config.json
index f5dc6be9..01f006b0 100644
--- a/media-vision-config.json
+++ b/media-vision-config.json
@@ -157,6 +157,11 @@
"value" : -1
},
{
+ "name" : "MV_INFERENCE_INPUT_DATA_TYPE",
+ "type" : "integer",
+ "value" : -1
+ },
+ {
"name" : "MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH",
"type" : "string",
"value" : ""
diff --git a/mv_inference/inference/include/Inference.h b/mv_inference/inference/include/Inference.h
index 6cb25f10..03966687 100755
--- a/mv_inference/inference/include/Inference.h
+++ b/mv_inference/inference/include/Inference.h
@@ -91,6 +91,8 @@ struct InferenceConfig {
TensorInfo mTensorInfo; /**< Tensor information */
+ mv_inference_data_type_e mDataType; /**< Data type of an input tensor */
+
mv_inference_backend_type_e mBackedType; /**< Backed type of model files */
int mTargetTypes; /**< Target type to run inference */
@@ -159,6 +161,7 @@ public:
int ch,
double stdValue,
double meanValue,
+ int dataType,
const std::vector<std::string> names);
void ConfigureOutputInfo(std::vector<std::string> names);
@@ -351,7 +354,8 @@ private:
void CheckSupportedInferenceBackend();
int ConvertEngineErrorToVisionError(int error);
int ConvertTargetTypes(int given_types);
- int ConvertDataTypes(int given_type);
+ int ConvertToCv(int given_type);
+ inference_tensor_data_type_e ConvertToIE(int given_type);
int Preprocess(cv::Mat cvImg, cv::Mat cvDst, int data_type);
int PrepareTenosrBuffers(void);
void CleanupTensorBuffers(void);
diff --git a/mv_inference/inference/src/Inference.cpp b/mv_inference/inference/src/Inference.cpp
index b81ea8b3..a650964f 100755
--- a/mv_inference/inference/src/Inference.cpp
+++ b/mv_inference/inference/src/Inference.cpp
@@ -209,7 +209,7 @@ int Inference::ConvertTargetTypes(int given_types)
return target_types;
}
-int Inference::ConvertDataTypes(int given_type)
+int Inference::ConvertToCv(int given_type)
{
int type = 0;
@@ -231,6 +231,34 @@ int Inference::ConvertDataTypes(int given_type)
return type;
}
+inference_tensor_data_type_e Inference::ConvertToIE(int given_type)
+{
+ inference_tensor_data_type_e type = TENSOR_DATA_TYPE_FLOAT32;
+
+ switch (given_type) {
+ case MV_INFERENCE_DATA_FLOAT16:
+ type = TENSOR_DATA_TYPE_FLOAT16;
+ break;
+ case MV_INFERENCE_DATA_FLOAT32:
+ type = TENSOR_DATA_TYPE_FLOAT32;
+ break;
+ case MV_INFERENCE_DATA_UINT8:
+ type = TENSOR_DATA_TYPE_UINT8;
+ break;
+ case MV_INFERENCE_DATA_UINT16:
+ type = TENSOR_DATA_TYPE_UINT16;
+ break;
+ case MV_INFERENCE_DATA_UINT32:
+ type = TENSOR_DATA_TYPE_UINT32;
+ break;
+ default:
+ LOGI("unknown data type so FLOAT32 data type will be used in default");
+ break;
+ }
+
+ return type;
+}
+
int Inference::Preprocess(cv::Mat cvImg, cv::Mat cvDst, int data_type)
{
mSourceSize = cvImg.size();
@@ -328,6 +356,7 @@ void Inference::ConfigureInputInfo(int width,
int ch,
double stdValue,
double meanValue,
+ int dataType,
const std::vector<std::string> names)
{
LOGI("ENTER");
@@ -335,6 +364,7 @@ void Inference::ConfigureInputInfo(int width,
mConfig.mTensorInfo = {width, height, dim, ch};
mConfig.mStdValue = stdValue;
mConfig.mMeanValue = meanValue;
+ mConfig.mDataType = (mv_inference_data_type_e)dataType;
mConfig.mInputLayerNames = names;
inference_engine_layer_property property;
@@ -343,7 +373,8 @@ void Inference::ConfigureInputInfo(int width,
// If the plugin supports that, the given info will be ignored.
inference_engine_tensor_info tensor_info;
- tensor_info.data_type = TENSOR_DATA_TYPE_FLOAT32;
+ tensor_info.data_type = ConvertToIE(dataType);
+
// In case of OpenCV, only supports NCHW
tensor_info.shape_type = TENSOR_SHAPE_NCHW;
// modify to handle multiple tensor infos
@@ -886,7 +917,7 @@ int Inference::Run(std::vector<mv_source_h> &mvSources, std::vector<mv_rectangle
for (iter = mInputTensorBuffers.begin(); iter != mInputTensorBuffers.end(); iter++) {
inference_engine_tensor_buffer tensor_buffer = *iter;
- int data_type = ConvertDataTypes(tensor_buffer.data_type);
+ int data_type = ConvertToCv(tensor_buffer.data_type);
// Convert color space of input tensor data and then normalize it.
ret = Preprocess(cvSource, cv::Mat(mInputSize.height, mInputSize.width, data_type, tensor_buffer.buffer), data_type);
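Note that the body of ConvertToCv() is only renamed here, not shown.
Conceptually it maps a backend tensor data type onto an OpenCV matrix type so
that Preprocess() can wrap tensor_buffer.buffer in a cv::Mat. A rough sketch
under the assumption of a 3-channel input and available
inference-engine-interface headers; the real function may also consult the
configured channel count:

    #include <opencv2/core.hpp>

    // Sketch only, not the actual mediavision implementation: pick an
    // OpenCV matrix type for a 3-channel input tensor.
    static int ConvertToCvSketch(inference_tensor_data_type_e given_type)
    {
        switch (given_type) {
        case TENSOR_DATA_TYPE_UINT8:
            return CV_8UC3;  // 8-bit unsigned, 3 channels
        case TENSOR_DATA_TYPE_FLOAT32:
        default:
            return CV_32FC3; // FLOAT32 fallback, as elsewhere in this patch
        }
    }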
diff --git a/mv_inference/inference/src/mv_inference_open.cpp b/mv_inference/inference/src/mv_inference_open.cpp
index f4b16a27..c4407432 100755
--- a/mv_inference/inference/src/mv_inference_open.cpp
+++ b/mv_inference/inference/src/mv_inference_open.cpp
@@ -249,6 +249,7 @@ int mv_inference_configure_input_info_open(mv_inference_h infer, mv_engine_confi
int tensorWidth, tensorHeight, tensorDim, tensorCh;
double meanValue, stdValue;
char *node_name = NULL;
+ int dataType = 0;
// This should be one. only one batch is supported
tensorDim = 1;
@@ -292,6 +293,14 @@ int mv_inference_configure_input_info_open(mv_inference_h infer, mv_engine_confi
goto _ERROR_;
}
+ ret = mv_engine_config_get_int_attribute(engine_config,
+ MV_INFERENCE_INPUT_DATA_TYPE,
+ &dataType);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get a input tensor data type");
+ goto _ERROR_;
+ }
+
ret = mv_engine_config_get_string_attribute(engine_config,
MV_INFERENCE_INPUT_NODE_NAME,
&node_name);
@@ -306,6 +315,7 @@ int mv_inference_configure_input_info_open(mv_inference_h infer, mv_engine_confi
tensorCh,
stdValue,
meanValue,
+ dataType,
std::vector<std::string>(1, std::string(node_name)));
_ERROR_ :