author     Hyunsoo Park <hance.park@samsung.com>  2021-09-07 07:59:35 +0900
committer  Hyunsoo Park <hance.park@samsung.com>  2021-09-13 13:06:58 +0900
commit     be15da7fd87bc993fc9a897cd4fb352def67a8e3 (patch)
tree       a5247e1b30cd4b213ac188be16cc36216700859c
parent     523cff7e4f0ae9326a1f0f0413aa839c76962c5a (diff)
download   inference-engine-interface-be15da7fd87bc993fc9a897cd4fb352def67a8e3.tar.gz
           inference-engine-interface-be15da7fd87bc993fc9a897cd4fb352def67a8e3.tar.bz2
           inference-engine-interface-be15da7fd87bc993fc9a897cd4fb352def67a8e3.zip
Change-Id: Ib4d9f82d97bf8aabae8fb5c3cc8a7375b050ee99
Signed-off-by: Hyunsoo Park <hance.park@samsung.com>
-rw-r--r--  packaging/inference-engine-interface.spec    2
-rw-r--r--  tools/inference_engine_cltuner.cpp          259
2 files changed, 137 insertions, 124 deletions
diff --git a/packaging/inference-engine-interface.spec b/packaging/inference-engine-interface.spec
index 4d85389..92bf495 100644
--- a/packaging/inference-engine-interface.spec
+++ b/packaging/inference-engine-interface.spec
@@ -1,7 +1,7 @@
Name: inference-engine-interface
Summary: Interface of inference engines
Version: 0.0.2
-Release: 15
+Release: 16
Group: Multimedia/Framework
License: Apache-2.0
Source0: %{name}-%{version}.tar.gz
diff --git a/tools/inference_engine_cltuner.cpp b/tools/inference_engine_cltuner.cpp
index 927821b..d077566 100644
--- a/tools/inference_engine_cltuner.cpp
+++ b/tools/inference_engine_cltuner.cpp
@@ -71,7 +71,18 @@ inference_engine_layer_property output_property;
int menu_idx=0;
-static void show_menu(const char *title, int idx){
+typedef enum {
+ TFLITE_IMAGE_CLASSIFICATION_MODEL = 1,
+ TFLITE_IMAGE_CLASSIFICATION_QUANTIZED_MODEL,
+ TFLITE_OBJECT_DETECTION_MODEL,
+ TFLITE_FACE_DETECTION_MODEL,
+ TFLITE_POSE_ESTIMATION_MODEL,
+ TFLITE_POSE_DETECTION_1_MODEL,
+ TFLITE_POSE_DETECTION_2_MODEL
+} model_type_e;
+
+static void show_menu(const char *title, int idx)
+{
g_print("*********************************************\n");
g_print("* %38s *\n", title);
g_print("*-------------------------------------------*\n");
@@ -82,18 +93,130 @@ static void show_menu(const char *title, int idx){
g_print("* %2i. %34s *\n", 3, "INFERENCE_ENGINE_CLTUNER_RAPID");
g_print("* %2c. %34s *\n", 'q', "Exit");
} else if (idx == 1) {
- g_print("* %2i. %34s *\n", 1, "ic_tflite_model.tflite");
- g_print("* %2i. %34s *\n", 2, "ic_tflite_q_model.tflite");
- g_print("* %2i. %34s *\n", 3, "od_tflite_model.tflite");
- g_print("* %2i. %34s *\n", 4, "fd_tflite_model1.tflite");
- g_print("* %2i. %34s *\n", 5, "ped_tflite_model.tflite");
- g_print("* %2i. %34s *\n", 6, "posenet1_lite_224.tflite");
- g_print("* %2i. %34s *\n", 7, "posenet2_lite_224.tflite");
+ g_print("* %2i. %34s *\n", TFLITE_IMAGE_CLASSIFICATION_MODEL, "ic_tflite_model.tflite");
+ g_print("* %2i. %34s *\n", TFLITE_IMAGE_CLASSIFICATION_QUANTIZED_MODEL, "ic_tflite_q_model.tflite");
+ g_print("* %2i. %34s *\n", TFLITE_OBJECT_DETECTION_MODEL, "od_tflite_model.tflite");
+ g_print("* %2i. %34s *\n", TFLITE_FACE_DETECTION_MODEL, "fd_tflite_model1.tflite");
+ g_print("* %2i. %34s *\n", TFLITE_POSE_ESTIMATION_MODEL, "ped_tflite_model.tflite");
+ g_print("* %2i. %34s *\n", TFLITE_POSE_DETECTION_1_MODEL, "posenet1_lite_224.tflite");
+ g_print("* %2i. %34s *\n", TFLITE_POSE_DETECTION_2_MODEL, "posenet2_lite_224.tflite");
g_print("* %2c. %34s *\n", 'q', "Exit");
}
g_print("*********************************************\n\n");
}
+void SetupModelInfo(model_type_e model_type)
+{
+ switch (model_type) {
+ case TFLITE_IMAGE_CLASSIFICATION_MODEL:
+ g_print("ic_tflite_model.tflite is selected\n");
+ height = 224;
+ width = 224;
+ ch = 3;
+ tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+ tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
+ image_paths.push_back("/opt/usr/images/image_classification.bin");
+ input_layers.push_back("input_2");
+ output_layers.push_back("dense_3/Softmax");
+ model_paths.push_back("/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite");
+ break;
+ case TFLITE_IMAGE_CLASSIFICATION_QUANTIZED_MODEL:
+ g_print("ic_tflite_q_model.tflite is selected\n");
+ height = 224;
+ width = 224;
+ ch = 3;
+ tensor_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
+ tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
+ image_paths.push_back("/opt/usr/images/image_classification_q.bin");
+ input_layers.push_back("input");
+ output_layers.push_back("MobilenetV1/Predictions/Reshape_1");
+ model_paths.push_back("/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite");
+ break;
+ case TFLITE_OBJECT_DETECTION_MODEL:
+ g_print("od_tflite_model.tflite is selected\n");
+ height = 300;
+ width = 300;
+ ch = 3;
+ tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+ tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
+ image_paths.push_back("/opt/usr/images/object_detection.bin");
+ input_layers.push_back("normalized_input_image_tensor");
+ output_layers.push_back("TFLite_Detection_PostProcess");
+ output_layers.push_back("TFLite_Detection_PostProcess:1");
+ output_layers.push_back("TFLite_Detection_PostProcess:2");
+ output_layers.push_back("TFLite_Detection_PostProcess:3");
+ model_paths.push_back("/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite");
+ break;
+ case TFLITE_FACE_DETECTION_MODEL:
+ g_print("fd_tflite_model1.tflite is selected\n");
+ height = 300;
+ width = 300;
+ ch = 3;
+ tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+ tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
+ image_paths.push_back("/opt/usr/images/face_detection.bin");
+ input_layers.push_back("normalized_input_image_tensor");
+ output_layers.push_back("TFLite_Detection_PostProcess");
+ output_layers.push_back("TFLite_Detection_PostProcess:1");
+ output_layers.push_back("TFLite_Detection_PostProcess:2");
+ output_layers.push_back("TFLite_Detection_PostProcess:3");
+ model_paths.push_back("/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite");
+ break;
+ case TFLITE_POSE_ESTIMATION_MODEL:
+ g_print("ped_tflite_model.tflite is selected\n");
+ height = 192;
+ width = 192;
+ ch = 3;
+ tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+ tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
+ image_paths.push_back("/opt/usr/images/pose_estimation.bin");
+ input_layers.push_back("image");
+ output_layers.push_back("Convolutional_Pose_Machine/stage_5_out");
+ model_paths.push_back("/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite");
+ break;
+ case TFLITE_POSE_DETECTION_1_MODEL:
+ g_print("posenet1_lite_224.tflite is selected\n");
+ height = 224;
+ width = 224;
+ ch = 3;
+ tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+ tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
+ image_paths.push_back("/opt/usr/images/hand.bin");
+ input_layers.push_back("input");
+ output_layers.push_back("mobilenetv2/boundingbox");
+ output_layers.push_back("mobilenetv2/heatmap");
+ model_paths.push_back("/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite");
+ break;
+ case TFLITE_POSE_DETECTION_2_MODEL:
+ g_print("posenet2_lite_224.tflite is selected\n");
+ height = 56;
+ width = 56;
+ ch = 21;
+ tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+ tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
+ image_paths.push_back("/opt/usr/images/hand.bin");
+ input_layers.push_back("input");
+ output_layers.push_back("mobilenetv2/coord_refine");
+ output_layers.push_back("mobilenetv2/gesture");
+ model_paths.push_back("/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite");
+ break;
+	default:
+ break;
+ }
+ input_tensor_info = {
+ { 1, ch, height, width },
+ static_cast<inference_tensor_shape_type_e>(tensor_shape),
+ static_cast<inference_tensor_data_type_e>(tensor_type),
+ static_cast<size_t>(1 * ch * height * width)
+ };
+
+ output_tensor_info = {
+ std::vector<size_t>{1},
+ static_cast<inference_tensor_shape_type_e>(tensor_shape),
+ static_cast<inference_tensor_data_type_e>(tensor_type),
+ 1
+ };
+}
static gboolean __interpret(char *cmd)
{
if (strncmp(cmd, "", 1) == 0) {
@@ -110,119 +233,7 @@ static gboolean __interpret(char *cmd)
menu_idx = 1;
show_menu("Select Model", menu_idx);
} else if (menu_idx == 1) {
- int idx = atoi(cmd);
- switch (idx) {
- case 1 :
- g_print("ic_tflite_model.tflite is selected\n");
- height=224;
- width=224;
- ch=3;
- tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
- tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
- image_paths.push_back("/opt/usr/images/image_classification.bin");
- input_layers.push_back("input_2");
- output_layers.push_back("dense_3/Softmax");
- model_paths.push_back("/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite");
- break;
- case 2 :
- g_print("ic_tflite_q_model.tflite is selected\n");
- height=224;
- width=224;
- ch=3;
- tensor_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
- tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
- image_paths.push_back("/opt/usr/images/image_classification_q.bin");
- input_layers.push_back("input");
- output_layers.push_back("MobilenetV1/Predictions/Reshape_1");
- model_paths.push_back("/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite");
- break;
- case 3 :
- g_print("od_tflite_model.tflite is selected\n");
- height=300;
- width=300;
- ch=3;
- tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
- tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
- image_paths.push_back("/opt/usr/images/object_detection.bin");
- input_layers.push_back("normalized_input_image_tensor");
- output_layers.push_back("TFLite_Detection_PostProcess");
- output_layers.push_back("TFLite_Detection_PostProcess:1");
- output_layers.push_back("TFLite_Detection_PostProcess:2");
- output_layers.push_back("TFLite_Detection_PostProcess:3");
- model_paths.push_back("/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite");
- break;
- case 4 :
- g_print("fd_tflite_model1.tflite is selected\n");
- height=300;
- width=300;
- ch=3;
- tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
- tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
- image_paths.push_back("/opt/usr/images/face_detection.bin");
- input_layers.push_back("normalized_input_image_tensor");
- output_layers.push_back("TFLite_Detection_PostProcess");
- output_layers.push_back("TFLite_Detection_PostProcess:1");
- output_layers.push_back("TFLite_Detection_PostProcess:2");
- output_layers.push_back("TFLite_Detection_PostProcess:3");
- model_paths.push_back("/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite");
- break;
- case 5 :
- g_print("ped_tflite_model.tflite is selected\n");
- height=192;
- width=192;
- ch=3;
- tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
- tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
- image_paths.push_back("/opt/usr/images/pose_estimation.bin");
- input_layers.push_back("image");
- output_layers.push_back("Convolutional_Pose_Machine/stage_5_out");
- model_paths.push_back("/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite");
- break;
- case 6 :
- g_print("posenet1_lite_224.tflite is selected\n");
- height=224;
- width=224;
- ch=3;
- tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
- tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
- image_paths.push_back("/opt/usr/images/hand.bin");
- input_layers.push_back("input");
- output_layers.push_back("mobilenetv2/boundingbox");
- output_layers.push_back("mobilenetv2/heatmap");
- model_paths.push_back("/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite");
- break;
- case 7 :
- g_print("posenet2_lite_224.tflite is selected\n");
- height=56;
- width=56;
- ch=21;
- tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
- tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
- image_paths.push_back("/opt/usr/images/hand.bin");
- input_layers.push_back("input");
- output_layers.push_back("mobilenetv2/coord_refine");
- output_layers.push_back("mobilenetv2/gesture");
- model_paths.push_back("/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite");
- break;
- default :
- break;
- }
-
- inference_engine_tensor_info _input_tensor_info = {
- { 1, ch, height, width },
- (inference_tensor_shape_type_e)tensor_shape,
- static_cast<inference_tensor_data_type_e>(tensor_type),
- static_cast<size_t>(1 * ch * height * width)
- };
-
- inference_engine_tensor_info _output_tensor_info = {
- std::vector<size_t>{1},
- (inference_tensor_shape_type_e)tensor_shape,
- (inference_tensor_data_type_e)tensor_type,
- 1
- };
- input_tensor_info = _input_tensor_info;
- output_tensor_info = _output_tensor_info;
+ SetupModelInfo(static_cast<model_type_e>(atoi(cmd)));
if (!process()) {
g_print("An error occurred while running process().\n");
return FALSE;
@@ -236,7 +247,8 @@ static gboolean __interpret(char *cmd)
return TRUE;
}
-void CheckResult(){
+void CheckResult()
+{
std::string tune_file = model_paths[0];
tune_file.append(".tune");
int fd = open(tune_file.c_str(), O_RDONLY);
@@ -279,7 +291,8 @@ static gboolean __input(GIOChannel *channel,
return TRUE;
}
-static gboolean process(){
+static gboolean process()
+{
InferenceEngineCommon *mBackend;
inference_engine_config config = {
.backend_name = "armnn",
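
The key change above routes the menu selection straight into SetupModelInfo(static_cast<model_type_e>(atoi(cmd))). One caveat worth noting: atoi() returns 0 for non-numeric input, which lands in the switch's empty default branch, so the global model state is left unchanged before process() runs. Below is a minimal, hypothetical sketch of how the selection could be validated before the cast; parse_model_type is an invented helper and is not part of this patch:

#include <cstdlib>

// Hypothetical helper (not in the patch): parse the menu command and make
// sure it maps onto a valid model_type_e before SetupModelInfo() is called.
static bool parse_model_type(const char *cmd, model_type_e &out)
{
	char *end = nullptr;
	long value = std::strtol(cmd, &end, 10);

	// Reject empty input or trailing garbage left in the line buffer.
	if (end == cmd || (*end != '\0' && *end != '\n'))
		return false;

	// Reject values outside the enum range; atoi()-style 0 also lands here.
	if (value < TFLITE_IMAGE_CLASSIFICATION_MODEL ||
	    value > TFLITE_POSE_DETECTION_2_MODEL)
		return false;

	out = static_cast<model_type_e>(value);
	return true;
}

With such a guard, __interpret() could call SetupModelInfo() only for recognized selections and re-display the menu otherwise, instead of silently running process() with stale globals.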