diff options
author | Inki Dae <inki.dae@samsung.com> | 2021-09-30 19:45:34 +0900 |
---|---|---|
committer | Inki Dae <inki.dae@samsung.com> | 2021-10-05 16:22:11 +0900 |
commit | 10965865f20620704308c026f3139abbb8f96742 (patch) | |
tree | eaa469202be9ee62e35554fd6a1e8162a48d2038 | |
parent | 83e5cbee583e87d5f76e732d5cb1c1459292232d (diff) | |
download | mediavision-10965865f20620704308c026f3139abbb8f96742.tar.gz mediavision-10965865f20620704308c026f3139abbb8f96742.tar.bz2 mediavision-10965865f20620704308c026f3139abbb8f96742.zip |
test/machine_learning/inference: add test cases for legacy path
Added test cases for legacy path of inference engine, which uses
user-given model information instead of ones from json file.
To support this, this patch includes a small amount of code refactoring,
switching from google test fixtures to parameterized tests so that the
API path - json or legacy - can be selected at runtime.
This patch enlarges existing test coverage from 119 to 132 test cases.
[==========] 132 tests from 6 test suites ran. (49021 ms total)
[ PASSED ] 132 tests.
Change-Id: I9829725aad8037cbe5a82d50e7790a3e7a6bfe6b
Signed-off-by: Inki Dae <inki.dae@samsung.com>
7 files changed, 341 insertions, 39 deletions
diff --git a/test/testsuites/machine_learning/inference/test_face_detection.cpp b/test/testsuites/machine_learning/inference/test_face_detection.cpp index ebf37e5d..376a7173 100644 --- a/test/testsuites/machine_learning/inference/test_face_detection.cpp +++ b/test/testsuites/machine_learning/inference/test_face_detection.cpp @@ -27,6 +27,7 @@ public: { ASSERT_EQ(mv_inference_configure(infer, engine_cfg), MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_inference_prepare(infer), MEDIA_VISION_ERROR_NONE); ASSERT_EQ(MediaVision::Common::ImageHelper::loadImageToSource( IMG_FACE, mv_source), @@ -37,9 +38,42 @@ public: } }; -TEST_F(TestFaceDetection, CPU_TFLITE_MobilenetV1_SSD) +TEST_P(TestFaceDetection, CPU_TFLITE_MobilenetV1_SSD) { engine_config_hosted_cpu_tflite(engine_cfg, - FD_TFLITE_WEIGHT_MOBILENET_V1_SSD_300_PATH); + FD_TFLITE_WEIGHT_MOBILENET_V1_SSD_300_PATH, _use_json_parser); + if (!_use_json_parser) { + const char *inputNodeName = "normalized_input_image_tensor"; + const char *outputNodeName[] = { "TFLite_Detection_PostProcess", + "TFLite_Detection_PostProcess:1", + "TFLite_Detection_PostProcess:2", + "TFLite_Detection_PostProcess:3" }; + + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, 127.5), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3), + MEDIA_VISION_ERROR_NONE); + + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 300), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 300), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, 
MV_INFERENCE_INPUT_NODE_NAME, + inputNodeName), MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_array_string_attribute(engine_cfg, MV_INFERENCE_OUTPUT_NODE_NAMES, + outputNodeName, 4), MEDIA_VISION_ERROR_NONE); + } + inferenceFace(); -}
\ No newline at end of file +} + +INSTANTIATE_TEST_CASE_P(Prefix, TestFaceDetection, + ::testing::Values( + ParamTypeOne(false), + ParamTypeOne(true) + ) +);
\ No newline at end of file diff --git a/test/testsuites/machine_learning/inference/test_face_landmark_detection.cpp b/test/testsuites/machine_learning/inference/test_face_landmark_detection.cpp index 6d4ada9c..e186c6fe 100644 --- a/test/testsuites/machine_learning/inference/test_face_landmark_detection.cpp +++ b/test/testsuites/machine_learning/inference/test_face_landmark_detection.cpp @@ -37,7 +37,7 @@ public: } }; -TEST_F(TestFaceLandmarkDetection, CPU_OPENCV_CAFFE_CNNCASCADE) +TEST_P(TestFaceLandmarkDetection, CPU_OPENCV_CAFFE_CNNCASCADE) { const char *inputNodeName = "data"; const char *outputNodeName[] = { "Sigmoid_fc2" }; @@ -85,4 +85,11 @@ TEST_F(TestFaceLandmarkDetection, CPU_OPENCV_CAFFE_CNNCASCADE) outputNodeName, 1), MEDIA_VISION_ERROR_NONE); inferenceFaceLandmark(); -}
\ No newline at end of file +} + +INSTANTIATE_TEST_CASE_P(Prefix, TestFaceLandmarkDetection, + ::testing::Values( + ParamTypeOne(false), + ParamTypeOne(true) + ) +);
\ No newline at end of file diff --git a/test/testsuites/machine_learning/inference/test_image_classification.cpp b/test/testsuites/machine_learning/inference/test_image_classification.cpp index 0aab5947..cdda3f63 100644 --- a/test/testsuites/machine_learning/inference/test_image_classification.cpp +++ b/test/testsuites/machine_learning/inference/test_image_classification.cpp @@ -63,58 +63,232 @@ public: } }; -TEST_F(TestImageClassification, CPU_TFLITE_MobilenetV1) +TEST_P(TestImageClassification, CPU_TFLITE_MobilenetV1) { engine_config_hosted_cpu_tflite_user_model( engine_cfg, IC_TFLITE_WEIGHT_MOBILENET_V1_224_PATH, - IC_LABEL_MOBILENET_V1_224_PATH); + IC_LABEL_MOBILENET_V1_224_PATH, _use_json_parser); + + if (!_use_json_parser) { + const char *inputNodeName = "input"; + const char *outputNodeName[] = { "MobilenetV1/Predictions/Reshape_1" }; + + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, 127.5), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.0), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 224), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 224), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME, + inputNodeName), MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_array_string_attribute(engine_cfg, MV_INFERENCE_OUTPUT_NODE_NAMES, + outputNodeName, 1), MEDIA_VISION_ERROR_NONE); + } + inferenceBanana(); } -TEST_F(TestImageClassification, CPU_TFLITE_MobilenetV2) 
+TEST_P(TestImageClassification, CPU_TFLITE_MobilenetV2) { engine_config_hosted_cpu_tflite_user_model( engine_cfg, IC_TFLITE_WEIGHT_MOBILENET_V2_224_PATH, - IC_LABEL_MOBILENET_V1_224_PATH); + IC_LABEL_MOBILENET_V1_224_PATH, _use_json_parser); + + if (!_use_json_parser) { + const char *inputNodeName = "input"; + const char *outputNodeName[] = { "MobilenetV2/Predictions/Reshape_1" }; + + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, 127.5), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.01), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 224), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 224), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME, + inputNodeName), MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_array_string_attribute(engine_cfg, MV_INFERENCE_OUTPUT_NODE_NAMES, + outputNodeName, 1), MEDIA_VISION_ERROR_NONE); + } + inferenceBanana(); } -TEST_F(TestImageClassification, CPU_TFLITE_Densenet) +TEST_P(TestImageClassification, CPU_TFLITE_Densenet) { engine_config_hosted_cpu_tflite_user_model( engine_cfg, IC_TFLITE_WEIGHT_DENSENET_224_PATH, - IC_LABEL_MOBILENET_V1_224_PATH); + IC_LABEL_MOBILENET_V1_224_PATH, _use_json_parser); + + if (!_use_json_parser) { + const char *inputNodeName = "Placeholder"; + const char *outputNodeName[] = { "softmax_tensor" }; + + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5), + 
MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, 255.0), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.0), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 224), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 224), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME, + inputNodeName), MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_array_string_attribute(engine_cfg, MV_INFERENCE_OUTPUT_NODE_NAMES, + outputNodeName, 1), MEDIA_VISION_ERROR_NONE); + } + inferenceBanana(); } -TEST_F(TestImageClassification, CPU_TFLITE_Nasnet) +TEST_P(TestImageClassification, CPU_TFLITE_Nasnet) { engine_config_hosted_cpu_tflite_user_model(engine_cfg, IC_TFLITE_WEIGHT_NASNET_224_PATH, - IC_LABEL_MOBILENET_V1_224_PATH); + IC_LABEL_MOBILENET_V1_224_PATH, + _use_json_parser); + + if (!_use_json_parser) { + const char *inputNodeName = "input"; + const char *outputNodeName[] = { "final_layer/predictions" }; + + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, 127.5), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.0), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 224), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, 
MV_INFERENCE_INPUT_TENSOR_HEIGHT, 224), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME, + inputNodeName), MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_array_string_attribute(engine_cfg, MV_INFERENCE_OUTPUT_NODE_NAMES, + outputNodeName, 1), MEDIA_VISION_ERROR_NONE); + } + inferenceBanana(); } -TEST_F(TestImageClassification, CPU_TFLITE_MNasnet) +TEST_P(TestImageClassification, CPU_TFLITE_MNasnet) { engine_config_hosted_cpu_tflite_user_model( engine_cfg, IC_TFLITE_WEIGHT_MNASNET_224_PATH, - IC_LABEL_MOBILENET_V1_224_PATH); + IC_LABEL_MOBILENET_V1_224_PATH, + _use_json_parser); + + if (!_use_json_parser) { + const char *inputNodeName = "input"; + const char *outputNodeName[] = { "output" }; + + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, 57.5), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.0), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 224), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 224), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME, + inputNodeName), MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_array_string_attribute(engine_cfg, MV_INFERENCE_OUTPUT_NODE_NAMES, + outputNodeName, 1), MEDIA_VISION_ERROR_NONE); + } + inferenceBanana(); } 
-TEST_F(TestImageClassification, CPU_TFLITE_Squeezenet) +TEST_P(TestImageClassification, CPU_TFLITE_Squeezenet) { engine_config_hosted_cpu_tflite_user_model( engine_cfg, IC_TFLITE_WEIGHT_SQUEEZENET_224_PATH, - IC_LABEL_MOBILENET_V1_224_PATH); + IC_LABEL_MOBILENET_V1_224_PATH, + _use_json_parser); + + if (!_use_json_parser) { + const char *inputNodeName = "Placeholder"; + const char *outputNodeName[] = { "softmax_tensor" }; + + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, 127.5), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.0), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 224), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 224), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME, + inputNodeName), MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_array_string_attribute(engine_cfg, MV_INFERENCE_OUTPUT_NODE_NAMES, + outputNodeName, 1), MEDIA_VISION_ERROR_NONE); + } + inferenceBanana(); } -TEST_F(TestImageClassification, CPU_TFLITE_QUANT_MobilenetV1) +TEST_P(TestImageClassification, CPU_TFLITE_QUANT_MobilenetV1) { engine_config_hosted_cpu_tflite_user_model( engine_cfg, IC_TFLITE_WEIGHT_QUANT_MOBILENET_V1_224_PATH, - IC_LABEL_MOBILENET_V1_224_PATH); + IC_LABEL_MOBILENET_V1_224_PATH, + _use_json_parser); + + if (!_use_json_parser) { + const char *inputNodeName = "input"; + const char *outputNodeName[] = { "MobilenetV1/Predictions/Reshape_1" }; + + 
ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_DATA_TYPE, MV_INFERENCE_DATA_UINT8), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 0.0), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, 1.0), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.0), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 224), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 224), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME, + inputNodeName), MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_array_string_attribute(engine_cfg, MV_INFERENCE_OUTPUT_NODE_NAMES, + outputNodeName, 1), MEDIA_VISION_ERROR_NONE); + } + inferenceBanana(); -}
\ No newline at end of file +} + +INSTANTIATE_TEST_CASE_P(Prefix, TestImageClassification, + ::testing::Values( + ParamTypeOne(false), + ParamTypeOne(true) + ) +);
\ No newline at end of file diff --git a/test/testsuites/machine_learning/inference/test_inference_helper.cpp b/test/testsuites/machine_learning/inference/test_inference_helper.cpp index 1c88689f..81a0380b 100644 --- a/test/testsuites/machine_learning/inference/test_inference_helper.cpp +++ b/test/testsuites/machine_learning/inference/test_inference_helper.cpp @@ -16,19 +16,23 @@ TestInference::~TestInference() } void engine_config_hosted_cpu_tflite(mv_engine_config_h handle, - const char *tf_weight) + const char *tf_weight, + const bool use_json_parser) { EXPECT_EQ(mv_engine_config_set_string_attribute( handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, tf_weight), MEDIA_VISION_ERROR_NONE); - std::string meta_file_path = tf_weight; - meta_file_path = meta_file_path.substr(0, meta_file_path.find('.')); - meta_file_path += std::string(".json"); + if (use_json_parser) { + std::string meta_file_path = tf_weight; + meta_file_path = meta_file_path.substr(0, meta_file_path.find('.')); + meta_file_path += std::string(".json"); + + EXPECT_EQ(mv_engine_config_set_string_attribute( + handle, MV_INFERENCE_MODEL_META_FILE_PATH , meta_file_path.c_str()), + MEDIA_VISION_ERROR_NONE); + } - EXPECT_EQ(mv_engine_config_set_string_attribute( - handle, MV_INFERENCE_MODEL_META_FILE_PATH , meta_file_path.c_str()), - MEDIA_VISION_ERROR_NONE); EXPECT_EQ(mv_engine_config_set_int_attribute(handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_TFLITE), @@ -41,10 +45,11 @@ void engine_config_hosted_cpu_tflite(mv_engine_config_h handle, void engine_config_hosted_cpu_tflite_user_model(mv_engine_config_h handle, const char *tf_weight, - const char *user_file) + const char *user_file, + const bool use_json_parser) { - engine_config_hosted_cpu_tflite(handle, tf_weight); + engine_config_hosted_cpu_tflite(handle, tf_weight, use_json_parser); EXPECT_EQ(mv_engine_config_set_string_attribute( handle, MV_INFERENCE_MODEL_USER_FILE_PATH, user_file), MEDIA_VISION_ERROR_NONE); -} +}
\ No newline at end of file diff --git a/test/testsuites/machine_learning/inference/test_inference_helper.hpp b/test/testsuites/machine_learning/inference/test_inference_helper.hpp index 16bb4c62..a04fb000 100644 --- a/test/testsuites/machine_learning/inference/test_inference_helper.hpp +++ b/test/testsuites/machine_learning/inference/test_inference_helper.hpp @@ -3,8 +3,18 @@ #include <mv_inference.h> -class TestInference : public ::testing::Test +typedef std::tuple<int> ParamTypeOne; + +class TestInference : public ::testing::TestWithParam<ParamTypeOne> { +protected: + void SetUp() final + { + std::tie(_use_json_parser) = GetParam(); + } + + bool _use_json_parser; + public: TestInference(); virtual ~TestInference(); @@ -14,10 +24,12 @@ public: }; void engine_config_hosted_cpu_tflite(mv_engine_config_h handle, - const char *tf_weight); + const char *tf_weight, + const bool use_json_parser); void engine_config_hosted_cpu_tflite_user_model(mv_engine_config_h handle, const char *tf_weight, - const char *user_file); + const char *user_file, + const bool use_json_parser); #endif //__TEST_INFERENCE_HELPER_HPP__ diff --git a/test/testsuites/machine_learning/inference/test_object_detection.cpp b/test/testsuites/machine_learning/inference/test_object_detection.cpp index 8cea9f7b..b849c20b 100644 --- a/test/testsuites/machine_learning/inference/test_object_detection.cpp +++ b/test/testsuites/machine_learning/inference/test_object_detection.cpp @@ -46,10 +46,45 @@ public: } }; -TEST_F(TestObjectDetection, CPU_TFLITE_MobilenetV1_SSD) +TEST_P(TestObjectDetection, CPU_TFLITE_MobilenetV1_SSD) { engine_config_hosted_cpu_tflite_user_model( engine_cfg, OD_TFLITE_WEIGHT_MOBILENET_V1_SSD_300_PATH, - OD_LABEL_MOBILENET_V1_SSD_300_PATH); + OD_LABEL_MOBILENET_V1_SSD_300_PATH, + _use_json_parser); + + if (!_use_json_parser) { + const char *inputNodeName = "normalized_input_image_tensor"; + const char *outputNodeName[] = { "TFLite_Detection_PostProcess", + 
"TFLite_Detection_PostProcess:1", + "TFLite_Detection_PostProcess:2", + "TFLite_Detection_PostProcess:3" }; + + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, 127.5), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3), + MEDIA_VISION_ERROR_NONE); + + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 300), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 300), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME, + inputNodeName), MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_array_string_attribute(engine_cfg, MV_INFERENCE_OUTPUT_NODE_NAMES, + outputNodeName, 4), MEDIA_VISION_ERROR_NONE); + } + inferenceDog(); -}
\ No newline at end of file +} + +INSTANTIATE_TEST_CASE_P(Prefix, TestObjectDetection, + ::testing::Values( + ParamTypeOne(false), + ParamTypeOne(true) + ) +);
\ No newline at end of file diff --git a/test/testsuites/machine_learning/inference/test_pose_landmark_detection.cpp b/test/testsuites/machine_learning/inference/test_pose_landmark_detection.cpp index 033488c1..58c4b43a 100644 --- a/test/testsuites/machine_learning/inference/test_pose_landmark_detection.cpp +++ b/test/testsuites/machine_learning/inference/test_pose_landmark_detection.cpp @@ -37,9 +37,44 @@ public: } }; -TEST_F(TestPoseLandmarkDetection, CPU_TFLITE_MobilenetV1) +TEST_P(TestPoseLandmarkDetection, CPU_TFLITE_MobilenetV1) { engine_config_hosted_cpu_tflite( - engine_cfg, PLD_TFLITE_WEIGHT_MOBILENET_V1_POSENET_257_PATH); + engine_cfg, PLD_TFLITE_WEIGHT_MOBILENET_V1_POSENET_257_PATH, _use_json_parser); + + if (!_use_json_parser) { + const char *inputNodeName = "sub_2"; + const char *outputNodeName[] = { "MobilenetV1/heatmap_2/BiasAdd", + "MobilenetV1/offset_2/BiasAdd", + "MobilenetV1/displacement_fwd_2/BiasAdd", + "MobilenetV1/displacement_bwd_2/BiasAdd" }; + + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_MEAN_VALUE, 127.5), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_MODEL_STD_VALUE, 127.5), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_double_attribute(engine_cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.3), + MEDIA_VISION_ERROR_NONE); + + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 300), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 300), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_int_attribute(engine_cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3), + MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_string_attribute(engine_cfg, MV_INFERENCE_INPUT_NODE_NAME, + inputNodeName), MEDIA_VISION_ERROR_NONE); + ASSERT_EQ(mv_engine_config_set_array_string_attribute(engine_cfg, MV_INFERENCE_OUTPUT_NODE_NAMES, + 
outputNodeName, 4), MEDIA_VISION_ERROR_NONE); + } + + inferencePoseLandmark(); -}
\ No newline at end of file +} + +INSTANTIATE_TEST_CASE_P(Prefix, TestPoseLandmarkDetection, + ::testing::Values( + ParamTypeOne(false), + ParamTypeOne(true) + ) +);
\ No newline at end of file |