Diffstat (limited to 'inference-engine/tests')
-rw-r--r--  inference-engine/tests/CMakeLists.txt | 1
-rw-r--r--  inference-engine/tests/helpers/CMakeLists.txt | 3
-rw-r--r--  inference-engine/tests/helpers/disable_tests.hpp | 1
-rw-r--r--  inference-engine/tests/helpers/ir_gen_helper.cpp | 48
-rw-r--r--  inference-engine/tests/helpers/ir_gen_helper.hpp | 27
-rw-r--r--  inference-engine/tests/helpers/single_layer_common.cpp | 196
-rw-r--r--  inference-engine/tests/helpers/single_layer_common.hpp | 142
-rw-r--r--  inference-engine/tests/helpers/test_assertions.hpp | 1
-rw-r--r--  inference-engine/tests/helpers/test_model_path.hpp | 1
-rw-r--r--  inference-engine/tests/helpers/test_model_repo.hpp.in | 16
-rw-r--r--  inference-engine/tests/helpers/test_models_path.cpp | 1
-rw-r--r--  inference-engine/tests/helpers/tests_common.hpp | 26
-rw-r--r--  inference-engine/tests/helpers/tests_common_func.hpp | 1
-rw-r--r--  inference-engine/tests/helpers/tests_file_utils.cpp | 1
-rw-r--r--  inference-engine/tests/helpers/tests_file_utils.hpp | 1
-rw-r--r--  inference-engine/tests/helpers/tests_utils.hpp | 1
-rw-r--r--  inference-engine/tests/helpers/version_printer.cpp | 1
-rw-r--r--  inference-engine/tests/helpers/xml_father.hpp | 5
-rw-r--r--  inference-engine/tests/helpers/xml_helper.hpp | 6
-rw-r--r--  inference-engine/tests/helpers/xml_net_builder.cpp | 21
-rw-r--r--  inference-engine/tests/helpers/xml_net_builder.hpp | 120
-rw-r--r--  inference-engine/tests/mock_engine/CMakeLists.txt | 6
-rw-r--r--  inference-engine/tests/mock_engine/dllmain.cpp | 3
-rw-r--r--  inference-engine/tests/mock_engine/mock_plugin.cpp | 1
-rw-r--r--  inference-engine/tests/mock_engine/mock_plugin.hpp | 1
-rw-r--r--  inference-engine/tests/mock_engine/stub_inference_engine.xpp | 1
-rw-r--r--  inference-engine/tests/unit/CMakeLists.txt | 46
-rw-r--r--  inference-engine/tests/unit/builders/batch_normalization_layer_test.cpp | 36
-rw-r--r--  inference-engine/tests/unit/builders/builder_test.hpp | 33
-rw-r--r--  inference-engine/tests/unit/builders/input_layer_test.cpp | 32
-rw-r--r--  inference-engine/tests/unit/builders/network_builder_test.cpp | 927
-rw-r--r--  inference-engine/tests/unit/cnn_network/cnn_net_reader_impl_test.cpp | 264
-rw-r--r--  inference-engine/tests/unit/cnn_network/cnn_network_impl_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/cnn_network/layout_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/cnn_network/mean_image.cpp | 1
-rw-r--r--  inference-engine/tests/unit/cnn_network/mean_image.h | 1
-rw-r--r--  inference-engine/tests/unit/cnn_network/parser_tests_base.hpp | 11
-rw-r--r--  inference-engine/tests/unit/cnn_network/v2_format_parser_test.cpp | 3
-rw-r--r--  inference-engine/tests/unit/cnn_network/v3_format_parser_test.cpp | 95
-rw-r--r--  inference-engine/tests/unit/cnn_network/xml_father_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/engines/gna/I8_quantisation_test.cpp | 117
-rw-r--r--  inference-engine/tests/unit/engines/gna/configuration_test.cpp | 136
-rw-r--r--  inference-engine/tests/unit/engines/gna/gna_allocator_test.cpp | 78
-rw-r--r--  inference-engine/tests/unit/engines/gna/gna_api_stub.cpp | 218
-rw-r--r--  inference-engine/tests/unit/engines/gna/gna_graph_aot_test.cpp | 85
-rw-r--r--  inference-engine/tests/unit/engines/gna/gna_hardware_precision_test.cpp | 49
-rw-r--r--  inference-engine/tests/unit/engines/gna/gna_matcher.cpp | 440
-rw-r--r--  inference-engine/tests/unit/engines/gna/gna_matcher.hpp | 490
-rw-r--r--  inference-engine/tests/unit/engines/gna/gna_memory_test.cpp | 440
-rw-r--r--  inference-engine/tests/unit/engines/gna/gna_mock_api.hpp | 70
-rw-r--r--  inference-engine/tests/unit/engines/gna/gna_proc_type_test.cpp | 40
-rw-r--r--  inference-engine/tests/unit/engines/gna/gna_pwl_test.cpp | 214
-rw-r--r--  inference-engine/tests/unit/engines/gna/gna_query_state_tests.cpp | 25
-rw-r--r--  inference-engine/tests/unit/engines/gna/i16_quantisation_test.cpp | 381
-rw-r--r--  inference-engine/tests/unit/engines/gna/matchers/conv_matcher.hpp | 34
-rw-r--r--  inference-engine/tests/unit/engines/gna/matchers/copy_matcher.hpp | 32
-rw-r--r--  inference-engine/tests/unit/engines/gna/matchers/diag_matcher.hpp | 51
-rw-r--r--  inference-engine/tests/unit/engines/gna/matchers/nnet_base_matcher.hpp | 86
-rw-r--r--  inference-engine/tests/unit/engines/gna/matchers/pool_matcher.hpp | 37
-rw-r--r--  inference-engine/tests/unit/engines/gna/matchers/precision_matcher.hpp | 54
-rw-r--r--  inference-engine/tests/unit/engines/gna/matchers/pwl_matcher.hpp | 61
-rw-r--r--  inference-engine/tests/unit/engines/gna/matchers/pwl_quantization_metrics_matcher.hpp | 139
-rw-r--r--  inference-engine/tests/unit/engines/gna/test_irs.cpp | 2678
-rw-r--r--  inference-engine/tests/unit/engines/gna/test_irs.hpp | 43
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/constant_propagation_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/convert_desc_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/dump_test.cpp | 136
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/dumper_test.cpp | 99
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/fake_layer.cpp | 125
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/gather_tests.cpp | 695
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/graph_generic_test.cpp | 11
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/interp_tests.cpp | 6
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/mvn_tests.cpp | 279
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/resample_tests.cpp | 9
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_activation_test.cpp | 139
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_batchnorm_scaleshift_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_batchnorm_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_concat_test.cpp | 412
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_conv_test.cpp | 427
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_crop_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_deconv_test.cpp | 362
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_depthwise_test.cpp | 169
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_eltwise_test.cpp | 121
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_fullyconnected_test.cpp | 123
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_gemm_test.cpp | 627
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_input_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_leaks_test.cpp | 3
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_lrn_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_permute_test.cpp | 9
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_pooling_test.cpp | 397
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_power_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_relu_test.cpp | 99
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_reorder_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_reshape_test.cpp | 51
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_roi_pooling_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_simplernms_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_softmax_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_split_test.cpp | 415
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_tile_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/structure/graph_conv_depthwise_fusing_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/structure/graph_dw_conv_fusing_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/structure/graph_optimization_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/structure/graph_structure_test.cpp | 40
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/graph/test_graph.hpp | 88
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/mkldnn_primitive_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/engines/mkldnn/test_layers.cpp | 1
-rw-r--r--  inference-engine/tests/unit/graph_tools/graph_copy_tests.cpp | 52
-rw-r--r--  inference-engine/tests/unit/graph_tools/graph_test_base.hpp | 1
-rw-r--r--  inference-engine/tests/unit/graph_tools/graph_tools_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/alocator_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/blob_proxy_test.cpp | 23
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/blob_test.cpp | 4
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/caslesseq_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/cnn_network_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/async_infer_request_base_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/async_infer_request_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/async_infer_request_thread_safe_default_tests.cpp | 5
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/async_infer_request_thread_safe_internal.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/callback_manager_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executable_network_base_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executable_network_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executable_network_thread_safe_async_only_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executable_network_thread_safe_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executor_manager_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/iinference_plugin_internal_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/memory_state_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/plugin_base_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_common_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_executor_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_synchronizer_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_tests_utils.hpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_with_stages_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/data_test.cpp | 19
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/debug_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/device_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/exception_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/inference_engine_plugin_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/inference_engine_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/layer_transform_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/layers_test.cpp | 169
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/locked_memory_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/plugin_dispatcher_tests.cpp | 18
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/pointer_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/pre_allocator_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/precision_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/preprocess_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/range_iterator_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/response_buffer_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/shared_object_loader_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/so_pointer_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/tensor_desc_test.cpp | 28
-rw-r--r--  inference-engine/tests/unit/inference_engine_tests/util_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/mem_solver/mem_solver_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_async_infer_request_default.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_async_infer_request_internal.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_async_infer_request_thread_safe_internal.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_executable_network_internal.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_executable_thread_safe_async_only.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_executable_thread_safe_default.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_infer_request_internal.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_inference_plugin_internal.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/cpp_interfaces/interface/mock_iasync_infer_request_internal.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/cpp_interfaces/interface/mock_iinfer_request_internal.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/cpp_interfaces/interface/mock_imemory_state_internal.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/cpp_interfaces/mock_plugin_impl.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/cpp_interfaces/mock_task_executor.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/cpp_interfaces/mock_task_synchronizer.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/mock_allocator.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/mock_error_listener.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/mock_iasync_infer_request.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/mock_icnn_network.hpp | 2
-rw-r--r--  inference-engine/tests/unit/mocks/mock_iexecutable_network.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/mock_iformat_parser.hpp | 3
-rw-r--r--  inference-engine/tests/unit/mocks/mock_inference_engine.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/mock_not_empty_icnn_network.hpp | 2
-rw-r--r--  inference-engine/tests/unit/mocks/mock_plugin_dispatcher.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/shape_infer/mock_input_controller.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/shape_infer/mock_ishape_infer_impl.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/shape_infer/mock_output_controller.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/shape_infer/mock_reshaper_launcher.hpp | 1
-rw-r--r--  inference-engine/tests/unit/mocks/shape_infer/mock_shape_infer_extension.hpp | 1
-rw-r--r--  inference-engine/tests/unit/opencv_test_gapi/CMakeLists.txt | 34
-rw-r--r--  inference-engine/tests/unit/opencv_test_gapi/common/gapi_core_tests.cpp (renamed from inference-engine/tests/unit/topology_verification_tests/v1_topology_verification_test.cpp) | 2
-rw-r--r--  inference-engine/tests/unit/opencv_test_gapi/common/gapi_core_tests.hpp | 47
-rw-r--r--  inference-engine/tests/unit/opencv_test_gapi/common/gapi_core_tests_inl.hpp | 876
-rw-r--r--  inference-engine/tests/unit/opencv_test_gapi/common/gapi_tests_common.hpp | 106
-rw-r--r--  inference-engine/tests/unit/opencv_test_gapi/cpu/gapi_core_tests_fluid.cpp | 244
-rw-r--r--  inference-engine/tests/unit/shape_infer/built_in_holder_test.cpp | 4
-rw-r--r--  inference-engine/tests/unit/shape_infer/built_in_shape_infer_batch_test.cpp | 5
-rw-r--r--  inference-engine/tests/unit/shape_infer/built_in_shape_infer_conv_test.cpp | 15
-rw-r--r--  inference-engine/tests/unit/shape_infer/built_in_shape_infer_fake_test.cpp | 5
-rw-r--r--  inference-engine/tests/unit/shape_infer/built_in_shape_infer_general_test.cpp | 233
-rw-r--r--  inference-engine/tests/unit/shape_infer/built_in_shape_infer_general_test.hpp | 118
-rw-r--r--  inference-engine/tests/unit/shape_infer/built_in_shape_infer_pool_test.cpp | 13
-rw-r--r--  inference-engine/tests/unit/shape_infer/cpu_ext_shape_infer_general_test.cpp | 11
-rw-r--r--  inference-engine/tests/unit/shape_infer/input_controller_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/shape_infer/input_reshape_launcher_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/shape_infer/output_controller_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/shape_infer/reshape_launcher_test.cpp | 1
-rw-r--r--  inference-engine/tests/unit/shape_infer/reshaper_test.cpp | 4
-rw-r--r--  inference-engine/tests/unit/stress_tests/stress_tests.cpp | 1
-rw-r--r--  inference-engine/tests/unit/topology_verification_tests/v2_topology_verification_test.cpp | 5
204 files changed, 13391 insertions, 1669 deletions
diff --git a/inference-engine/tests/CMakeLists.txt b/inference-engine/tests/CMakeLists.txt
index bfc653776..4fa0b4477 100644
--- a/inference-engine/tests/CMakeLists.txt
+++ b/inference-engine/tests/CMakeLists.txt
@@ -1,6 +1,7 @@
# Copyright (C) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
+
####################################
## All next project will use C++11
set (CMAKE_CXX_STANDARD 11)
diff --git a/inference-engine/tests/helpers/CMakeLists.txt b/inference-engine/tests/helpers/CMakeLists.txt
index 684c2b650..4ab1278a3 100644
--- a/inference-engine/tests/helpers/CMakeLists.txt
+++ b/inference-engine/tests/helpers/CMakeLists.txt
@@ -1,6 +1,7 @@
# Copyright (C) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
+
cmake_minimum_required(VERSION 2.8)
set(TARGET_NAME helpers)
@@ -23,7 +24,7 @@ add_library(${TARGET_NAME} STATIC
${HELPERS_HEADERS})
target_include_directories(${TARGET_NAME} PUBLIC ${PROJECT_BINARY_DIR})
-target_compile_definitions(${TARGET_NAME} PUBLIC -DMODELS_PATH="${MODELS_PATH}")
+target_compile_definitions(${TARGET_NAME} PUBLIC -DMODELS_PATH=\"${MODELS_PATH}\")
set_property(TARGET ${TARGET_NAME} PROPERTY CXX_STANDARD 11)
set_property(TARGET ${TARGET_NAME} PROPERTY CXX_STANDARD_REQUIRED ON)
diff --git a/inference-engine/tests/helpers/disable_tests.hpp b/inference-engine/tests/helpers/disable_tests.hpp
index 9cc5a827b..d0f0949ce 100644
--- a/inference-engine/tests/helpers/disable_tests.hpp
+++ b/inference-engine/tests/helpers/disable_tests.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/helpers/ir_gen_helper.cpp b/inference-engine/tests/helpers/ir_gen_helper.cpp
new file mode 100644
index 000000000..40a05c451
--- /dev/null
+++ b/inference-engine/tests/helpers/ir_gen_helper.cpp
@@ -0,0 +1,48 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "ir_gen_helper.hpp"
+
+namespace single_layer_tests {
+
+ std::string IRTemplateGenerator::getIRTemplate(const std::string& name,
+ const std::vector<size_t>& input_shape,
+ const std::string& precision,
+ const std::string& layers,
+ const std::string& edges,
+ const unsigned ir_version) {
+ std::string model = model_t;
+ REPLACE_WITH_STR(model, "_NAME_", name);
+ REPLACE_WITH_NUM(model, "_IRv_", ir_version);
+ REPLACE_WITH_STR(model, "_PR_", precision);
+
+ std::string s_dims;
+ for (auto& dim : input_shape) {
+ s_dims += "\n <dim>";
+ s_dims += std::to_string(dim) + "</dim>";
+ }
+ REPLACE_WITH_STR(model, "__SRC_DIMS__", s_dims);
+ REPLACE_WITH_STR(model, "_LAYERS_", layers);
+ REPLACE_WITH_STR(model, "_EDGES_", edges);
+
+ return model;
+ }
+
+ std::string IRTemplateGenerator::model_t = R"V0G0N(
+ <net name="_NAME_" version="_IRv_" precision="_PR_" batch="1">
+ <layers>
+ <layer name="in1" type="Input" precision="_PR_" id="0">
+ <output>
+ <port id="0">__SRC_DIMS__
+ </port>
+ </output>
+ </layer>
+ _LAYERS_
+ </layers>
+ <edges>
+ _EDGES_
+ </edges>
+ </net>
+ )V0G0N";
+}
\ No newline at end of file
diff --git a/inference-engine/tests/helpers/ir_gen_helper.hpp b/inference-engine/tests/helpers/ir_gen_helper.hpp
new file mode 100644
index 000000000..db8bff547
--- /dev/null
+++ b/inference-engine/tests/helpers/ir_gen_helper.hpp
@@ -0,0 +1,27 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#ifndef IR_GEN_HELPER_HPP
+#define IR_GEN_HELPER_HPP
+
+#include "single_layer_common.hpp"
+
+namespace single_layer_tests {
+
+ class IRTemplateGenerator {
+ IRTemplateGenerator() = default;
+ public:
+ static std::string model_t;
+
+ static std::string getIRTemplate(const std::string& name,
+ const std::vector<size_t>& input_shape,
+ const std::string& precision,
+ const std::string& layers,
+ const std::string& edges,
+ const unsigned ir_version = 4u);
+ };
+
+} // namespace single_layer_tests
+#endif /* IR_GEN_HELPER_HPP */
+
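
The new IRTemplateGenerator stamps the network name, IR version, precision, input dims, and caller-supplied layer/edge fragments into the model_t skeleton above. A minimal usage sketch (the layer and edge XML strings below are illustrative placeholders, not taken from the patch):

#include <iostream>
#include <string>
#include "ir_gen_helper.hpp"

int main() {
    // Illustrative one-layer body and its edge; real tests generate these per layer type.
    std::string layers = "<layer name=\"relu1\" type=\"ReLU\" precision=\"FP32\" id=\"1\"></layer>";
    std::string edges  = "<edge from-layer=\"0\" from-port=\"0\" to-layer=\"1\" to-port=\"1\"/>";

    std::string ir = single_layer_tests::IRTemplateGenerator::getIRTemplate(
            "ReLUNet", {1, 3, 227, 227}, "FP32", layers, edges, 4u);
    std::cout << ir << std::endl;  // complete <net> XML, ready for a format parser
    return 0;
}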
diff --git a/inference-engine/tests/helpers/single_layer_common.cpp b/inference-engine/tests/helpers/single_layer_common.cpp
new file mode 100644
index 000000000..434d3f28b
--- /dev/null
+++ b/inference-engine/tests/helpers/single_layer_common.cpp
@@ -0,0 +1,196 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <cmath>
+#include <ie_blob.h>
+#include <ie_layers_property.hpp>
+#include <ie_precision.hpp>
+#include <inference_engine/precision_utils.h>
+#include <gtest/gtest.h>
+#include "single_layer_common.hpp"
+#include <math.h>
+
+using namespace InferenceEngine;
+
+void get_common_dims(const Blob &blob,
+ int32_t &dimx,
+ int32_t &dimy,
+ int32_t &dimz) {
+ if (blob.dims().size() == 2) {
+ dimz = 1;
+ dimy = blob.dims()[1];
+ dimx = blob.dims()[0];
+ } else if (blob.dims().size() == 3 || (blob.dims().size() == 4 && blob.dims()[3] == 1)) {
+ dimx = blob.dims()[0];
+ dimy = blob.dims()[1];
+ dimz = blob.dims()[2];
+ }
+}
+
+void get_common_dims(const Blob &blob,
+ int32_t &dimx,
+ int32_t &dimy,
+ int32_t &dimz,
+ int32_t &dimn) {
+ dimn = 1;
+ if (blob.dims().size() == 2) {
+ dimz = 1;
+ dimy = blob.dims()[1];
+ dimx = blob.dims()[0];
+ } else if (blob.dims().size() == 3 || (blob.dims().size() == 4 && blob.dims()[3] == 1)) {
+ dimx = blob.dims()[0];
+ dimy = blob.dims()[1];
+ dimz = blob.dims()[2];
+ } else {
+ if (blob.dims().size() == 4 && blob.dims()[3] != 1) {
+ dimx = blob.dims()[0];
+ dimy = blob.dims()[1];
+ dimz = blob.dims()[2];
+ dimn = blob.dims()[3];
+ }
+ }
+}
+
+void GenRandomDataCommon(Blob::Ptr blob) {
+ if (blob->precision() == Precision::U8) {
+ auto * blobRawDataU8 = blob->buffer().as<uint8_t*>();
+ size_t count = blob->size();
+ for (size_t i = 0; i < count; i++) {
+ auto val = static_cast<uint8_t>(rand() % 256);
+ blobRawDataU8[i] = val;
+ }
+ } else if (blob->precision() == Precision::FP16) {
+ float scale = 2.0f / RAND_MAX;
+ /* fill by random data in the range (-1, 1)*/
+ auto * blobRawDataFp16 = blob->buffer().as<ie_fp16 *>();
+ size_t count = blob->size();
+ for (size_t indx = 0; indx < count; ++indx) {
+ float val = rand();
+ val = val * scale - 1.0f;
+ blobRawDataFp16[indx] = PrecisionUtils::f32tof16(val);
+ }
+ } else if (blob->precision() == Precision::FP32) {
+ float scale = 2.0f / RAND_MAX;
+ /* fill by random data in the range (-1, 1)*/
+ auto * blobRawDataFp16 = blob->buffer().as<float*>();
+ size_t count = blob->size();
+ for (size_t i = 0; i < count; i++) {
+ float val = rand();
+ val = val * scale - 1.0f;
+ blobRawDataFp16[i] = val;
+ }
+ }
+}
+
+BufferWrapper::BufferWrapper(const Blob::Ptr& blob) : BufferWrapper(blob, blob->precision()) {}
+
+BufferWrapper::BufferWrapper(const Blob::Ptr& blob, Precision _precision) : precision(_precision) {
+ if (precision == Precision::FP16) {
+ fp16_ptr = blob->buffer().as<ie_fp16*>();
+ } else if (precision == Precision::FP32) {
+ fp32_ptr = blob->buffer().as<float*>();
+ } else {
+ THROW_IE_EXCEPTION << "Unsupported precision for compare: " << precision;
+ }
+}
+
+float BufferWrapper::operator[](size_t index) {
+ if (precision == Precision::FP16) return PrecisionUtils::f16tof32(fp16_ptr[index]);
+ return fp32_ptr[index];
+}
+
+void BufferWrapper::insert(size_t index, float value) {
+ if (precision == Precision::FP16) {
+ fp16_ptr[index] = PrecisionUtils::f32tof16(value);
+ } else {
+ fp32_ptr[index] = value;
+ }
+}
+
+void CompareCommon(const Blob::Ptr& actual, const Blob::Ptr& expected, float tolerance) {
+ ASSERT_NE(actual, nullptr);
+ ASSERT_NE(expected, nullptr);
+
+ Layout res_layout = actual->layout();
+ Layout ref_layout = expected->layout();
+ SizeVector res_dims = actual->getTensorDesc().getDims();
+
+ BufferWrapper res_ptr(actual);
+ BufferWrapper ref_ptr(expected);
+
+ size_t res_size = actual->size();
+ size_t ref_size = expected->size();
+ ASSERT_EQ(res_size, ref_size);
+
+ float max_error = 0;
+ size_t actualMaxErrId = 0;
+ size_t expectedMaxErrId = 0;
+
+ if (res_layout == NCHW || res_layout == NHWC) {
+ size_t N = res_dims[0];
+ size_t C = res_dims[1];
+ size_t H = res_dims[2];
+ size_t W = res_dims[3];
+
+ for (size_t n = 0; n < N; n++) {
+ for (size_t c = 0; c < C; c++) {
+ for (size_t h = 0; h < H; h++) {
+ for (size_t w = 0; w < W; w++) {
+ size_t actualIdx = res_layout == NCHW ?
+ w + h * W + c * W * H + n * W * H * C : c + w * C + h * C * W +
+ n * W * H * C;
+ size_t expectedIdx = ref_layout == NCHW ?
+ w + h * W + c * W * H + n * W * H * C : c + w * C + h * C * W +
+ n * C * W * H;
+ float cur_diff = fabs(res_ptr[actualIdx] - ref_ptr[expectedIdx]);
+ if (cur_diff > max_error) {
+ max_error = cur_diff;
+ actualMaxErrId = actualIdx;
+ expectedMaxErrId = expectedIdx;
+ }
+ }
+ }
+ }
+ }
+ } else {
+ if (res_layout == NC) {
+
+ size_t N = res_dims[0];
+ size_t C = res_dims[1];
+ for (size_t n = 0; n < N; n++) {
+ for (size_t c = 0; c < C; c++) {
+ size_t actualIdx = c + n * C;
+ float cur_diff = fabs(res_ptr[actualIdx] - ref_ptr[actualIdx]);
+ if (cur_diff > max_error) {
+ max_error = cur_diff;
+ actualMaxErrId = actualIdx;
+ expectedMaxErrId = actualIdx;
+ }
+ }
+ }
+ } else {
+ for (size_t i = 0; i < ref_size; i++) {
+ float cur_diff = fabs(res_ptr[i] - ref_ptr[i]);
+ if (cur_diff > max_error) {
+ max_error = cur_diff;
+ actualMaxErrId = expectedMaxErrId = i;
+ }
+ }
+ }
+ }
+
+ ASSERT_NEAR(ref_ptr[expectedMaxErrId], res_ptr[actualMaxErrId], tolerance)
+ << "expectedMaxErrId = " << expectedMaxErrId
+ << " actualMaxErrId = " << actualMaxErrId;
+}
+
+void fill_data_common(BufferWrapper& data, size_t size, size_t duty_ratio) {
+ for (size_t i = 0; i < size; i++) {
+ if ((i / duty_ratio) % 2 == 1) {
+ data.insert(i, 0.0);
+ } else {
+ data.insert(i, sin((float) i));
+ }
+ }
+}
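
The index arithmetic in CompareCommon is the usual dense-tensor linearization written out per layout: the offset of element (n, c, h, w) is ((n*C + c)*H + h)*W + w for NCHW and ((n*H + h)*W + w)*C + c for NHWC. A small self-contained check of those closed forms (function and variable names here are illustrative, not part of the patch):

#include <cassert>
#include <cstddef>
#include <vector>

// Offset of element (n, c, h, w) in a dense NCHW buffer:
// w + h*W + c*H*W + n*C*H*W, the same expression CompareCommon uses.
static size_t offsetNCHW(size_t n, size_t c, size_t h, size_t w,
                         size_t C, size_t H, size_t W) {
    return ((n * C + c) * H + h) * W + w;
}

// Offset of the same element in a dense NHWC buffer:
// c + w*C + h*W*C + n*H*W*C.
static size_t offsetNHWC(size_t n, size_t c, size_t h, size_t w,
                         size_t C, size_t H, size_t W) {
    return ((n * H + h) * W + w) * C + c;
}

int main() {
    const size_t N = 2, C = 3, H = 4, W = 5;
    std::vector<bool> seen(N * C * H * W, false);
    // Each layout must map the N*C*H*W coordinates onto distinct offsets.
    for (size_t n = 0; n < N; n++)
        for (size_t c = 0; c < C; c++)
            for (size_t h = 0; h < H; h++)
                for (size_t w = 0; w < W; w++) {
                    size_t off = offsetNHWC(n, c, h, w, C, H, W);
                    assert(off < seen.size() && !seen[off]);
                    seen[off] = true;
                    assert(offsetNCHW(n, c, h, w, C, H, W) < seen.size());
                }
    return 0;
}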
diff --git a/inference-engine/tests/helpers/single_layer_common.hpp b/inference-engine/tests/helpers/single_layer_common.hpp
index 7b852c93c..1354129fb 100644
--- a/inference-engine/tests/helpers/single_layer_common.hpp
+++ b/inference-engine/tests/helpers/single_layer_common.hpp
@@ -1,12 +1,20 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
+#include <ie_blob.h>
+#include <ie_layers_property.hpp>
+#include <inference_engine/precision_utils.h>
+#include <inference_engine/parsers.h>
+#include <xml_net_builder.hpp>
+#include <xml_helper.hpp>
+
#ifndef USE_BOOST_RE
+
#include <regex>
+
#define REPLACE_WITH_STR(SRC, PATTERN, STR) SRC = std::regex_replace(SRC, std::regex(PATTERN), STR)
#define FIND_STR(SRC, PATTERN) std::regex_search(SRC, std::regex(PATTERN))
#else
@@ -16,4 +24,136 @@
#endif
#define REPLACE_WITH_NUM(SRC, PATTERN, NUM) REPLACE_WITH_STR(SRC, PATTERN, std::to_string(NUM))
+#define REPLACE_WITH_NUM_VECTOR(SRC, PATTERN, NUMS) \
+ { std::string result; \
+ if (NUMS.size() > 0) { \
+ result += std::to_string(NUMS[0]); \
+ for (int i = 1; i < NUMS.size(); i++) { \
+ result += "," + std::to_string(NUMS[i]); \
+ } \
+ } \
+ REPLACE_WITH_STR(SRC, PATTERN, result); }
+#define REPLACE_WITH_NUM_VECTOR_REVERSE(SRC, PATTERN, NUMS) \
+ { std::string result; \
+ auto nums_size = NUMS.size(); \
+ if (nums_size > 0) { \
+ result += std::to_string(NUMS[nums_size - 1]); \
+ for (int i = 2; i <= nums_size; i++) { \
+ result += "," + std::to_string(NUMS[nums_size - i]); \
+ } \
+ } \
+ REPLACE_WITH_STR(SRC, PATTERN, result); }
#define REMOVE_LINE(SRC, PATTERN) REPLACE_WITH_STR(SRC, PATTERN, "")
+
+struct conv_common_params {
+ InferenceEngine::PropertyVector<unsigned int> stride;
+ InferenceEngine::PropertyVector<unsigned int> kernel;
+ InferenceEngine::PropertyVector<unsigned int> pads_begin;
+ InferenceEngine::PropertyVector<unsigned int> pads_end;
+ InferenceEngine::PropertyVector<unsigned int> dilation;
+ std::string auto_pad;
+ size_t group;
+ size_t out_c;
+};
+
+struct pool_common_params {
+ InferenceEngine::PropertyVector<unsigned int> stride;
+ InferenceEngine::PropertyVector<unsigned int> kernel;
+ InferenceEngine::PropertyVector<unsigned int> pads_begin;
+ InferenceEngine::PropertyVector<unsigned int> pads_end;
+ std::string auto_pad;
+ bool avg;
+ bool exclude_pad;
+};
+
+#define PRETTY_PARAM(name, type) \
+ class name \
+ { \
+ public: \
+ typedef type param_type; \
+ name ( param_type arg = param_type ()) : val_(arg) {} \
+ operator param_type () const {return val_;} \
+ private: \
+ param_type val_; \
+ }; \
+ static inline void PrintTo(name param, ::std::ostream* os) \
+ { \
+ *os << #name ": " << ::testing::PrintToString((name::param_type)(param)); \
+ }
+
+struct MapStrStr {
+ std::map<std::string, std::string> data{};
+
+ explicit MapStrStr(std::map<std::string, std::string> _data) : data(std::move(_data)) {}
+
+ MapStrStr() = default;
+};
+
+void get_common_dims(const InferenceEngine::Blob &blob,
+ int32_t &dimx,
+ int32_t &dimy,
+ int32_t &dimz);
+
+void get_common_dims(const InferenceEngine::Blob &blob,
+ int32_t &dimx,
+ int32_t &dimy,
+ int32_t &dimz,
+ int32_t &dimn);
+
+template<int Version = 3>
+inline InferenceEngine::details::CNNNetworkImplPtr
+buildSingleLayerNetworkCommon(InferenceEngine::details::IFormatParser *parser,
+ const std::string &layerType,
+ const testing::InOutData &inOutShapes,
+ std::map<std::string, std::string> *params,
+ const std::string &layerDataName = "data",
+ const InferenceEngine::Precision &precision = InferenceEngine::Precision::FP32,
+ size_t weightsSize = 0,
+ size_t biasesSize = 0,
+ const InferenceEngine::TBlob<uint8_t>::Ptr &weights = nullptr) {
+ IE_ASSERT(parser);
+ testing::XMLHelper xmlHelper(parser);
+ std::string precisionStr = precision.name();
+ auto netBuilder = testing::XmlNetBuilder<Version>::buildNetworkWithOneInput("Mock", inOutShapes.inDims[0],
+ precisionStr);
+ size_t inputsNumber = inOutShapes.inDims.size();
+ for (int i = 1; i < inputsNumber; i++) {
+ netBuilder.addInputLayer(precisionStr, inOutShapes.inDims[i]);
+ }
+ netBuilder.addLayer(layerType, precisionStr, params, inOutShapes, weightsSize, biasesSize, layerDataName);
+ std::string testContent;
+ if (inputsNumber > 1) {
+ auto edgeBuilder = netBuilder.havingEdges();
+ for (size_t i = 0; i < inputsNumber; i++) {
+ edgeBuilder.connect(i, inputsNumber);
+ }
+ testContent = edgeBuilder.finish();
+ } else {
+ testContent = netBuilder.finish();
+ }
+ xmlHelper.loadContent(testContent);
+ auto result = xmlHelper.parseWithReturningNetwork();
+ if (weights) xmlHelper.setWeights(weights);
+ return result;
+}
+
+void GenRandomDataCommon(InferenceEngine::Blob::Ptr blob);
+
+class BufferWrapper {
+ InferenceEngine::Precision precision;
+ InferenceEngine::ie_fp16 *fp16_ptr;
+ float *fp32_ptr;
+public:
+ explicit BufferWrapper(const InferenceEngine::Blob::Ptr &blob);
+
+ BufferWrapper(const InferenceEngine::Blob::Ptr &blob, InferenceEngine::Precision precision);
+
+ float operator[](size_t index);
+
+ void insert(size_t index, float value);
+};
+
+void
+CompareCommon(const InferenceEngine::Blob::Ptr &actual, const InferenceEngine::Blob::Ptr &expected, float tolerance);
+
+void fill_data_common(BufferWrapper &data, size_t size, size_t duty_ratio = 10);
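
The PRETTY_PARAM macro added above generates a named wrapper type with an implicit conversion and a gtest PrintTo overload, so failures in parameterized tests print "Name: value" instead of opaque bytes. A hedged sketch of how a test might use it (the test and parameter names are hypothetical, the helper header is assumed to be on the include path, and INSTANTIATE_TEST_CASE_P is the gtest macro of this era):

#include <tuple>
#include <gtest/gtest.h>
#include "single_layer_common.hpp"

// Hypothetical named parameters; each expands to a class with operator param_type().
PRETTY_PARAM(Channels, int)
PRETTY_PARAM(Tolerance, float)

class DemoLayerTest : public ::testing::TestWithParam<std::tuple<Channels, Tolerance>> {};

TEST_P(DemoLayerTest, printsReadableParams) {
    int channels = std::get<0>(GetParam());   // implicit conversion via operator param_type
    float tol    = std::get<1>(GetParam());
    ASSERT_GT(channels, 0);
    ASSERT_GE(tol, 0.0f);
}

INSTANTIATE_TEST_CASE_P(Smoke, DemoLayerTest,
                        ::testing::Combine(::testing::Values(Channels(3)),
                                           ::testing::Values(Tolerance(0.01f))));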
diff --git a/inference-engine/tests/helpers/test_assertions.hpp b/inference-engine/tests/helpers/test_assertions.hpp
index a8ae36608..5e2ee36c0 100644
--- a/inference-engine/tests/helpers/test_assertions.hpp
+++ b/inference-engine/tests/helpers/test_assertions.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/helpers/test_model_path.hpp b/inference-engine/tests/helpers/test_model_path.hpp
index ecce409dd..73f4fc643 100644
--- a/inference-engine/tests/helpers/test_model_path.hpp
+++ b/inference-engine/tests/helpers/test_model_path.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/helpers/test_model_repo.hpp.in b/inference-engine/tests/helpers/test_model_repo.hpp.in
index 6c3f3be4b..5356f9886 100644
--- a/inference-engine/tests/helpers/test_model_repo.hpp.in
+++ b/inference-engine/tests/helpers/test_model_repo.hpp.in
@@ -1,17 +1,5 @@
-//
-// Copyright 2017-2018 Intel Corporation.
-//
-// This software and the related documents are Intel copyrighted materials,
-// and your use of them is governed by the express license under which they
-// were provided to you (End User License Agreement for the Intel(R) Software
-// Development Products (Version May 2017)). Unless the License provides
-// otherwise, you may not use, modify, copy, publish, distribute, disclose or
-// transmit this software or the related documents without Intel's prior
-// written permission.
-//
-// This software and the related documents are provided as is, with no
-// express or implied warranties, other than those that are expressly
-// stated in the License.
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
//
#pragma once
diff --git a/inference-engine/tests/helpers/test_models_path.cpp b/inference-engine/tests/helpers/test_models_path.cpp
index de5cc2b66..69d97b880 100644
--- a/inference-engine/tests/helpers/test_models_path.cpp
+++ b/inference-engine/tests/helpers/test_models_path.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/helpers/tests_common.hpp b/inference-engine/tests/helpers/tests_common.hpp
index 9c6100458..d9698ae7d 100644
--- a/inference-engine/tests/helpers/tests_common.hpp
+++ b/inference-engine/tests/helpers/tests_common.hpp
@@ -1,9 +1,9 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
+
#include <cctype>
#include <gtest/gtest.h>
#include <gmock/gmock.h>
@@ -114,15 +114,21 @@ public:
static std::string make_so_name(const std::string & input) {
#ifdef _WIN32
#ifdef __MINGW32__
- return "lib" + input + ".dll";
+ std::string pre = "lib";
+ std::string ext = ".dll";
#else
- return input + ".dll";
+ std::string pre = "";
+ std::string ext = ".dll";
#endif
#elif __APPLE__
- return "lib" + input + ".dylib";
+ std::string pre = "lib";
+ std::string ext = ".dylib";
#else
- return "lib" + input + ".so";
+ std::string pre = "lib";
+ std::string ext = ".so";
#endif
+ return pre + input + IE_BUILD_POSTFIX + ext;
+
}
static std::string make_plugin_name(const std::string & input) {
@@ -161,7 +167,7 @@ public:
}
}
- void compare(InferenceEngine::Blob &res, InferenceEngine::Blob &ref, float max_diff = 0.01f) {
+ static void compare(InferenceEngine::Blob &res, InferenceEngine::Blob &ref, float max_diff = 0.01f) {
float *res_ptr = res.buffer().as<float*>();
size_t res_size = res.size();
@@ -176,7 +182,7 @@ public:
}
}
- void compare_NRMSD(InferenceEngine::Blob &res, InferenceEngine::Blob &ref, float max_nrmsd = 0.01f) {
+ static void compare_NRMSD(InferenceEngine::Blob &res, InferenceEngine::Blob &ref, float max_nrmsd = 0.01f) {
float *res_ptr = res.buffer().as<float*>();
size_t res_size = res.size();
@@ -195,8 +201,8 @@ public:
sqr *= sqr;
sum += sqr;
- mmin = std::min(mmin, ref_ptr[i]);
- mmax = std::max(mmax, ref_ptr[i]);
+ mmin = (std::min)(mmin, ref_ptr[i]);
+ mmax = (std::max)(mmax, ref_ptr[i]);
if (i % 10007 == 0) {
std::cout << i << ": " << res_ptr[i] << "\t" << ref_ptr[i] << "\t" << "\tdiv: " << ref_ptr[i] / res_ptr[i] << std::endl;
@@ -212,7 +218,7 @@ public:
ASSERT_LE(sum, max_nrmsd);
}
- void compare(float* res, float* ref, size_t size, float max_diff = 0.01f) {
+ static void compare(float* res, float* ref, size_t size, float max_diff = 0.01f) {
for (size_t i = 0; i < size; i++) {
ASSERT_NEAR(res[i], ref[i], max_diff);
}
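
The switch from std::min/std::max to (std::min)/(std::max) in compare_NRMSD is a portability fix: when <windows.h> is included without NOMINMAX, min and max are function-like macros, and std::min(a, b) fails to compile because the preprocessor expands at "min(". Wrapping the name in parentheses suppresses expansion, since a function-like macro only expands when its name is immediately followed by an opening parenthesis. A tiny illustration:

#include <algorithm>

// Safe even if a function-like `min` macro is in scope (e.g. from <windows.h>):
// the parentheses keep the preprocessor from treating this as a macro call.
float smallest(float a, float b) {
    return (std::min)(a, b);
}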
diff --git a/inference-engine/tests/helpers/tests_common_func.hpp b/inference-engine/tests/helpers/tests_common_func.hpp
index 1c1c8a6ad..387d5a639 100644
--- a/inference-engine/tests/helpers/tests_common_func.hpp
+++ b/inference-engine/tests/helpers/tests_common_func.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/helpers/tests_file_utils.cpp b/inference-engine/tests/helpers/tests_file_utils.cpp
index 2225f1258..b23e72679 100644
--- a/inference-engine/tests/helpers/tests_file_utils.cpp
+++ b/inference-engine/tests/helpers/tests_file_utils.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/helpers/tests_file_utils.hpp b/inference-engine/tests/helpers/tests_file_utils.hpp
index 14c883fd9..dbfa50cc8 100644
--- a/inference-engine/tests/helpers/tests_file_utils.hpp
+++ b/inference-engine/tests/helpers/tests_file_utils.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/helpers/tests_utils.hpp b/inference-engine/tests/helpers/tests_utils.hpp
index 6d98af70e..3a44889a8 100644
--- a/inference-engine/tests/helpers/tests_utils.hpp
+++ b/inference-engine/tests/helpers/tests_utils.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/helpers/version_printer.cpp b/inference-engine/tests/helpers/version_printer.cpp
index 12f7a8839..7448c99d6 100644
--- a/inference-engine/tests/helpers/version_printer.cpp
+++ b/inference-engine/tests/helpers/version_printer.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/helpers/xml_father.hpp b/inference-engine/tests/helpers/xml_father.hpp
index 3e349ef97..90b7d732a 100644
--- a/inference-engine/tests/helpers/xml_father.hpp
+++ b/inference-engine/tests/helpers/xml_father.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -88,6 +87,10 @@ namespace testing {
return _content;
}
+ void add_content (std::string content) {
+ _content += content;
+ }
+
std::string attr () const {
return _attr;
}
diff --git a/inference-engine/tests/helpers/xml_helper.hpp b/inference-engine/tests/helpers/xml_helper.hpp
index 94ac84d74..75cc1314b 100644
--- a/inference-engine/tests/helpers/xml_helper.hpp
+++ b/inference-engine/tests/helpers/xml_helper.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -15,6 +14,7 @@
#include <fstream>
#include <stdio.h>
#include "cpp/ie_cnn_network.h"
+#include <gtest/gtest.h>
#include "ie_icnn_network_stats.hpp"
namespace testing {
@@ -45,6 +45,10 @@ namespace testing {
return parser->Parse(*_root);
}
+ void setWeights(const InferenceEngine::TBlob<uint8_t>::Ptr &weights) {
+ parser->SetWeights(weights);
+ }
+
std::string readFileContent(const std::string & filePath) {
const auto openFlags = std::ios_base::ate | std::ios_base::binary;
std::ifstream fp (getXmlPath(filePath), openFlags);
diff --git a/inference-engine/tests/helpers/xml_net_builder.cpp b/inference-engine/tests/helpers/xml_net_builder.cpp
index 892881c2e..45f967270 100644
--- a/inference-engine/tests/helpers/xml_net_builder.cpp
+++ b/inference-engine/tests/helpers/xml_net_builder.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -10,17 +9,6 @@
using namespace ::testing;
-IDManager* IDManager::_instance = nullptr;
-size_t IDManager::portID = 0;
-size_t IDManager::layerID = 0;
-
-IDManager* IDManager::getInstance() {
- if (!_instance) {
- _instance = new IDManager();
- }
- return _instance;
-}
-
size_t IDManager::getNextLayerID() {
return layerID++;
}
@@ -33,16 +21,15 @@ void IDManager::reset() {
portID = layerID = 0;
}
-LayerDesc::LayerDesc(std::string type, InOutData& shapes) : _type(std::move(type)) {
- auto idManager = IDManager::getInstance();
- _layerID = idManager->getNextLayerID();
+LayerDesc::LayerDesc(std::string type, InOutData& shapes, IDManager &id_manager) : _type(std::move(type)) {
+ _layerID = id_manager.getNextLayerID();
auto inDims = shapes.inDims;
auto outDims = shapes.outDims;
for (const auto& inDim : inDims) {
- _inPortsID.emplace_back(idManager->getNextPortID(), inDim);
+ _inPortsID.emplace_back(id_manager.getNextPortID(), inDim);
}
for (const auto& outDim : outDims) {
- _outPortsID.emplace_back(idManager->getNextPortID(), outDim);
+ _outPortsID.emplace_back(id_manager.getNextPortID(), outDim);
}
}
diff --git a/inference-engine/tests/helpers/xml_net_builder.hpp b/inference-engine/tests/helpers/xml_net_builder.hpp
index 56ede0fc2..81fa21dc5 100644
--- a/inference-engine/tests/helpers/xml_net_builder.hpp
+++ b/inference-engine/tests/helpers/xml_net_builder.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -81,37 +80,28 @@ struct TokenType<0> {
*/
class IDManager {
public:
- /**
- * @brief Returns single instanse of the class
- */
- static IDManager* getInstance();
-
- IDManager(IDManager const&) = delete;
-
+ IDManager() = default;
+// IDManager(IDManager const&) = delete;
void operator=(IDManager const&) = delete;
/**
* @brief Returns new unique number for layer to be used in IR
*/
- static size_t getNextLayerID();
+ size_t getNextLayerID();
/**
* @brief Returns new unique number for port to be used in IR
*/
- static size_t getNextPortID();
+ size_t getNextPortID();
/**
* @brief Reset numbers for layers and ports. It's convenient to always start new network from zero number.
*/
- static void reset();
+ void reset();
private:
- IDManager() = default;
-
-private:
- static size_t layerID;
- static size_t portID;
- static IDManager* _instance;
+ size_t layerID = 0;
+ size_t portID = 0;
};
/**
@@ -147,7 +137,7 @@ public:
* @param type - string with type of the layer
* @param shapes - reference to the structure with input and output shapes
*/
- explicit LayerDesc(std::string type, InOutData& shapes);
+ explicit LayerDesc(std::string type, InOutData& shapes, IDManager &id_manager);
/**
* @brief Resets current input and output ports to iterate over all input and output ports
@@ -227,15 +217,31 @@ class XmlNetBuilder {
std::vector<LayerDesc::Ptr> layersDesc;
std::shared_ptr<XMLFather> root;
testing::Token<testing::Token<XMLFather>>& xml;
+ IDManager id_manager;
XmlNetBuilder(std::shared_ptr<XMLFather> _root,
- typename testing::Token<testing::Token<XMLFather>>& _xml) : xml(_xml), root(_root) {
- IDManager::reset();
- };
+ typename testing::Token<testing::Token<XMLFather>>& _xml) : xml(_xml), root(_root) {};
public:
static XmlNetBuilder buildNetworkWithOneInput(
- std::string name = "AlexNet", std::vector<size_t> dims = {1, 3, 227, 227}, std::string precision = "Q78");
+ std::string name = "AlexNet", std::vector<size_t> dims = {1, 3, 227, 227}, std::string precision = "Q78") {
+ std::shared_ptr<XMLFather> root = std::make_shared<XMLFather>();
+ auto &exp = root->node("net").attr("name", name).attr("precision", precision).attr("version", Version);
+ if (Version == 1) {
+ auto &expFinal = exp.node("input").attr("name", "data");
+ addDims(expFinal, dims);
+ return XmlNetBuilder(root, expFinal.close().node("layers"));
+ } else {
+ auto &expFinal = exp.attr("batch", 1);
+ return XmlNetBuilder(root, expFinal.node("layers")).addInputLayer(precision, dims);
+ }
+ }
+
+ static XmlNetBuilder buildBody() {
+ auto root = std::make_shared<XMLFather>(XMLFather::make_without_schema());
+ auto &exp = root->node("body");
+ return XmlNetBuilder(root, exp.node("layers"));
+ }
XmlNetBuilder& havingLayers() {
return *this;
@@ -281,15 +287,55 @@ public:
return addLayer("Pooling", "", &params, inout, 0, 0, "pooling_data");
}
+ struct TIPortMap { int from_l, from_p, to_l, to_p, axis, stride, start, end; };
+
+ XmlNetBuilder& TILayer(InOutData inout,
+ std::string body,
+ std::vector<TIPortMap> inMap,
+ std::vector<TIPortMap> outMap,
+ std::vector<TIPortMap> backMap) {
+ auto builder = XMLFather::make_without_schema();
+ // Port map section
+ auto &ports = builder.node("port_map");
+ auto fill_port_map_info = [&] (std::string name, TIPortMap m) {
+ auto & exp = ports.node(name)
+ .attr("external_port_id", m.from_p)
+ .attr("internal_layer_id", m.to_l)
+ .attr("internal_port_id", m.to_p);
+ if (m.axis != -1)
+ exp.attr("axis", m.axis).attr("stride", m.stride).attr("start", m.start).attr("end", m.end);
+ exp.close();
+ };
+ for (auto &m : inMap) fill_port_map_info("input", m);
+ for (auto &m : outMap) fill_port_map_info("output", m);
+ ports.close();
+ // BackEdge map section
+ auto &backedges = builder.node("back_edges");
+ for (auto &m : backMap) {
+ backedges.node("edge")
+ .attr("from-layer", m.from_l)
+ .attr("from-port", m.from_p)
+ .attr("to-layer", m.to_l)
+ .attr("to-port", m.to_p).close();
+ }
+ backedges.close();
+ // Serialize all TI info
+ std::string content = builder;
+ content += body;
+
+ return addLayer("TensorIterator", "FP32", nullptr, inout, 0,0, "data", content);
+ }
+
XmlNetBuilder& addLayer(const std::string& type,
const std::string& precision,
std::map<std::string, std::string>* params,
InOutData inout,
int weightsSize = 0,
int biasesSize = 0,
- std::string layerDataName = "data") {
+ std::string layerDataName = "data",
+ std::string content = "") {
layersNum++;
- auto layerDesc = std::make_shared<LayerDesc>(type, inout);
+ auto layerDesc = std::make_shared<LayerDesc>(type, inout, id_manager);
layersDesc.push_back(layerDesc);
auto& layer = xml.node("layer").attr("name", layerDesc->getLayerName()).attr("precision", precision)
@@ -308,6 +354,8 @@ public:
layer = layer.node("biases").attr("offset", weightsSize).attr("size", biasesSize).close();
}
}
+ if (!content.empty())
+ layer.add_content(content);
layer.close();
return *this;
}
@@ -384,7 +432,7 @@ private:
template<class T>
void addEdges(T& mainContent) {
- size_t firstLayerNum = Version == 2 ? 0 : 1;
+ size_t firstLayerNum = Version >= 2 ? 0 : 1;
if (layersNum <= firstLayerNum) {
return;
}
@@ -405,33 +453,13 @@ private:
template<class T>
void addPreProcess(T& mainContent) {
auto& preProcess = mainContent.node("pre-process");
- if (Version == 2) {
+ if (Version >= 2) {
preProcess.attr("reference-layer-name", layersDesc[0]->getLayerName());
}
preProcess.close();
}
};
-template<>
-inline XmlNetBuilder<1> XmlNetBuilder<1>::buildNetworkWithOneInput(
- std::string name, std::vector<size_t> dims, std::string precision) {
- std::shared_ptr<XMLFather> root = std::make_shared<XMLFather>();
-
- auto& exp = root->node("net").attr("name", name).attr("precision", precision).attr("version", 1)
- .node("input").attr("name", "data");
- addDims(exp, dims);
- return XmlNetBuilder(root, exp.close().node("layers"));
-}
-
-template<>
-inline XmlNetBuilder<2> XmlNetBuilder<2>::buildNetworkWithOneInput(
- std::string name, std::vector<size_t> dims, std::string precision) {
- std::shared_ptr<XMLFather> root = std::make_shared<XMLFather>();
-
- auto& exp = root->node("net").attr("name", name).attr("precision", precision).attr("version", 2).attr("batch", 1);
- return XmlNetBuilder(root, exp.node("layers")).addInputLayer(precision, dims);
-}
-
typedef XmlNetBuilder<1> V1NetBuilder;
typedef XmlNetBuilder<2> V2NetBuilder;
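
The xml_net_builder change above retires the IDManager singleton: each XmlNetBuilder now owns an id_manager member, so two builders living in the same process no longer share global layer/port counters, and the explicit reset() in the constructor becomes unnecessary. A simplified sketch of the resulting behavior (the class body mirrors the patched header; main is illustrative):

#include <cassert>
#include <cstddef>

// Per-instance ID source, as in the patched xml_net_builder.hpp.
class IDManager {
public:
    IDManager() = default;
    void operator=(IDManager const&) = delete;
    size_t getNextLayerID() { return layerID++; }
    size_t getNextPortID()  { return portID++; }
    void reset()            { portID = layerID = 0; }
private:
    size_t layerID = 0;
    size_t portID = 0;
};

int main() {
    IDManager a, b;                   // e.g. one per XmlNetBuilder
    assert(a.getNextLayerID() == 0);
    assert(a.getNextLayerID() == 1);
    assert(b.getNextLayerID() == 0);  // independent of 'a'; the old singleton would return 2
    return 0;
}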
diff --git a/inference-engine/tests/mock_engine/CMakeLists.txt b/inference-engine/tests/mock_engine/CMakeLists.txt
index f991c70e4..dc1edfb5e 100644
--- a/inference-engine/tests/mock_engine/CMakeLists.txt
+++ b/inference-engine/tests/mock_engine/CMakeLists.txt
@@ -1,6 +1,7 @@
# Copyright (C) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
+
set (TARGET_NAME "mock_engine")
file (GLOB LIBRARY_SRC
@@ -15,9 +16,8 @@ file (GLOB LIBRARY_HEADERS
if(UNIX)
list(REMOVE_ITEM LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/dllmain.cpp)
-else()
- add_definitions(-DIMPLEMENT_INFERENCE_ENGINE_API)
endif()
+add_definitions(-DIMPLEMENT_INFERENCE_ENGINE_API)
# Create named folders for the sources within the .vcproj
# Empty name lists them directly under the .vcproj
@@ -38,4 +38,4 @@ add_library(${TARGET_NAME} SHARED
set_property(TARGET ${TARGET_NAME} PROPERTY CXX_STANDARD 11)
set_property(TARGET ${TARGET_NAME} PROPERTY CXX_STANDARD_REQUIRED ON)
-set_property(TARGET ${TARGET_NAME} PROPERTY COMPILE_PDB_NAME ${TARGET_NAME})
\ No newline at end of file
+set_property(TARGET ${TARGET_NAME} PROPERTY COMPILE_PDB_NAME ${TARGET_NAME})
diff --git a/inference-engine/tests/mock_engine/dllmain.cpp b/inference-engine/tests/mock_engine/dllmain.cpp
index 5d750aed5..a9dd58a52 100644
--- a/inference-engine/tests/mock_engine/dllmain.cpp
+++ b/inference-engine/tests/mock_engine/dllmain.cpp
@@ -1,8 +1,7 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
-
+// dllmain.cpp : Defines the entry point for the DLL application.
#ifdef _WIN32
#define _WINSOCKAPI_
#include <windows.h>
diff --git a/inference-engine/tests/mock_engine/mock_plugin.cpp b/inference-engine/tests/mock_engine/mock_plugin.cpp
index e7b95ab64..0d344c8ba 100644
--- a/inference-engine/tests/mock_engine/mock_plugin.cpp
+++ b/inference-engine/tests/mock_engine/mock_plugin.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/mock_engine/mock_plugin.hpp b/inference-engine/tests/mock_engine/mock_plugin.hpp
index b67666fc5..970638134 100644
--- a/inference-engine/tests/mock_engine/mock_plugin.hpp
+++ b/inference-engine/tests/mock_engine/mock_plugin.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/mock_engine/stub_inference_engine.xpp b/inference-engine/tests/mock_engine/stub_inference_engine.xpp
index 9545650ba..fa2d9de1f 100644
--- a/inference-engine/tests/mock_engine/stub_inference_engine.xpp
+++ b/inference-engine/tests/mock_engine/stub_inference_engine.xpp
@@ -1,6 +1,7 @@
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
+
#include <random>
#include <algorithm>
diff --git a/inference-engine/tests/unit/CMakeLists.txt b/inference-engine/tests/unit/CMakeLists.txt
index 0c6b15f1f..4761e8336 100644
--- a/inference-engine/tests/unit/CMakeLists.txt
+++ b/inference-engine/tests/unit/CMakeLists.txt
@@ -1,6 +1,7 @@
# Copyright (C) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
+
cmake_minimum_required(VERSION 2.8)
cmake_policy(SET CMP0054 NEW)
@@ -39,15 +40,24 @@ file(GLOB
stress_tests/*.cpp
)
-if (ENABLE_MKL_DNN)
- if (THREADING STREQUAL "OMP")
- find_package(OpenMP)
- if (OPENMP_FOUND)
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
- endif ()
- endif ()
+if (ENABLE_GNA)
+ file(GLOB
+ GNA_TESTS
+ engines/gna/*cpp
+ )
+ list(APPEND TEST_SRC ${GNA_TESTS})
+ source_group("gna" FILES ${GNA_TESTS})
+
+ find_package(libGNA)
+ include_directories(${libGNA_INCLUDE_DIRS})
+ set (GNA_TEST_ENGINE GNAPlugin_test_static)
+endif()
+
+if (ENABLE_MKL_DNN)
+ if (GEMM STREQUAL "MKL")
+ add_definitions(-DUSE_MKL)
+ endif ()
file(GLOB
MKLDNN_TESTS
engines/mkldnn/*.cpp
@@ -87,10 +97,14 @@ include_directories(
${IE_MAIN_SOURCE_DIR}/include
${IE_MAIN_SOURCE_DIR}/src/inference_engine
${IE_MAIN_SOURCE_DIR}/src/mkldnn_plugin
+ ${IE_MAIN_SOURCE_DIR}/src/gna_plugin
${IE_MAIN_SOURCE_DIR}/src/extension
+ ${IE_MAIN_SOURCE_DIR}/src/extension/common
${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}/gflags/include
mocks)
add_executable(${TARGET_NAME} ${TEST_SRC} ${TEST_INCLUDE} ${MKLDNN_TESTS} ${MKLDNN_TESTS_INCLUDE} ${DLAI_TESTS})
+set_ie_threading_interface_for(${TARGET_NAME})
+
set_target_properties(${TARGET_NAME} PROPERTIES "CMAKE_CXX_FLAGS" "${CMAKE_CXX_FLAGS} -fPIE"
COMPILE_PDB_NAME ${TARGET_NAME})
@@ -111,11 +125,9 @@ else ()
set(PUGI pugixml)
endif ()
-add_definitions(-DMODELS_PATH="${MODELS_PATH}" -DDATA_PATH="${IE_MAIN_SOURCE_DIR}/tests/data")
+add_definitions(-DMODELS_PATH=\"${MODELS_PATH}\" -DDATA_PATH=\"${IE_MAIN_SOURCE_DIR}/tests/data\")
-target_compile_definitions(${TARGET_NAME} PUBLIC -DUSE_STATIC_IE)
-
-target_link_libraries(${TARGET_NAME}
+target_link_libraries(${TARGET_NAME} PRIVATE
gtest
gmock
gtest_main
@@ -128,10 +140,13 @@ target_link_libraries(${TARGET_NAME}
${INTEL_ITT_LIBS}
${Boost_REGEX_LIBRARY}
${TBB_LIBRARY}
- ${TBBMALLOC_LIBRARY})
+ ${TBBMALLOC_LIBRARY}
+ ${GNA_TEST_ENGINE})
+
+add_dependencies(${TARGET_NAME} ie_cpu_extension)
if (ENABLE_MKL_DNN)
- target_link_libraries(${TARGET_NAME}
+ target_link_libraries(${TARGET_NAME} PRIVATE
test_MKLDNNPlugin
mkldnn)
endif ()
@@ -140,3 +155,6 @@ add_test(NAME ${TARGET_NAME}
COMMAND ${TARGET_NAME})
add_dependencies(${TARGET_NAME} mock_engine)
+
+# GAPI unit tests
+add_subdirectory(opencv_test_gapi)
diff --git a/inference-engine/tests/unit/builders/batch_normalization_layer_test.cpp b/inference-engine/tests/unit/builders/batch_normalization_layer_test.cpp
new file mode 100644
index 000000000..5d55c17b0
--- /dev/null
+++ b/inference-engine/tests/unit/builders/batch_normalization_layer_test.cpp
@@ -0,0 +1,36 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <string.h>
+#include <ie_builders.hpp>
+#include <builders/ie_batch_normalization_layer.hpp>
+
+#include "builder_test.hpp"
+
+using namespace testing;
+using namespace InferenceEngine;
+
+class BatchNormalizationLayerBuilderTest : public BuilderTestCommon {};
+
+TEST_F(BatchNormalizationLayerBuilderTest, cannotCreateBatchNormalizationWithoutWeightOrBiases) {
+ ASSERT_THROW(((Builder::Layer)Builder::BatchNormalizationLayer("in1")), InferenceEngine::details::InferenceEngineException);
+ ASSERT_THROW(((Builder::Layer)Builder::BatchNormalizationLayer("in1")
+ .setWeights(generateBlob(Precision::FP32, {3}, Layout::C))), InferenceEngine::details::InferenceEngineException);
+ ASSERT_THROW(((Builder::Layer)Builder::BatchNormalizationLayer("in1")
+ .setBiases(generateBlob(Precision::FP32, {3}, Layout::C))), InferenceEngine::details::InferenceEngineException);
+}
+
+TEST_F(BatchNormalizationLayerBuilderTest, getExistsLayerFromNetworkBuilder) {
+ Builder::Network network("Test");
+ Builder::BatchNormalizationLayer bnBuilder("bn");
+ bnBuilder.setWeights(generateBlob(Precision::FP32, {3}, Layout::C));
+ bnBuilder.setBiases(generateBlob(Precision::FP32, {3}, Layout::C));
+ size_t bnId = network.addLayer(bnBuilder);
+ Builder::BatchNormalizationLayer bnBuilderFromNetwork(network.getLayer(bnId));
+ ASSERT_EQ(bnBuilderFromNetwork.getEpsilon(), bnBuilder.getEpsilon());
+ bnBuilderFromNetwork.setEpsilon(2);
+ ASSERT_NE(bnBuilderFromNetwork.getEpsilon(), bnBuilder.getEpsilon());
+ ASSERT_EQ(bnBuilderFromNetwork.getEpsilon(), network.getLayer(bnId).getParameters()["epsilon"].asFloat());
+}
\ No newline at end of file
diff --git a/inference-engine/tests/unit/builders/builder_test.hpp b/inference-engine/tests/unit/builders/builder_test.hpp
new file mode 100644
index 000000000..28ef342c8
--- /dev/null
+++ b/inference-engine/tests/unit/builders/builder_test.hpp
@@ -0,0 +1,33 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <string.h>
+#include <ie_builders.hpp>
+#include <blob_factory.hpp>
+
+#include "tests_common.hpp"
+
+
+class BuilderTestCommon : public TestsCommon {
+public:
+ InferenceEngine::Blob::Ptr generateBlob(InferenceEngine::Precision precision,
+ InferenceEngine::SizeVector dims, InferenceEngine::Layout layout) {
+ InferenceEngine::Blob::Ptr blob = make_blob_with_precision(InferenceEngine::TensorDesc(precision, dims, layout));
+ blob->allocate();
+ fill_data(blob);
+ return blob;
+ }
+
+ template<class T>
+ InferenceEngine::Blob::Ptr generateBlob(InferenceEngine::Precision precision,
+ InferenceEngine::SizeVector dims, InferenceEngine::Layout layout,
+ std::vector<T> data) {
+ auto blob = generateBlob(precision, dims, layout);
+ auto *blbData = blob->buffer().as<T *>();
+ for (size_t i = 0; i < data.size(); i++) {
+ blbData[i] = data[i];
+ }
+ return blob;
+ }
+}; \ No newline at end of file
diff --git a/inference-engine/tests/unit/builders/input_layer_test.cpp b/inference-engine/tests/unit/builders/input_layer_test.cpp
new file mode 100644
index 000000000..6a30fdb64
--- /dev/null
+++ b/inference-engine/tests/unit/builders/input_layer_test.cpp
@@ -0,0 +1,32 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <string.h>
+#include <ie_builders.hpp>
+
+#include "builder_test.hpp"
+
+using namespace testing;
+using namespace InferenceEngine;
+
+class InputLayerBuilderTest : public BuilderTestCommon {};
+
+TEST_F(InputLayerBuilderTest, cannotCreateInputWithoutPort) {
+ ASSERT_THROW(((Builder::Layer)Builder::InputLayer("in1")).build(), InferenceEngine::details::InferenceEngineException);
+}
+
+TEST_F(InputLayerBuilderTest, getExistsLayerFromNetworkBuilder) {
+ Builder::Network network("Test");
+ Builder::InputLayer inBuilder("in1");
+ inBuilder.setPort(Port({1, 3, 3, 3}));
+ size_t inId = network.addLayer(inBuilder);
+ ASSERT_EQ(inBuilder.getPort().shape(), Port({1, 3, 3, 3}).shape());
+ Builder::InputLayer inBuilderFromNetwork(network.getLayer(inId));
+ ASSERT_EQ(inBuilderFromNetwork.getPort().shape(), Port({1, 3, 3, 3}).shape());
+ inBuilderFromNetwork.setPort(Port({1, 3, 4, 4}));
+ ASSERT_EQ(inBuilderFromNetwork.getPort().shape(), Port({1, 3, 4, 4}).shape());
+ ASSERT_EQ(network.getLayer(inId).getOutputPorts()[0].shape(), Port({1, 3, 4, 4}).shape());
+ ASSERT_EQ(inBuilder.getPort().shape(), Port({1, 3, 3, 3}).shape());
+} \ No newline at end of file
diff --git a/inference-engine/tests/unit/builders/network_builder_test.cpp b/inference-engine/tests/unit/builders/network_builder_test.cpp
new file mode 100644
index 000000000..3b53f1271
--- /dev/null
+++ b/inference-engine/tests/unit/builders/network_builder_test.cpp
@@ -0,0 +1,927 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <string.h>
+#include <ie_builders.hpp>
+
+
+#include "builder_test.hpp"
+
+using namespace testing;
+using namespace InferenceEngine;
+
+class NetworkBuilderTest : public BuilderTestCommon {
+protected:
+ std::vector<std::string> alexNetNames = {
+ "in1",
+ "mean",
+ "conv1",
+ "relu1",
+ "norm1",
+ "pool1",
+ "conv2",
+ "relu2",
+ "norm2",
+ "pool2",
+ "conv3",
+ "relu3",
+ "conv4",
+ "relu4",
+ "conv5",
+ "relu5",
+ "pool5",
+ "fc6",
+ "relu6",
+ "fc7",
+ "relu7",
+ "fc8",
+ "prob",
+ "sf_out"
+ };
+
+public:
+
+    Builder::Network prepareAlexnetBuilder() {
+ Context ctx;
+ Builder::Network builder(ctx, "AlexNet");
+ idx_t layerId = builder.addLayer(Builder::InputLayer(alexNetNames[0]).setPort(Port({1,3, 227, 227})));
+ layerId = builder.addLayer({{layerId}}, Builder::ScaleShiftLayer(alexNetNames[1]).setBiases(generateBlob(Precision::FP32, {3}, Layout::C)));
+ layerId = builder.addLayer({{layerId}}, Builder::ConvolutionLayer(alexNetNames[2]).setKernel({11, 11}).setStrides({4, 4}).setOutDepth(96)
+ .setWeights(generateBlob(Precision::FP32, {96, 3, 11, 11}, Layout::OIHW))
+ .setBiases(generateBlob(Precision::FP32, {96}, Layout::C)));
+ layerId = builder.addLayer({{layerId}}, Builder::ReLULayer(alexNetNames[3]));
+ layerId = builder.addLayer({{layerId}}, Builder::NormLayer(alexNetNames[4]).setAlpha(9.999999747378752e-05f).setBeta(0.75f).setSize(5).setAcrossMaps(true));
+ layerId = builder.addLayer({{layerId}}, Builder::PoolingLayer(alexNetNames[5]).setExcludePad(false).setKernel({3, 3}).setPaddingsBegin({0, 0})
+ .setPaddingsEnd({0, 0}).setPoolingType(Builder::PoolingLayer::PoolingType::MAX).setStrides({2, 2}));
+ layerId = builder.addLayer({{layerId}}, Builder::ConvolutionLayer(alexNetNames[6]).setKernel({5, 5}).setStrides({1, 1}).setOutDepth(256)
+ .setPaddingsBegin({2, 2}).setPaddingsEnd({2, 2}).setGroup(2).setDilation({1, 1})
+ .setWeights(generateBlob(Precision::FP32, {96, 256, 5, 5}, Layout::OIHW))
+ .setBiases(generateBlob(Precision::FP32, {256}, Layout::C)));
+ layerId = builder.addLayer({{layerId}}, Builder::ReLULayer(alexNetNames[7]));
+ layerId = builder.addLayer({{layerId}}, Builder::NormLayer(alexNetNames[8]).setAlpha(9.999999747378752e-05f).setBeta(0.75f).setSize(5).setAcrossMaps(true));
+ layerId = builder.addLayer({{layerId}}, Builder::PoolingLayer(alexNetNames[9]).setExcludePad(false).setKernel({3, 3}).setPaddingsBegin({0, 0})
+ .setPaddingsEnd({0, 0}).setPoolingType(Builder::PoolingLayer::PoolingType::MAX).setStrides({2, 2}));
+ layerId = builder.addLayer({{layerId}}, Builder::ConvolutionLayer(alexNetNames[10]).setKernel({3, 3}).setStrides({1, 1}).setOutDepth(384)
+ .setPaddingsBegin({1, 1}).setPaddingsEnd({1, 1}).setGroup(1).setDilation({1, 1})
+ .setWeights(generateBlob(Precision::FP32, {256, 384, 3, 3}, Layout::OIHW))
+ .setBiases(generateBlob(Precision::FP32, {384}, Layout::C)));
+ layerId = builder.addLayer({{layerId}}, Builder::ReLULayer(alexNetNames[11]));
+ layerId = builder.addLayer({{layerId}}, Builder::ConvolutionLayer(alexNetNames[12]).setKernel({3, 3}).setStrides({1, 1}).setOutDepth(384)
+ .setPaddingsBegin({1, 1}).setPaddingsEnd({1, 1}).setGroup(2).setDilation({1, 1})
+ .setWeights(generateBlob(Precision::FP32, {384, 384, 3, 3}, Layout::OIHW))
+ .setBiases(generateBlob(Precision::FP32, {384}, Layout::C)));
+ layerId = builder.addLayer({{layerId}}, Builder::ReLULayer(alexNetNames[13]));
+ layerId = builder.addLayer({{layerId}}, Builder::ConvolutionLayer(alexNetNames[14]).setKernel({3, 3}).setStrides({1, 1}).setOutDepth(256)
+ .setPaddingsBegin({1, 1}).setPaddingsEnd({1, 1}).setGroup(2).setDilation({1, 1})
+ .setWeights(generateBlob(Precision::FP32, {256, 384, 3, 3}, Layout::OIHW))
+ .setBiases(generateBlob(Precision::FP32, {384}, Layout::C)));
+ layerId = builder.addLayer({{layerId}}, Builder::ReLULayer(alexNetNames[15]));
+ layerId = builder.addLayer({{layerId}}, Builder::PoolingLayer(alexNetNames[16]).setExcludePad(false).setKernel({3, 3}).setPaddingsBegin({0, 0})
+ .setPaddingsEnd({0, 0}).setPoolingType(Builder::PoolingLayer::PoolingType::MAX).setStrides({2, 2}));
+ layerId = builder.addLayer({{layerId}}, Builder::FullyConnectedLayer(alexNetNames[17]).setOutputNum(4096)
+ .setWeights(generateBlob(Precision::FP32, {4096, 256, 6, 6}, Layout::OIHW))
+ .setBiases(generateBlob(Precision::FP32, {4096}, Layout::C)));
+ layerId = builder.addLayer({{layerId}}, Builder::ReLULayer(alexNetNames[18]));
+ layerId = builder.addLayer({{layerId}}, Builder::FullyConnectedLayer(alexNetNames[19]).setOutputNum(4096)
+ .setWeights(generateBlob(Precision::FP32, {4096, 4096}, Layout::NC))
+ .setBiases(generateBlob(Precision::FP32, {4096}, Layout::C)));
+ layerId = builder.addLayer({{layerId}}, Builder::ReLULayer(alexNetNames[20]));
+ layerId = builder.addLayer({{layerId}}, Builder::FullyConnectedLayer(alexNetNames[21]).setOutputNum(1000)
+ .setWeights(generateBlob(Precision::FP32, {1000, 4096}, Layout::NC))
+ .setBiases(generateBlob(Precision::FP32, {1000}, Layout::C)));
+ layerId = builder.addLayer({{layerId}}, Builder::SoftMaxLayer(alexNetNames[22]).setAxis(1));
+
+ idx_t outputId = builder.addLayer({PortInfo(layerId)}, Builder::OutputLayer(alexNetNames[23]));
+ return builder;
+ }
+
+ const INetwork::Ptr createAlexnet() {
+        return prepareAlexnetBuilder().build();
+ }
+
+ void compareWithICNNNetwork(const INetwork& network, const ICNNNetwork& cnnNetwork) {
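+        // Walk every layer of the INetwork and erase each of its connections as soon as a
+        // matching in/out data link is found in the ICNNNetwork; any leftover means a mismatch.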
+ for (const auto& layer : network) {
+ auto connections = network.getLayerConnections(layer->getId());
+ CNNLayerPtr cnnLayer;
+ StatusCode sts = cnnNetwork.getLayerByName(layer->getName().c_str(), cnnLayer, nullptr);
+ if (sts != OK && layer->getType() == "Output")
+ continue;
+ else if (sts != OK)
+ THROW_IE_EXCEPTION << "Cannot find CNNLayer by name: " << layer->getName();
+
+
+ // Output connections
+ for (size_t i = 0; i < cnnLayer->outData.size(); i++) {
+ for (const auto& it : cnnLayer->outData[i]->inputTo) {
+ size_t j = 0;
+ for (; j < it.second->insData.size(); j++) {
+ auto lockedData = it.second->insData[j].lock();
+ if (lockedData && lockedData.get() == cnnLayer->outData[i].get()) {
+ break;
+ }
+ }
+
+ for (auto conIt = connections.begin(); conIt != connections.end(); conIt++) {
+ if (conIt->from().layerId() == layer->getId() && conIt->from().portId() == i &&
+ network.getLayer(conIt->to().layerId())->getName() == it.second->name &&
+ conIt->to().portId() == j) {
+ connections.erase(conIt);
+ break;
+ }
+ }
+ }
+ }
+
+ // Input connections
+ for (size_t i = 0; i < cnnLayer->insData.size(); i++) {
+ auto inData = cnnLayer->insData[i].lock();
+ if (!inData)
+ continue;
+ auto creatorLayer = inData->creatorLayer.lock();
+ if (!creatorLayer)
+ continue;
+ size_t j = 0;
+ for (; j < creatorLayer->outData.size(); j++) {
+ if (creatorLayer->outData[j] && creatorLayer->outData[j].get() == inData.get()) {
+ break;
+ }
+ }
+
+ for (auto conIt = connections.begin(); conIt != connections.end(); conIt++) {
+ if (conIt->to().layerId() == layer->getId() && conIt->from().portId() == j &&
+ network.getLayer(conIt->from().layerId())->getName() == creatorLayer->name &&
+ conIt->to().portId() == i) {
+ connections.erase(conIt);
+ break;
+ }
+ }
+ }
+
+ if (connections.size() == 1 && network.getLayer(connections[0].to().layerId())->getType() == "Output")
+ connections.erase(connections.begin());
+
+ if (!connections.empty())
+ THROW_IE_EXCEPTION << "Not all connections were connected.";
+ }
+ }
+
+ void compareICNNNetworks(const ICNNNetwork& newNetwork, const ICNNNetwork& oldNetwork) {
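+        // Compare the two networks layer by layer (name, type, precision, ports),
+        // then verify that their input and output maps contain the same entries.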
+ CNNNetwork network((ICNNNetwork*)&newNetwork);
+
+ if (newNetwork.layerCount() != oldNetwork.layerCount())
+ THROW_IE_EXCEPTION << "ICNNNetworks have different numbers of layers!";
+ for (const auto& layer : network) {
+ CNNLayerPtr oldLayer;
+ StatusCode sts = oldNetwork.getLayerByName(layer->name.c_str(), oldLayer, nullptr);
+ bool success = sts == OK && layer->name == oldLayer->name &&
+ layer->type == oldLayer->type &&
+ layer->insData.size() == oldLayer->insData.size() &&
+ layer->outData.size() == oldLayer->outData.size() &&
+ layer->precision == oldLayer->precision;
+
+ for (size_t i = 0; i < layer->insData.size() && success; i++) {
+ auto lockedOldData = oldLayer->insData[i].lock();
+ auto lockedData = layer->insData[i].lock();
+ success = success && lockedOldData->name == lockedData->name &&
+ lockedOldData->getTensorDesc() == lockedData->getTensorDesc();
+ }
+ for (size_t i = 0; i < layer->outData.size() && success; i++) {
+ success = success && oldLayer->outData[i]->name == layer->outData[i]->name &&
+ oldLayer->outData[i]->getTensorDesc() == layer->outData[i]->getTensorDesc();
+ }
+
+ if (!success)
+ THROW_IE_EXCEPTION << "ICNNNetworks have different layers!";
+ }
+
+ InputsDataMap newInput;
+ OutputsDataMap newOutput;
+ newNetwork.getInputsInfo(newInput);
+ newNetwork.getOutputsInfo(newOutput);
+ InputsDataMap oldInput;
+ OutputsDataMap oldOutput;
+ oldNetwork.getInputsInfo(oldInput);
+ oldNetwork.getOutputsInfo(oldOutput);
+
+ bool success = newInput.size() == oldInput.size();
+ for (const auto& it : newInput) {
+ if (!success)
+ break;
+ success = success && oldInput.find(it.first) != oldInput.end();
+ }
+ if (!success)
+ THROW_IE_EXCEPTION << "ICNNNetworks have different inputs!";
+
+ success = newOutput.size() == oldOutput.size();
+ for (const auto& it : newOutput) {
+ if (!success)
+ break;
+ success = success && oldOutput.find(it.first) != oldOutput.end();
+ }
+ if (!success)
+ THROW_IE_EXCEPTION << "ICNNNetworks have different outputs!";
+ }
+};
+
+TEST_F(NetworkBuilderTest, checkReshapeAlexNet) {
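+    // Expected input port shapes for every layer once the network is built and shapes are inferred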
+ std::map<std::string, std::vector<SizeVector>> inPorts = {
+ {alexNetNames[0], {}},
+ {alexNetNames[1], {{1, 3, 227, 227}}},
+ {alexNetNames[2], {{1, 3, 227, 227}}},
+ {alexNetNames[3], {{1, 96, 55, 55}}},
+ {alexNetNames[4], {{1, 96, 55, 55}}},
+ {alexNetNames[5], {{1, 96, 55, 55}}},
+ {alexNetNames[6], {{1, 96, 27, 27}}},
+ {alexNetNames[7], {{1, 256, 27, 27}}},
+ {alexNetNames[8], {{1, 256, 27, 27}}},
+ {alexNetNames[9], {{1, 256, 27, 27}}},
+ {alexNetNames[10], {{1, 256, 13, 13}}},
+ {alexNetNames[11], {{1, 384, 13, 13}}},
+ {alexNetNames[12], {{1, 384, 13, 13}}},
+ {alexNetNames[13], {{1, 384, 13, 13}}},
+ {alexNetNames[14], {{1, 384, 13, 13}}},
+ {alexNetNames[15], {{1, 256, 13, 13}}},
+ {alexNetNames[16], {{1, 256, 13, 13}}},
+ {alexNetNames[17], {{1, 256, 6, 6}}},
+ {alexNetNames[18], {{1, 4096}}},
+ {alexNetNames[19], {{1, 4096}}},
+ {alexNetNames[20], {{1, 4096}}},
+ {alexNetNames[21], {{1, 4096}}},
+ {alexNetNames[22], {{1, 1000}}},
+ {alexNetNames[23], {{1, 1000}}}
+ };
+
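+    // Expected output port shapes for the same layers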
+ std::map<std::string, std::vector<SizeVector>> outPorts = {
+ {alexNetNames[0], {{1, 3, 227, 227}}},
+ {alexNetNames[1], {{1, 3, 227, 227}}},
+ {alexNetNames[2], {{1, 96, 55, 55}}},
+ {alexNetNames[3], {{1, 96, 55, 55}}},
+ {alexNetNames[4], {{1, 96, 55, 55}}},
+ {alexNetNames[5], {{1, 96, 27, 27}}},
+ {alexNetNames[6], {{1, 256, 27, 27}}},
+ {alexNetNames[7], {{1, 256, 27, 27}}},
+ {alexNetNames[8], {{1, 256, 27, 27}}},
+ {alexNetNames[9], {{1, 256, 13, 13}}},
+ {alexNetNames[10], {{1, 384, 13, 13}}},
+ {alexNetNames[11], {{1, 384, 13, 13}}},
+ {alexNetNames[12], {{1, 384, 13, 13}}},
+ {alexNetNames[13], {{1, 384, 13, 13}}},
+ {alexNetNames[14], {{1, 256, 13, 13}}},
+ {alexNetNames[15], {{1, 256, 13, 13}}},
+ {alexNetNames[16], {{1, 256, 6, 6}}},
+ {alexNetNames[17], {{1, 4096}}},
+ {alexNetNames[18], {{1, 4096}}},
+ {alexNetNames[19], {{1, 4096}}},
+ {alexNetNames[20], {{1, 4096}}},
+ {alexNetNames[21], {{1, 1000}}},
+ {alexNetNames[22], {{1, 1000}}},
+ {alexNetNames[23], {}}
+ };
+
+    Builder::Network builder = prepareAlexnetBuilder();
+ for (const auto &layer : builder.getLayers()) {
+ if (layer.getType() == "Input") {
+ ASSERT_EQ(outPorts[layer.getName()][0], layer.getOutputPorts()[0].shape());
+ } else {
+ for (size_t j = 0; j < layer.getOutputPorts().size(); j++) {
+ ASSERT_TRUE(layer.getOutputPorts()[j].shape().empty());
+ }
+ }
+ }
+ INetwork::Ptr graph;
+ ASSERT_NO_THROW(graph = builder.build());
+ for (const auto &layer : *graph) {
+ for (size_t i = 0; i < layer->getInputPorts().size(); i++) {
+ ASSERT_EQ(inPorts[layer->getName()][i], layer->getInputPorts()[i].shape());
+ }
+ for (size_t i = 0; i < layer->getOutputPorts().size(); i++) {
+ ASSERT_EQ(outPorts[layer->getName()][i], layer->getOutputPorts()[i].shape());
+ }
+ }
+}
+
+TEST_F(NetworkBuilderTest, checkNoImplWithCorrectPorts) {
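+    // A generic "TestLayer" has no registered shape validator; the build should still succeed when its ports are consistent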
+ Context ctx;
+ Builder::Network builder(ctx, "TestAlexNet");
+ idx_t inId = builder.addLayer(Builder::InputLayer(alexNetNames[0]).setPort(Port({1,3, 227, 227})));
+ idx_t convId = builder.addLayer({{inId}}, Builder::ConvolutionLayer(alexNetNames[2]).setKernel({11, 11}).setStrides({4, 4}).setOutDepth(96)
+ .setInputPort(Port({1,3, 227, 227})).setOutputPort(Port({1, 96, 55, 55}))
+ .setWeights(generateBlob(Precision::FP32, {96, 3, 11, 11}, Layout::OIHW))
+ .setBiases(generateBlob(Precision::FP32, {96}, Layout::C)));
+ idx_t testLayerId = builder.addLayer({PortInfo(convId)}, Builder::Layer("TestLayer", "testPort")
+ .setInputPorts({Port({1, 96, 55, 55})}).setOutputPorts({Port({1, 96, 55, 55})}));
+ idx_t outputId = builder.addLayer({PortInfo(testLayerId)}, Builder::OutputLayer("out").setPort({Port({1, 96, 55, 55})}));
+
+ ASSERT_NO_THROW(builder.build());
+}
+
+TEST_F(NetworkBuilderTest, checkNoImplWithIncorrectPorts) {
+ Context ctx;
+ Builder::Network builder(ctx, "TestAlexNet");
+ idx_t inId = builder.addLayer(Builder::InputLayer(alexNetNames[0]).setPort(Port({1,3, 227, 227})));
+ idx_t convId = builder.addLayer({{inId}}, Builder::ConvolutionLayer(alexNetNames[2]).setKernel({11, 11}).setStrides({4, 4}).setOutDepth(96)
+ .setInputPort(Port({1,3, 227, 227})).setOutputPort(Port({1, 96, 55, 55}))
+ .setWeights(generateBlob(Precision::FP32, {96, 3, 11, 11}, Layout::OIHW))
+ .setBiases(generateBlob(Precision::FP32, {96}, Layout::C)));
+ idx_t testLayerId = builder.addLayer({PortInfo(convId)}, Builder::Layer("TestLayer", "testPort")
+ .setInputPorts({Port({1, 3, 55, 55})}).setOutputPorts({Port({1, 96, 55, 55})}));
+
+ ASSERT_THROW(builder.build(), InferenceEngine::details::InferenceEngineException);
+}
+
+TEST_F(NetworkBuilderTest, createNetworkIterator) {
+ const INetwork::Ptr graph = createAlexnet();
+
+ ASSERT_NO_THROW(graph->begin());
+}
+
+TEST_F(NetworkBuilderTest, checkNetworkSize) {
+ const INetwork::Ptr graph = createAlexnet();
+
+ ASSERT_EQ(24, graph->size());
+}
+
+TEST_F(NetworkBuilderTest, iterateNetworkForeach) {
+ const INetwork::Ptr graph = createAlexnet();
+
+ size_t idx = 0;
+ for (const auto& layer : *graph) {
+ ASSERT_NE(idx, alexNetNames.size());
+ ASSERT_EQ(alexNetNames[idx], layer->getName());
+ idx++;
+ }
+}
+
+TEST_F(NetworkBuilderTest, iterateNetworkFor) {
+ const INetwork::Ptr graph = createAlexnet();
+
+ size_t idx = 0;
+ for (auto it = graph->begin(); it != graph->end(); it++) {
+ ASSERT_EQ(alexNetNames[idx], (*it)->getName());
+ idx++;
+ }
+}
+
+TEST_F(NetworkBuilderTest, convertFromICNNNetwork) {
+ std::string model = R"V0G0N(
+<net name="PVANET" version="2" batch="1">
+ <layers>
+ <layer name="data" type="Input" precision="FP32" id="0">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>3</dim>
+ <dim>544</dim>
+ <dim>992</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="conv1_1_conv" type="Convolution" precision="FP32" id="2">
+ <convolution_data stride-x="2" stride-y="2" pad-x="3" pad-y="3" kernel-x="7" kernel-y="7" output="16" group="1"/>
+ <input>
+ <port id="2">
+ <dim>1</dim>
+ <dim>3</dim>
+ <dim>544</dim>
+ <dim>992</dim>
+ </port>
+ </input>
+ <output>
+ <port id="3">
+ <dim>1</dim>
+ <dim>16</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </output>
+ <weights offset="0" size="9408"/>
+ <biases offset="9408" size="64"/>
+ </layer>
+ <layer name="conv1_1_neg" type="Power" precision="FP32" id="3">
+ <power_data power="1" scale="-1" shift="0"/>
+ <input>
+ <port id="4">
+ <dim>1</dim>
+ <dim>16</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </input>
+ <output>
+ <port id="5">
+ <dim>1</dim>
+ <dim>16</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="conv1_1_concat" type="Concat" precision="FP32" id="4">
+ <concat_data axis="1"/>
+ <input>
+ <port id="6">
+ <dim>1</dim>
+ <dim>16</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ <port id="7">
+ <dim>1</dim>
+ <dim>16</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </input>
+ <output>
+ <port id="8">
+ <dim>1</dim>
+ <dim>32</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="conv1_1_scale" type="ScaleShift" precision="FP32" id="5">
+ <input>
+ <port id="9">
+ <dim>1</dim>
+ <dim>32</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </input>
+ <output>
+ <port id="10">
+ <dim>1</dim>
+ <dim>32</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </output>
+ <weights offset="9472" size="128"/>
+ <biases offset="9600" size="128"/>
+ </layer>
+ <layer name="conv1_1_relu" type="ReLU" precision="FP32" id="6">
+ <data negative_slope="0" engine="caffe.ReLUParameter.DEFAULT"/>
+ <input>
+ <port id="11">
+ <dim>1</dim>
+ <dim>32</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </input>
+ <output>
+ <port id="12">
+ <dim>1</dim>
+ <dim>32</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="pool1" type="Pooling" precision="FP32" id="7">
+ <pooling_data kernel-x="3" kernel-y="3" pad-x="0" pad-y="0" stride-x="2" stride-y="2" rounding-type="ceil" pool-method="max"/>
+ <input>
+ <port id="13">
+ <dim>1</dim>
+ <dim>32</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </input>
+ <output>
+ <port id="14">
+ <dim>1</dim>
+ <dim>32</dim>
+ <dim>136</dim>
+ <dim>248</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="2" to-port="2"/>
+ <edge from-layer="2" from-port="3" to-layer="3" to-port="4"/>
+ <edge from-layer="2" from-port="3" to-layer="4" to-port="6"/>
+ <edge from-layer="3" from-port="5" to-layer="4" to-port="7"/>
+ <edge from-layer="4" from-port="8" to-layer="5" to-port="9"/>
+ <edge from-layer="5" from-port="10" to-layer="6" to-port="11"/>
+ <edge from-layer="6" from-port="12" to-layer="7" to-port="13"/>
+ </edges>
+</net>)V0G0N";
+
+ InferenceEngine::CNNNetReader net_reader;
+ ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+
+ InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, {9728});
+ weights->allocate();
+ fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
+ InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
+
+ net_reader.SetWeights(weights_ptr);
+ INetwork::Ptr network = Builder::Network(net_reader.getNetwork()).build();
+
+ try {
+ compareWithICNNNetwork(*network, net_reader.getNetwork());
+ } catch (InferenceEngine::details::InferenceEngineException &ex) {
+ FAIL() << ex.what();
+ }
+}
+
+TEST_F(NetworkBuilderTest, convertFromICNNNetworkToICNNNetwork) {
+ std::string model = R"V0G0N(
+<net name="PVANET" version="2" batch="1">
+ <layers>
+ <layer name="data" type="Input" precision="FP32" id="0">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>3</dim>
+ <dim>544</dim>
+ <dim>992</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="conv1_1_conv" type="Convolution" precision="FP32" id="2">
+ <convolution_data stride-x="2" stride-y="2" pad-x="3" pad-y="3" kernel-x="7" kernel-y="7" output="16" group="1"/>
+ <input>
+ <port id="2">
+ <dim>1</dim>
+ <dim>3</dim>
+ <dim>544</dim>
+ <dim>992</dim>
+ </port>
+ </input>
+ <output>
+ <port id="3">
+ <dim>1</dim>
+ <dim>16</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </output>
+ <weights offset="0" size="9408"/>
+ <biases offset="9408" size="64"/>
+ </layer>
+ <layer name="conv1_1_neg" type="Power" precision="FP32" id="3">
+ <power_data power="1" scale="-1" shift="0"/>
+ <input>
+ <port id="4">
+ <dim>1</dim>
+ <dim>16</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </input>
+ <output>
+ <port id="5">
+ <dim>1</dim>
+ <dim>16</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="conv1_1_concat" type="Concat" precision="FP32" id="4">
+ <concat_data axis="1"/>
+ <input>
+ <port id="6">
+ <dim>1</dim>
+ <dim>16</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ <port id="7">
+ <dim>1</dim>
+ <dim>16</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </input>
+ <output>
+ <port id="8">
+ <dim>1</dim>
+ <dim>32</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="conv1_1_scale" type="ScaleShift" precision="FP32" id="5">
+ <input>
+ <port id="9">
+ <dim>1</dim>
+ <dim>32</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </input>
+ <output>
+ <port id="10">
+ <dim>1</dim>
+ <dim>32</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </output>
+ <weights offset="9472" size="128"/>
+ <biases offset="9600" size="128"/>
+ </layer>
+ <layer name="conv1_1_relu" type="ReLU" precision="FP32" id="6">
+ <data negative_slope="0" engine="caffe.ReLUParameter.DEFAULT"/>
+ <input>
+ <port id="11">
+ <dim>1</dim>
+ <dim>32</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </input>
+ <output>
+ <port id="12">
+ <dim>1</dim>
+ <dim>32</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="pool1" type="Pooling" precision="FP32" id="7">
+ <pooling_data kernel-x="3" kernel-y="3" pad-x="0" pad-y="0" stride-x="2" stride-y="2" rounding-type="ceil" pool-method="max"/>
+ <input>
+ <port id="13">
+ <dim>1</dim>
+ <dim>32</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </input>
+ <output>
+ <port id="14">
+ <dim>1</dim>
+ <dim>32</dim>
+ <dim>136</dim>
+ <dim>248</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="2" to-port="2"/>
+ <edge from-layer="2" from-port="3" to-layer="3" to-port="4"/>
+ <edge from-layer="2" from-port="3" to-layer="4" to-port="6"/>
+ <edge from-layer="3" from-port="5" to-layer="4" to-port="7"/>
+ <edge from-layer="4" from-port="8" to-layer="5" to-port="9"/>
+ <edge from-layer="5" from-port="10" to-layer="6" to-port="11"/>
+ <edge from-layer="6" from-port="12" to-layer="7" to-port="13"/>
+ </edges>
+</net>)V0G0N";
+
+ InferenceEngine::CNNNetReader net_reader;
+ ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+
+ InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, {9728});
+ weights->allocate();
+ fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
+ InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
+
+ net_reader.SetWeights(weights_ptr);
+ std::shared_ptr<ICNNNetwork> network = Builder::convertToICNNNetwork(Builder::Network(net_reader.getNetwork()).build());
+
+ try {
+ compareICNNNetworks(*network, net_reader.getNetwork());
+ } catch (InferenceEngine::details::InferenceEngineException &ex) {
+ FAIL() << ex.what();
+ }
+}
+
+TEST_F(NetworkBuilderTest, connectTwoNetworks) {
+ std::string model = R"V0G0N(
+<net name="PVANET" version="2" batch="1">
+ <layers>
+ <layer name="data" type="Input" precision="FP32" id="0">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>3</dim>
+ <dim>544</dim>
+ <dim>992</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="conv1_1_conv" type="Convolution" precision="FP32" id="2">
+ <convolution_data stride-x="2" stride-y="2" pad-x="3" pad-y="3" pad-r="3" pad-b="3" kernel-x="7" kernel-y="7" output="16" group="1"/>
+ <input>
+ <port id="2">
+ <dim>1</dim>
+ <dim>3</dim>
+ <dim>544</dim>
+ <dim>992</dim>
+ </port>
+ </input>
+ <output>
+ <port id="3">
+ <dim>1</dim>
+ <dim>16</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </output>
+ <weights offset="0" size="9408"/>
+ <biases offset="9408" size="64"/>
+ </layer>
+ <layer name="conv1_1_neg" type="Power" precision="FP32" id="3">
+ <power_data power="1" scale="-1" shift="0"/>
+ <input>
+ <port id="4">
+ <dim>1</dim>
+ <dim>16</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </input>
+ <output>
+ <port id="5">
+ <dim>1</dim>
+ <dim>16</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="conv1_1_concat" type="Concat" precision="FP32" id="4">
+ <concat_data axis="1"/>
+ <input>
+ <port id="6">
+ <dim>1</dim>
+ <dim>16</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ <port id="7">
+ <dim>1</dim>
+ <dim>16</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </input>
+ <output>
+ <port id="8">
+ <dim>1</dim>
+ <dim>32</dim>
+ <dim>272</dim>
+ <dim>496</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="2" to-port="2"/>
+ <edge from-layer="2" from-port="3" to-layer="3" to-port="4"/>
+ <edge from-layer="2" from-port="3" to-layer="4" to-port="6"/>
+ <edge from-layer="3" from-port="5" to-layer="4" to-port="7"/>
+ </edges>
+</net>)V0G0N";
+
+ InferenceEngine::CNNNetReader net_reader;
+ ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+
+ InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, {9472});
+ weights->allocate();
+ fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
+ InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
+
+ net_reader.SetWeights(weights_ptr);
+ Builder::Network originalNetwork(net_reader.getNetwork());
+ Builder::Network addNetwork(net_reader.getNetwork());
+
+    // Find the Output layer, remember which layer feeds it, then detach and remove it
+ idx_t lastLayerId(0);
+ for (const auto& layer : originalNetwork.getLayers()) {
+ if (layer.getType() != "Output")
+ continue;
+ const auto connections = originalNetwork.getLayerConnections(layer.getId());
+ ASSERT_EQ(1, connections.size());
+ ASSERT_EQ(layer.getId(), connections[0].to().layerId());
+ ASSERT_EQ(0, connections[0].from().portId());
+ lastLayerId = connections[0].from().layerId();
+ originalNetwork.disconnect(connections[0]);
+ originalNetwork.removeLayer(layer.getId());
+ break;
+ }
+
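+    // Append addNetwork to originalNetwork: map every old layer id to its new id,
+    // substituting the Input layer of addNetwork with the last layer of the original.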
+ std::map<idx_t, idx_t> oldNewId;
+ for (const auto& layer : addNetwork.getLayers()) {
+ if (layer.getType() == "Input") {
+ oldNewId[layer.getId()] = lastLayerId;
+ continue;
+ }
+ oldNewId[layer.getId()] = originalNetwork.addLayer(layer);
+ const auto connections = addNetwork.getLayerConnections(layer.getId());
+ for (const auto& connection : connections) {
+ if (oldNewId.find(connection.from().layerId()) == oldNewId.end() ||
+ oldNewId.find(connection.to().layerId()) == oldNewId.end())
+ continue;
+ originalNetwork.connect({oldNewId[connection.from().layerId()], connection.from().portId()},
+ {oldNewId[connection.to().layerId()], connection.to().portId()});
+ }
+
+ if (layer.getType() == "Convolution") {
+ Builder::ConvolutionLayer(originalNetwork.getLayer(oldNewId[layer.getId()])).setWeights(generateBlob(Precision::FP32, {16, 32, 7, 7}, Layout::OIHW));
+ }
+ }
+ ASSERT_NO_THROW(originalNetwork.build());
+}
+
+TEST_F(NetworkBuilderTest, createLayersWithTheSameNames) {
+ InferenceEngine::Builder::Network netBuilder("");
+
+    // Connect the convolutional layer with its inputs and outputs.
+ InferenceEngine::Builder::InputLayer inpLayer("data");
+ inpLayer.setPort(InferenceEngine::Port({1, 1, 10, 10}));
+ auto inpLayerId = netBuilder.addLayer(inpLayer);
+
+ // Create convolutional layer
+ const size_t outCn = 1, inpCn = 1, kernelH = 3, kernelW = 3;
+ InferenceEngine::Builder::ConvolutionLayer ieLayer("conv1");
+
+ ieLayer.setKernel({outCn, inpCn, kernelH, kernelW});
+ ieLayer.setStrides({1, 1, 1, 1});
+ ieLayer.setDilation({1, 1, 1, 1});
+ ieLayer.setPaddingsBegin({0, 0, 0, 0});
+ ieLayer.setPaddingsEnd({0, 0, 0, 0});
+ ieLayer.setGroup(1);
+ ieLayer.setOutDepth(outCn);
+ auto convLayerId = netBuilder.addLayer({inpLayerId}, ieLayer);
+
+    // Connect the convolution layer with its output
+ InferenceEngine::Builder::OutputLayer outLayer("conv1");
+ auto convOutLayerId = netBuilder.addLayer({convLayerId}, outLayer);
+ ASSERT_NE(netBuilder.getLayer(convLayerId).getName(), netBuilder.getLayer(convOutLayerId).getName());
+ InferenceEngine::Builder::ReLULayer reLULayer("relu1");
+ reLULayer.setNegativeSlope(0);
+ auto reluLayerId = netBuilder.addLayer({convLayerId}, reLULayer);
+ InferenceEngine::Builder::OutputLayer outReLULayer("relu1");
+ auto reluOutLayerId = netBuilder.addLayer({reluLayerId}, outReLULayer);
+ ASSERT_NE(netBuilder.getLayer(reluLayerId).getName(), netBuilder.getLayer(reluOutLayerId).getName());
+
+ ASSERT_NO_THROW(netBuilder.build());
+}
+
+TEST_F(NetworkBuilderTest, RemoveLayerAndBuild) {
+    auto builder = prepareAlexnetBuilder();
+ builder.removeLayer(builder.getLayers()[2].getId());
+
+ ASSERT_THROW(builder.build(), InferenceEngine::details::InferenceEngineException);
+}
+
+TEST_F(NetworkBuilderTest, DocumentationExample) {
+ // Create graph with name
+ InferenceEngine::Builder::Network graph("Example1");
+
+ // Create network
+ // In-place add input layer
+ idx_t inputLayerId = graph.addLayer(Builder::InputLayer("in").setPort(Port({1, 3, 22, 22})));
+
+    // In-place add a ReLU layer builder with negative slope 0.1 and connect it to output port 0 of the Input layer builder
+    // The port index isn't set explicitly because 0 is the default value ({inputLayerId} == {inputLayerId, 0})
+ idx_t relu1Id = graph.addLayer({{inputLayerId}}, Builder::ReLULayer("relu1").setNegativeSlope(0.1f));
+
+ // In-place add ScaleShift layer builder
+ InferenceEngine::Blob::Ptr blobWithScaleShiftBiases = make_shared_blob<float>(TensorDesc(Precision::FP32, {3}, Layout::C));
+ blobWithScaleShiftBiases->allocate();
+ auto *data = blobWithScaleShiftBiases->buffer().as<float *>();
+ data[0] = 1;
+ data[1] = 2;
+ data[2] = 3;
+ idx_t scaleShiftId = graph.addLayer(Builder::ScaleShiftLayer("scaleShift1").setBiases(blobWithScaleShiftBiases));
+
+ // Connect ScaleShift layer with relu1
+    graph.connect({relu1Id}, {scaleShiftId}); // Port indexes could also be specified (0 is the default): graph.connect({relu1Id, outPortIdx}, {scaleShiftId, inPortIdx});
+
+ // Create ReLU layer with a negative slope 0.2 using generic layer builder and connect it with scaleShift
+ idx_t relu2Id = graph.addLayer({{scaleShiftId}}, Builder::Layer("ReLU", "relu2").setParameters({{"negative_slope", 0.2f}}).setOutputPorts({Port()}).setInputPorts({Port()}));
+
+    // Every branch in the graph must end with an Output layer, so create one
+ idx_t outId = graph.addLayer({{relu2Id, 0}}, Builder::OutputLayer("out"));
+
+ // Build original network
+ InferenceEngine::INetwork::Ptr finalNetwork = graph.build();
+ std::shared_ptr<InferenceEngine::ICNNNetwork> cnnNetwork = InferenceEngine::Builder::convertToICNNNetwork(finalNetwork);
+
+ // Modify network
+ // Remove relu2 layer from the topology
+ std::vector<InferenceEngine::Connection> connections = graph.getLayerConnections(relu2Id);
+ for (const auto& connection : connections) {
+ graph.disconnect(connection);
+ }
+ graph.removeLayer(relu2Id);
+
+ // Connect scaleShift1 and out
+ graph.connect({scaleShiftId}, {outId});
+ // Build network without relu2
+ InferenceEngine::INetwork::Ptr changedNetwork = graph.build();
+}
diff --git a/inference-engine/tests/unit/cnn_network/cnn_net_reader_impl_test.cpp b/inference-engine/tests/unit/cnn_network/cnn_net_reader_impl_test.cpp
index 09cbb5089..e33362f19 100644
--- a/inference-engine/tests/unit/cnn_network/cnn_net_reader_impl_test.cpp
+++ b/inference-engine/tests/unit/cnn_network/cnn_net_reader_impl_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -1832,4 +1831,265 @@ TEST_F(CNNNetReaderImplTest, canRead3DPooling) {
ASSERT_EQ(pool->_pads_end[X_AXIS], 5);
ASSERT_EQ(pool->_pads_end[Y_AXIS], 3);
ASSERT_EQ(pool->_pads_end[Z_AXIS], 1);
-} \ No newline at end of file
+}
+
+TEST_F(CNNNetReaderImplTest, canParseWithoutInput_1to2) {
+ std::string model = R"V0G0N(
+<net batch="1" name="SimpleNet" version="2">
+ <layers>
+ <layer id="1" name="Boo" precision="FP32" type="Split">
+ <data operation="sum"/>
+ <input>
+ <port id="0">
+ <dim>2</dim>
+ <dim>16</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>16</dim>
+ </port>
+ <port id="2">
+ <dim>1</dim>
+ <dim>16</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+</net>
+ )V0G0N";
+
+ CNNNetReaderImpl reader(make_shared<V2FormatParserCreator>());
+ sts = reader.ReadNetwork(model.data(), model.length(), &resp);
+ ASSERT_EQ(OK, sts) << resp.msg;
+
+ auto net = reader.getNetwork(&resp);
+    ASSERT_NE(nullptr, net) << resp.msg;
+
+ InputsDataMap in_map;
+ OutputsDataMap out_map;
+ net->getInputsInfo(in_map);
+ net->getOutputsInfo(out_map);
+
+ ASSERT_EQ(in_map.size(), 1); auto i = in_map.begin();
+ ASSERT_EQ(i++->second->name(), "Boo");
+
+ ASSERT_EQ(out_map.size(), 2); auto o = out_map.begin();
+ ASSERT_EQ(o++->second->getName(), "Boo.0");
+ ASSERT_EQ(o++->second->getName(), "Boo.1");
+}
+
+TEST_F(CNNNetReaderImplTest, canParseWithoutInput_2to1) {
+ std::string model = R"V0G0N(
+<net batch="1" name="SimpleNet" version="2">
+ <layers>
+ <layer id="1" name="Foo" precision="FP32" type="Eltwise">
+ <data operation="sum"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>16</dim>
+ </port>
+ <port id="1">
+ <dim>1</dim>
+ <dim>16</dim>
+ </port>
+ </input>
+ <output>
+ <port id="2">
+ <dim>1</dim>
+ <dim>16</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+</net>
+ )V0G0N";
+
+ CNNNetReaderImpl reader(make_shared<V2FormatParserCreator>());
+ sts = reader.ReadNetwork(model.data(), model.length(), &resp);
+ ASSERT_EQ(OK, sts) << resp.msg;
+
+ auto net = reader.getNetwork(&resp);
+    ASSERT_NE(nullptr, net) << resp.msg;
+
+ InputsDataMap in_map;
+ OutputsDataMap out_map;
+ net->getInputsInfo(in_map);
+ net->getOutputsInfo(out_map);
+
+ ASSERT_EQ(in_map.size(), 2); auto i = in_map.begin();
+ ASSERT_EQ(i++->second->name(), "Foo.0");
+ ASSERT_EQ(i++->second->name(), "Foo.1");
+
+ ASSERT_EQ(out_map.size(), 1); auto o = out_map.begin();
+ ASSERT_EQ(o++->second->getName(), "Foo");
+}
+
+TEST_F(CNNNetReaderImplTest, canParseSimpleTI) {
+ std::string model = R"V0G0N(
+<net batch="1" name="Simple_TI" version="4">
+ <layers>
+ <layer id="0" name="input" precision="FP32" type="Input">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>5</dim>
+ <dim>16</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="1" name="Bias" precision="FP32" type="Const">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>16</dim>
+ </port>
+ </output>
+ <blobs>
+ <custom offset="0" size="64"/>
+ </blobs>
+ </layer>
+ <layer id="2" name="SomeTI" precision="FP32" type="TensorIterator">
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>5</dim>
+ <dim>16</dim>
+ </port>
+ <port id="1">
+ <dim>1</dim>
+ <dim>16</dim>
+ </port>
+ </input>
+ <output>
+ <port id="3">
+ <dim>1</dim>
+ <dim>5</dim>
+ <dim>16</dim>
+ </port>
+ </output>
+ <port_map>
+ <input external_port_id="0" internal_layer_id="0" internal_port_id="0" axis="1" />
+ <input external_port_id="1" internal_layer_id="1" internal_port_id="1"/>
+ <output external_port_id="3" internal_layer_id="2" internal_port_id="1" axis="1" />
+ </port_map>
+ <back_edges>
+ <edge from-layer="1" from-port="2" to-layer="1" to-port="1"/>
+ </back_edges>
+ <body>
+ <layers>
+ <layer id="0" name="TI_reshape_in" precision="FP32" type="Reshape">
+ <data axis="0" dim="1,512" num_axes="-1"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>1</dim>
+ <dim>16</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>16</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="1" name="TI_sum" precision="FP32" type="Eltwise">
+ <data operation="sum"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>16</dim>
+ </port>
+ <port id="1">
+ <dim>1</dim>
+ <dim>16</dim>
+ </port>
+ </input>
+ <output>
+ <port id="2">
+ <dim>1</dim>
+ <dim>16</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="2" name="TI_reshape_out" precision="FP32" type="Reshape">
+ <data axis="0" dim="1,1,256" num_axes="-1"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>16</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>1</dim>
+ <dim>16</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="1" to-layer="1" to-port="0"/>
+ <edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
+ </edges>
+ </body>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
+ <edge from-layer="1" from-port="0" to-layer="2" to-port="1"/>
+ </edges>
+</net>
+ )V0G0N";
+
+ CNNNetReaderImpl reader(make_shared<V2FormatParserCreator>());
+ sts = reader.ReadNetwork(model.data(), model.length(), &resp);
+ ASSERT_EQ(OK, sts) << resp.msg;
+
+ auto network = reader.getNetwork(&resp);
+    ASSERT_NE(nullptr, network) << resp.msg;
+
+ CNNLayerPtr layer;
+ sts = network->getLayerByName("SomeTI", layer, &resp);
+ ASSERT_EQ(OK, sts) << resp.msg;
+
+ auto *ti = dynamic_cast<TensorIterator*>(layer.get());
+ ASSERT_NE(nullptr, ti);
+ ASSERT_EQ(ti->type, "TensorIterator");
+
+ // Check Input port mapping
+ ASSERT_EQ(ti->input_port_map.size(), 2);
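+    // The order of the port map entries is not guaranteed; find the one carrying the iteration axis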
+ int i = ti->input_port_map[0].axis == 1 ? 0 : 1;
+ ASSERT_EQ(ti->input_port_map[i].axis, 1);
+ ASSERT_EQ(ti->input_port_map[i].stride, 1);
+ ASSERT_EQ(ti->input_port_map[i].start, 0);
+ ASSERT_EQ(ti->input_port_map[i].end, -1);
+ ASSERT_EQ(ti->input_port_map[i].part_size, 1);
+ ASSERT_EQ(ti->input_port_map[1-i].axis, -1);
+ ASSERT_EQ(ti->input_port_map[1-i].stride, 1);
+ ASSERT_EQ(ti->input_port_map[1-i].start, 0);
+ ASSERT_EQ(ti->input_port_map[1-i].end, -1);
+ ASSERT_EQ(ti->input_port_map[1-i].part_size, 1);
+
+ // Check Output port mapping
+ ASSERT_EQ(ti->output_port_map.size(), 1);
+ ASSERT_EQ(ti->output_port_map[0].axis, 1);
+ ASSERT_EQ(ti->output_port_map[0].stride, 1);
+ ASSERT_EQ(ti->output_port_map[0].start, 0);
+ ASSERT_EQ(ti->output_port_map[0].end, -1);
+ ASSERT_EQ(ti->output_port_map[0].part_size, 1);
+
+    // Check the back edge mapping
+ ASSERT_EQ(ti->back_edges.size(), 1);
+ ASSERT_EQ(ti->back_edges[0].from, 0);
+ ASSERT_EQ(ti->back_edges[0].to, 1);
+ ASSERT_EQ(ti->back_edges[0].axis, -1);
+ ASSERT_EQ(ti->back_edges[0].stride, 1);
+ ASSERT_EQ(ti->back_edges[0].start, 0);
+ ASSERT_EQ(ti->back_edges[0].end, -1);
+ ASSERT_EQ(ti->back_edges[0].part_size, 1);
+}
diff --git a/inference-engine/tests/unit/cnn_network/cnn_network_impl_test.cpp b/inference-engine/tests/unit/cnn_network/cnn_network_impl_test.cpp
index 6f271efe9..9a5a47ab1 100644
--- a/inference-engine/tests/unit/cnn_network/cnn_network_impl_test.cpp
+++ b/inference-engine/tests/unit/cnn_network/cnn_network_impl_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/cnn_network/layout_tests.cpp b/inference-engine/tests/unit/cnn_network/layout_tests.cpp
index 38f06f5ee..11ad64572 100644
--- a/inference-engine/tests/unit/cnn_network/layout_tests.cpp
+++ b/inference-engine/tests/unit/cnn_network/layout_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/cnn_network/mean_image.cpp b/inference-engine/tests/unit/cnn_network/mean_image.cpp
index 88a9dfd3d..2c31fa1ef 100644
--- a/inference-engine/tests/unit/cnn_network/mean_image.cpp
+++ b/inference-engine/tests/unit/cnn_network/mean_image.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/cnn_network/mean_image.h b/inference-engine/tests/unit/cnn_network/mean_image.h
index a6a481af5..3b4ffcedf 100644
--- a/inference-engine/tests/unit/cnn_network/mean_image.h
+++ b/inference-engine/tests/unit/cnn_network/mean_image.h
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/cnn_network/parser_tests_base.hpp b/inference-engine/tests/unit/cnn_network/parser_tests_base.hpp
index 3ec99f137..28c4646b2 100644
--- a/inference-engine/tests/unit/cnn_network/parser_tests_base.hpp
+++ b/inference-engine/tests/unit/cnn_network/parser_tests_base.hpp
@@ -1,21 +1,20 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
+#include <fstream>
#include <gtest/gtest.h>
#include "xml_father.hpp"
#include "cnn_network_impl.hpp"
#include <tests_common.hpp>
-#include "v2_format_parser.h"
+#include "ie_format_parser.h"
#include <string>
#include "pugixml.hpp"
#include "xml_parse_utils.h"
#include "mean_image.h"
#include "ie_blob_proxy.hpp"
-#include <fstream>
class FormatParserTest : public TestsCommon {
public:
@@ -93,7 +92,7 @@ class FormatParserTest : public TestsCommon {
int version = XMLParseUtils::GetIntAttr(root, "version", 2);
if (version < 2) THROW_IE_EXCEPTION << "Deprecated IR's versions: " << version;
if (version > 3) THROW_IE_EXCEPTION << "cannot parse future versions: " << version;
- parser.reset(new InferenceEngine::details::V2FormatParser(version));
+ parser.reset(new InferenceEngine::details::FormatParser(version));
net = parser->Parse(root);
}
@@ -331,7 +330,7 @@ xml().node("net").attr("name", "AlexNet").attr("version", x)\
return testing::XMLFather();
}
- std::shared_ptr<InferenceEngine::details::V2FormatParser> parser;
+ std::shared_ptr<InferenceEngine::details::FormatParser> parser;
public:
@@ -380,4 +379,4 @@ xml().node("net").attr("name", "AlexNet").attr("version", x)\
std::istreambuf_iterator<char>());
return str;
}
-}; \ No newline at end of file
+};
diff --git a/inference-engine/tests/unit/cnn_network/v2_format_parser_test.cpp b/inference-engine/tests/unit/cnn_network/v2_format_parser_test.cpp
index f5713dbf7..1b9cdc042 100644
--- a/inference-engine/tests/unit/cnn_network/v2_format_parser_test.cpp
+++ b/inference-engine/tests/unit/cnn_network/v2_format_parser_test.cpp
@@ -1,12 +1,11 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
#include <cpp/ie_cnn_network.h>
#include <gtest/gtest.h>
#include "xml_father.hpp"
-#include "inference_engine/v2_format_parser.h"
+#include "inference_engine/ie_format_parser.h"
#include <string>
#include <pugixml.hpp>
#include <fstream>
diff --git a/inference-engine/tests/unit/cnn_network/v3_format_parser_test.cpp b/inference-engine/tests/unit/cnn_network/v3_format_parser_test.cpp
index a8c6e7003..b80d2cb20 100644
--- a/inference-engine/tests/unit/cnn_network/v3_format_parser_test.cpp
+++ b/inference-engine/tests/unit/cnn_network/v3_format_parser_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -95,3 +94,97 @@ TEST_F(V3FormatParserTest, DISABLE_conv3DInvalidKernel) {
ASSERT_NO_FATAL_FAILURE(assertParseFail(content));
}
+
+class V2ParserPublicSegments: public InferenceEngine::details::FormatParser {
+public:
+ const std::map<std::string, LayerParseParameters>& getLayerParseParameters() {
+ return layersParseInfo;
+ }
+};
+
+TEST_F(V3FormatParserTest, LargeWeights) {
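+    // The blob offsets and sizes below exceed the 32-bit range, so the parser must keep full 64-bit values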
+ std::string model = R"V0G0N(
+<net name="PVANET" version="3" batch="1">
+ <layers>
+ <layer name="data" type="Input" precision="FP32" id="0">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>1024</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="1" name="MatMul" precision="FP32" type="FullyConnected">
+ <data out-size="800000"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>1024</dim>
+ </port>
+ </input>
+ <output>
+ <port id="3">
+ <dim>1</dim>
+ <dim>800000</dim>
+ </port>
+ </output>
+ <blobs>
+ <weights offset="891492352" size="3276800000"/>
+ <biases offset="4168292352" size="3200000"/>
+ </blobs>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
+ </edges>
+</net>)V0G0N";
+
+ parse(model);
+
+ auto params = ((V2ParserPublicSegments *)parser.get())->getLayerParseParameters();
+ ASSERT_NE(params.end(), params.find("MatMul"));
+ ASSERT_EQ(891492352, params["MatMul"].blobs["weights"].start);
+ ASSERT_EQ(3276800000, params["MatMul"].blobs["weights"].size);
+ ASSERT_EQ(4168292352, params["MatMul"].blobs["biases"].start);
+ ASSERT_EQ(3200000, params["MatMul"].blobs["biases"].size);
+}
+
+TEST_F(V3FormatParserTest, IncorrectWeights) {
+ std::string model = R"V0G0N(
+<net name="PVANET" version="3" batch="1">
+ <layers>
+ <layer name="data" type="Input" precision="FP32" id="0">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>1024</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="1" name="MatMul" precision="FP32" type="FullyConnected">
+ <data out-size="800000"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>1024</dim>
+ </port>
+ </input>
+ <output>
+ <port id="3">
+ <dim>1</dim>
+ <dim>800000</dim>
+ </port>
+ </output>
+ <blobs>
+ <weights offset="891492352" size="-64"/>
+ <biases offset="4168292352" size="3200000"/>
+ </blobs>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
+ </edges>
+</net>)V0G0N";
+
+ assertParseFail(model);
+} \ No newline at end of file
diff --git a/inference-engine/tests/unit/cnn_network/xml_father_tests.cpp b/inference-engine/tests/unit/cnn_network/xml_father_tests.cpp
index 86eb5dbc7..451a15b94 100644
--- a/inference-engine/tests/unit/cnn_network/xml_father_tests.cpp
+++ b/inference-engine/tests/unit/cnn_network/xml_father_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/engines/gna/I8_quantisation_test.cpp b/inference-engine/tests/unit/engines/gna/I8_quantisation_test.cpp
new file mode 100644
index 000000000..8e69a3b63
--- /dev/null
+++ b/inference-engine/tests/unit/engines/gna/I8_quantisation_test.cpp
@@ -0,0 +1,117 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+#include <gtest/gtest.h>
+#include <inference_engine/layer_transform.hpp>
+#include <gna_plugin/quantization/model_quantizer.hpp>
+#include <cpp/ie_cnn_net_reader.h>
+#include "gna_plugin/quantization/layer_quantizer.hpp"
+#include "gna_matcher.hpp"
+
+using namespace InferenceEngine;
+using namespace GNAPluginNS;
+using namespace GNATestIRs;
+
+class I8QuantisationTest : public GNATest {
+ protected:
+    LayersQuantizer<QuantI8> lc = LayersQuantizer<QuantI8>(1.0f);
+
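+    // Injects quantisation statistics into a copy of the layer and runs the quantizer over it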
+    InferenceEngine::CNNLayerPtr quantize(InferenceEngine::CNNLayerPtr lp) {
+ auto newLayer = InferenceEngine::injectData<QuantizedLayerParams>(lp);
+ transformLayer(newLayer, lc);
+ return newLayer;
+    }
+
+ void SetUp() override {
+ }
+
+};
+
+// TODO: add test for FC weights after quantization
+TEST_F(I8QuantisationTest, canQuantizeFCLayer){
+
+ auto fc = std::make_shared<FullyConnectedLayer>(LayerParams{"name", "type", Precision::FP32});
+ fc->_out_num = 9;
+ auto weights = make_shared_blob<float>(Precision::FP32, {1, 1});
+ fc->_weights = weights;
+ fc->_biases = make_shared_blob<float>(Precision::FP32, {1, 1});
+ fc->_weights->allocate();
+ fc->_biases->allocate();
+ std::shared_ptr<Data> outData = std::make_shared<Data>("data", SizeVector({1, 1}), Precision::FP32, Layout::NC);
+ fc->outData.push_back(outData);
+ fc->insData.push_back(outData);
+
+    // push the weights to the maximum so the actual quantisation algorithm is exercised
+ for (auto && w : *weights) {
+ w = MAX_OUT_MULTIPLIER * MAX_VAL_1B_WEIGHT;
+ }
+
+ fillWeights(fc->_biases);
+
+ ASSERT_NO_THROW(quantize(fc));
+}
+
+TEST_F(I8QuantisationTest, canQuantizeActivation){
+
+    auto sigmoid = std::make_shared<GenericLayer>(LayerParams{"name", "type", Precision::FP32});
+ sigmoid->params["value"] = 2;
+ sigmoid->type = "Activation";
+
+ ASSERT_NO_THROW(quantize(sigmoid));
+}
+
+TEST_F(I8QuantisationTest, inputPrecisionIs16Bits){
+
+ ModelQuantizer<QuantI8> q;
+
+ CNNNetReader net_reader;
+ ASSERT_NO_THROW(net_reader.ReadNetwork(Fc2DOutputModel().data(), Fc2DOutputModel().length()));
+
+    auto weights = make_shared_blob<uint8_t>(Precision::U8, C, {440});
+ weights->allocate();
+ fillWeights(weights);
+ net_reader.SetWeights(weights);
+ auto newNet = q.quantize(net_reader.getNetwork(), 1000);
+ InputsDataMap inputs;
+ newNet->getInputsInfo(inputs);
+ auto inputLayer = inputs.begin()->second->getInputData()->inputTo.begin()->second->insData.front().lock()->creatorLayer.lock();
+
+ ASSERT_EQ(inputLayer->precision, Precision::I16);
+}
+
+TEST_F(I8QuantisationTest, failIfFCDimensionIs1){
+
+ ModelQuantizer<QuantI8> q;
+
+ CNNNetReader net_reader;
+ ASSERT_NO_THROW(net_reader.ReadNetwork(FCOnlyModel().data(), FCOnlyModel().length()));
+
+    auto weights = make_shared_blob<uint8_t>(Precision::U8, C, {440});
+ weights->allocate();
+ fillWeights(weights);
+ net_reader.SetWeights(weights);
+
+ ASSERT_ANY_THROW(q.quantize(net_reader.getNetwork(), 1000));
+}
+
+TEST_F(I8QuantisationTest, outputAffinePrecisionIs32Bits){
+
+ ModelQuantizer<QuantI8> q;
+
+ CNNNetReader net_reader;
+ ASSERT_NO_THROW(net_reader.ReadNetwork(Fc2DOutputModel().data(), Fc2DOutputModel().length()));
+
+    auto weights = make_shared_blob<uint8_t>(Precision::U8, C, {440});
+ weights->allocate();
+ fillWeights(weights);
+ net_reader.SetWeights(weights);
+
+ auto newNet = q.quantize(net_reader.getNetwork(), 1000);
+ InputsDataMap inputs;
+ newNet->getInputsInfo(inputs);
+ auto affineDataPtr = inputs.begin()->second->getInputData()->inputTo.begin()->second->outData.front();
+
+ ASSERT_EQ(affineDataPtr->precision, Precision::I32);
+}
diff --git a/inference-engine/tests/unit/engines/gna/configuration_test.cpp b/inference-engine/tests/unit/engines/gna/configuration_test.cpp
new file mode 100644
index 000000000..e17e6dbee
--- /dev/null
+++ b/inference-engine/tests/unit/engines/gna/configuration_test.cpp
@@ -0,0 +1,136 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include <gtest/gtest.h>
+#include <mock_icnn_network.hpp>
+#include "gna_plugin/gna_plugin_config.hpp"
+#include "gna_matcher.hpp"
+#include "test_irs.hpp"
+
+using namespace InferenceEngine;
+using namespace GNAPluginNS;
+using namespace ::testing;
+
+class GNAConfigTest : public GNATest {
+
+ protected:
+ MockICNNNetwork net;
+
+ void SetUp() override {
+ }
+};
+
+TEST_F(GNAConfigTest, reportAnErrorIfConfigNotFound) {
+
+    Config c({{TargetDevice::eGNA, Precision::I16},
+              {TargetDevice::eCPU, Precision::FP32}});
+
+ EXPECT_CALL(net, getPrecision()).WillRepeatedly(Return(Precision::FP32));
+ EXPECT_CALL(net, getTargetDevice()).WillRepeatedly(Return(TargetDevice::eGNA));
+
+ ASSERT_ANY_THROW(c.find_configuration(net));
+}
+
+TEST_F(GNAConfigTest, canFindConfiguration) {
+
+    Config c({{TargetDevice::eGNA, Precision::I16},
+              {TargetDevice::eCPU, Precision::FP32}});
+
+ EXPECT_CALL(net, getPrecision()).WillRepeatedly(Return(Precision::FP32));
+ EXPECT_CALL(net, getTargetDevice()).WillRepeatedly(Return(TargetDevice::eCPU));
+
+ auto match = c.find_configuration(net);
+
+ EXPECT_EQ(match.device, TargetDevice::eCPU);
+ EXPECT_EQ(match.networkPrec, Precision::FP32);
+}
+
+TEST_F(GNAConfigTest, canPassTroughNetworkAfterFindConfiguration) {
+
+    Config c({{TargetDevice::eGNA, Precision::I16},
+              {TargetDevice::eCPU, Precision::FP32}});
+
+ EXPECT_CALL(net, getPrecision()).WillRepeatedly(Return(Precision::FP32));
+ EXPECT_CALL(net, getTargetDevice()).WillRepeatedly(Return(TargetDevice::eCPU));
+
+ auto match = c.find_configuration(net);
+
+ auto net2 = match.convert(net);
+
+ EXPECT_EQ(net2->getTargetDevice(), TargetDevice::eCPU);
+ EXPECT_EQ(net2->getPrecision(), Precision::FP32);
+}
+
+TEST_F(GNAConfigTest, canNotMatchWithDefaultDevice) {
+
+    Config c({{TargetDevice::eGNA, Precision::I16},
+              {TargetDevice::eCPU, Precision::FP32}});
+
+ c.setDefaultDevice(TargetDevice::eGNA);
+
+ EXPECT_CALL(net, getPrecision()).WillRepeatedly(Return(Precision::FP32));
+ EXPECT_CALL(net, getTargetDevice()).WillRepeatedly(Return(TargetDevice::eDefault));
+
+ EXPECT_ANY_THROW(c.find_configuration(net).convert(net));
+}
+
+TEST_F(GNAConfigTest, canMatchWithDefaultDevice) {
+
+    Config c({{TargetDevice::eGNA, Precision::I16},
+              {TargetDevice::eCPU, Precision::FP32}});
+
+ c.setDefaultDevice(TargetDevice::eGNA);
+
+ EXPECT_CALL(net, getPrecision()).WillRepeatedly(Return(Precision::I16));
+ EXPECT_CALL(net, getTargetDevice()).WillRepeatedly(Return(TargetDevice::eDefault));
+
+ auto net2 = c.find_configuration(net).convert(net);
+
+ EXPECT_EQ(net2->getTargetDevice(), TargetDevice::eDefault);
+ EXPECT_EQ(net2->getPrecision(), Precision::I16);
+}
+
+TEST_F(GNAConfigTest, canMatchWith1AsyncThread) {
+ assert_that()
+ .onInferModel(GNATestIRs::Fc2DOutputModel())
+ .inNotCompactMode()
+ .withAcceleratorThreadsNumber("1")
+ .gna().propagate_forward().called_without().pwl_inserted_into_nnet();
+}
+
+TEST_F(GNAConfigTest, canMatchWith4AsyncThreads) {
+ assert_that()
+ .onInferModel(GNATestIRs::Fc2DOutputModel())
+ .inNotCompactMode()
+ .withAcceleratorThreadsNumber("4")
+ .gna().propagate_forward().called_without().pwl_inserted_into_nnet();
+}
+
+TEST_F(GNAConfigTest, canNOTMatchWith0AsyncThreads) {
+ assert_that()
+ .onInferModel(GNATestIRs::Fc2DOutputModel())
+ .inNotCompactMode()
+ .withAcceleratorThreadsNumber("0")
+ .gna().propagate_forward().called_without().pwl_inserted_into_nnet()
+ .throws();
+}
+
+TEST_F(GNAConfigTest, canNOTMatchWith128AsyncThreads) {
+ assert_that()
+ .onInferModel(GNATestIRs::Fc2DOutputModel())
+ .inNotCompactMode()
+ .withAcceleratorThreadsNumber("128")
+ .gna().propagate_forward().called_without().pwl_inserted_into_nnet()
+ .throws();
+}
+
+TEST_F(GNAConfigTest, canMatchWithSingleMultipleOMPThreads) {
+ assert_that()
+ .onInferModel(GNATestIRs::Fc2DOutputModel())
+ .inNotCompactMode()
+ .enable_omp_multithreading()
+ .gna().propagate_forward().called_without().pwl_inserted_into_nnet();
+} \ No newline at end of file
diff --git a/inference-engine/tests/unit/engines/gna/gna_allocator_test.cpp b/inference-engine/tests/unit/engines/gna/gna_allocator_test.cpp
new file mode 100644
index 000000000..35ddc770c
--- /dev/null
+++ b/inference-engine/tests/unit/engines/gna/gna_allocator_test.cpp
@@ -0,0 +1,78 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "gna_plugin/gna_allocator.hpp"
+
+#include <vector>
+#include <thread>
+
+#include <gtest/gtest.h>
+#include "gna_plugin/gna_device.hpp"
+// Dummy profiler definitions to work around an issue with the Linux userspace library
+typedef unsigned long long time_tsc;
+typedef struct
+{
+ time_tsc start; // time value on profiler start
+ time_tsc stop; // time value on profiler stop
+ time_tsc passed; // time passed between start and stop
+} intel_gna_profiler_tsc;
+
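+// These profiler hooks are deliberate no-ops: presumably the test binary only
+// needs the symbols to link against the userspace library.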
+void profilerTscStop(intel_gna_profiler_tsc* p) {
+ if (NULL == p) return;
+ p->passed = 0;
+ p->stop = 0;
+ p->start = 0;
+}
+void profilerTscStartAccumulate(intel_gna_profiler_tsc* p) {
+ if (NULL == p) return;
+ p->stop = 0;
+ p->start = 0;
+}
+void profilerTscStopAccumulate(intel_gna_profiler_tsc* p) {
+ if (NULL == p) return;
+ p->stop = 0;
+ p->passed += p->stop - p->start;
+}
+
+class GNAAllocatorTest : public ::testing::Test {
+
+ protected:
+ std::unique_ptr<GNADeviceHelper> gnadevice;
+ void SetUp() override {
+ // gnadevice.reset(new GNADeviceHelper());
+ }
+};
+
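+// make_polymorph is assumed to type-erase an allocator behind the plugin's
+// polymorphic allocator interface; it is exercised below with std::allocator
+// and with the real GNA device allocator.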
+TEST_F(GNAAllocatorTest, canAllocateStdMemory) {
+ auto sp = make_polymorph<std::allocator<uint8_t>>();
+ uint8_t *x = nullptr;
+ ASSERT_NO_THROW(x = sp.allocate(100));
+ ASSERT_NE(x, nullptr);
+ ASSERT_NO_THROW(sp.deallocate(x, 100));
+}
+
+TEST_F(GNAAllocatorTest, canAllocateGNAMemory) {
+ // the GNA device can only be opened once per process for now
+ gnadevice.reset(new GNADeviceHelper());
+ auto sp = make_polymorph<GNAAllocator>(*gnadevice.get());
+ uint8_t *x = nullptr;
+ ASSERT_NO_THROW(x = sp.allocate(100));
+ ASSERT_NE(x, nullptr);
+ ASSERT_NO_THROW(sp.deallocate(x, 100));
+}
+
+TEST_F(GNAAllocatorTest, DISABLED_canOpenDevice) {
+ std::thread th([]()
+ {
+ GNADeviceHelper h1;
+ });
+ th.join();
+ std::thread th2([]()
+ {
+ GNADeviceHelper h1;
+ });
+ th2.join();
+}
diff --git a/inference-engine/tests/unit/engines/gna/gna_api_stub.cpp b/inference-engine/tests/unit/engines/gna/gna_api_stub.cpp
new file mode 100644
index 000000000..5417e52ad
--- /dev/null
+++ b/inference-engine/tests/unit/engines/gna/gna_api_stub.cpp
@@ -0,0 +1,218 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#define INTEL_GNA_DLLEXPORT 1
+#include <gna-api.h>
+#include <gna-api-dumper.h>
+#include <gna-api-instrumentation.h>
+#include "gna_mock_api.hpp"
+
+static GNACppApi * current = nullptr;
+
+GNACppApi::GNACppApi() {
+ current = this;
+}
+
+GNACppApi::~GNACppApi() {
+ current = nullptr;
+}
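+
+// Redirection pattern: constructing a GNACppApi (usually a StrictMock<GNACppApi>
+// inside a test) registers it in `current`, so every C entry point below
+// forwards to the active gmock object; with no mock registered, the stubs fall
+// back to benign defaults. Illustrative use, mirroring gna_matcher.cpp:
+//
+//     StrictMock<GNACppApi> mockApi;
+//     EXPECT_CALL(mockApi, GNAAlloc(_, _, _)).WillOnce(...);
+//     // ... run plugin code that ends up calling the C API ...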
+
+#ifdef __cplusplus
+extern "C" { // API uses C linkage so that it can be used by C and C++ applications
+#endif
+
+
+/**
+ * intel_gna_status_t members printable descriptions
+ * Size: NUMGNASTATUS + 1
+ */
+DLLDECL const char *GNAStatusName[] = {"status"};
+
+/**
+ * intel_gmm_mode_t members printable descriptions
+ * Size: NUMGMMMODES + 1
+ */
+DLLDECL const char *GMMModeName[] = {"model"};
+
+/**
+ * // TODO: fill
+ */
+DLLDECL intel_gna_status_t GNAScoreGaussians(
+ intel_gna_handle_t handle,
+ const intel_feature_type_t* pFeatureType,
+ const intel_feature_t* pFeatureData,
+ const intel_gmm_type_t* pModelType,
+ const intel_gmm_t* pModelData,
+ const uint32_t* pActiveGMMIndices,
+ uint32_t nActiveGMMIndices,
+ uint32_t uMaximumScore,
+ intel_gmm_mode_t nGMMMode,
+ uint32_t* pScores,
+ uint32_t* pReqId,
+ intel_gna_proc_t nAccelerationType
+) {
+ if (current != nullptr) {
+ return current->GNAScoreGaussians(
+ //handle,
+ //pFeatureType,
+ pFeatureData,
+ pModelType,
+ pModelData,
+ pActiveGMMIndices,
+ nActiveGMMIndices,
+ uMaximumScore,
+ nGMMMode,
+ pScores,
+ pReqId,
+ nAccelerationType);
+ }
+ return GNA_NOERROR;
+}
+
+DLLDECL intel_gna_status_t GNAPropagateForward(
+ intel_gna_handle_t handle,
+ const intel_nnet_type_t* pNeuralNetwork,
+ const uint32_t* pActiveIndices,
+ uint32_t nActiveIndices,
+ uint32_t* pReqId,
+ intel_gna_proc_t nAccelerationType
+) {
+ if (current != nullptr) {
+ return current->GNAPropagateForward(
+ handle,
+ pNeuralNetwork,
+ pActiveIndices,
+ nActiveIndices,
+ pReqId,
+ nAccelerationType);
+ }
+ return GNA_NOERROR;
+}
+
+// TODO: add output status
+/**
+ * // TODO: fill
+ */
+DLLDECL void *GNAAlloc(
+ intel_gna_handle_t nGNADevice, // handle to GNA accelerator
+ uint32_t sizeRequested,
+ uint32_t* sizeGranted
+) {
+ if (current != nullptr) {
+ return current->GNAAlloc(nGNADevice, sizeRequested, sizeGranted);
+ }
+ if (sizeGranted != nullptr) {
+ *sizeGranted = sizeRequested;
+ }
+ return (void*)1;
+}
+
+/**
+ * // TODO: fill
+ */
+DLLDECL intel_gna_status_t GNAFree(
+ intel_gna_handle_t nGNADevice // handle to GNA accelerator
+) {
+ if (current != nullptr) {
+ return current->GNAFree(nGNADevice);
+ }
+ return GNA_NOERROR;
+}
+
+/**
+ * // TODO: fill
+ */
+DLLDECL intel_gna_handle_t GNADeviceOpen(
+ intel_gna_status_t* status // Status of the call
+) {
+ if (current != nullptr) {
+ return current->GNADeviceOpen(status);
+ }
+ return 0;
+
+}
+
+/**
+* // TODO: fill
+*/
+DLLDECL intel_gna_handle_t GNADeviceOpenSetThreads(
+ intel_gna_status_t* status, // Status of the call
+ uint8_t n_threads // Number of worker threads
+) {
+ if (current != nullptr) {
+ return current->GNADeviceOpenSetThreads(status, n_threads);
+ }
+ return 0;  // fall back like GNADeviceOpen; GNA_NOERROR is a status, not a handle
+
+}
+
+/**
+ * // TODO: fill
+ */
+DLLDECL intel_gna_status_t GNADeviceClose(
+ intel_gna_handle_t nGNADevice // handle to GNA accelerator
+) {
+ if (current != nullptr) {
+ return current->GNADeviceClose(nGNADevice);
+ }
+ return GNA_NOERROR;
+
+}
+
+/**
+ * // TODO: fill
+ */
+DLLDECL intel_gna_status_t GNAWait(
+ intel_gna_handle_t nGNADevice, // handle to GNA accelerator
+ uint32_t nTimeoutMilliseconds,
+ uint32_t reqId // IN score request ID
+) {
+ if (current != nullptr) {
+ return current->GNAWait(nGNADevice, nTimeoutMilliseconds, reqId);
+ }
+ return GNA_NOERROR;
+}
+
+DLLDECL intel_gna_status_t GNAWaitPerfRes(
+ intel_gna_handle_t nGNADevice, // handle to GNA accelerator
+ uint32_t nTimeoutMilliseconds,
+ uint32_t reqId, // IN score request ID
+ intel_gna_perf_t* nGNAPerfResults
+) {
+ if (current != nullptr) {
+ return current->GNAWaitPerfRes(nGNADevice,
+ nTimeoutMilliseconds,
+ reqId,
+ nGNAPerfResults);
+ }
+ return GNA_NOERROR;
+}
+
+DLLDECL void* GNADumpXnn(
+ const intel_nnet_type_t* neuralNetwork,
+ const uint32_t* activeIndices,
+ uint32_t activeIndicesCount,
+ intel_gna_model_header* modelHeader,
+ intel_gna_status_t* status,
+ intel_gna_alloc_cb customAlloc) {
+ if (current != nullptr) {
+ return current->GNADumpXnn(neuralNetwork,
+ activeIndices,
+ activeIndicesCount,
+ modelHeader,
+ status,
+ customAlloc);
+ }
+ return nullptr;
+}
+
+DLLDECL void gmmSetThreads(
+ int num
+) {
+ if (current != nullptr) {
+ current->gmmSetThreads((num != 0) ? num : 1);
+ }
+}
+#ifdef __cplusplus
+}
+#endif
+
diff --git a/inference-engine/tests/unit/engines/gna/gna_graph_aot_test.cpp b/inference-engine/tests/unit/engines/gna/gna_graph_aot_test.cpp
new file mode 100644
index 000000000..45385bee3
--- /dev/null
+++ b/inference-engine/tests/unit/engines/gna/gna_graph_aot_test.cpp
@@ -0,0 +1,85 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+#include <gtest/gtest.h>
+#include <inference_engine/layer_transform.hpp>
+#include <gna_plugin/quantization/model_quantizer.hpp>
+#include "gna_plugin/quantization/layer_quantizer.hpp"
+#include "gna_matcher.hpp"
+
+using namespace InferenceEngine;
+using namespace GNAPluginNS;
+using namespace GNATestIRs;
+
+class GNAAOTTests : public GNATest {
+ protected:
+ std::list<std::string> files_to_remove;
+ std::string registerFileForRemove(std::string file_to_remove) {
+ files_to_remove.push_back(file_to_remove);
+ return file_to_remove;
+ }
+ void TearDown() override {
+ for (auto & file : files_to_remove) {
+ std::remove(file.c_str());
+ }
+ }
+
+ void SetUp() override {
+ }
+};
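+
+// The tests below perform an export/import round trip: a model is exported
+// ahead-of-time to a temporary file (registered for removal in TearDown),
+// then inference runs from the imported blob instead of the IR.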
+
+TEST_F(GNAAOTTests, AffineWith2AffineOutputs_can_be_exported_imported) {
+
+ const std::string X = registerFileForRemove("unit_tests.bin");
+
+ // running export to a file
+ export_network(AffineWith2AffineOutputsModel())
+ .inNotCompactMode().as().gna().model().to(X);
+
+ // running inference from the imported model instead of the IR
+ assert_that().onInferModel().importedFrom(X)
+ .inNotCompactMode().gna().propagate_forward().called().once();
+}
+
+
+TEST_F(GNAAOTTests, AffineWith2AffineOutputs_can_be_imported_verify_structure) {
+
+ auto & nnet_type = storage<intel_nnet_type_t>();
+
+ // saving a pointer to the nnet - TODO: a deep copy is probably required
+ save_args().onInferModel(AffineWith2AffineOutputsModel())
+ .inNotCompactMode().from().gna().propagate_forward().to(&nnet_type);
+
+ const std::string X = registerFileForRemove("unit_tests.bin");
+
+ // running export to a file
+ export_network(AffineWith2AffineOutputsModel())
+ .inNotCompactMode().as().gna().model().to(X);
+
+ // running inference from the imported model instead of the IR
+ assert_that().onInferModel().importedFrom(X)
+ .inNotCompactMode().gna().propagate_forward().called_with().exact_nnet_structure(&nnet_type);
+
+}
+
+TEST_F(GNAAOTTests, CanConvertFromAOTtoSueModel) {
+
+ auto & nnet_type = storage<intel_nnet_type_t>();
+
+ // saving a pointer to the nnet - TODO: a deep copy is probably required
+ save_args().onInferModel(AffineWith2AffineOutputsModel())
+ .inNotCompactMode().from().gna().propagate_forward().to(&nnet_type);
+
+ const std::string X = registerFileForRemove("unit_tests.bin");
+
+ // running export to a file
+ export_network(AffineWith2AffineOutputsModel())
+ .inNotCompactMode().as().gna().model().to(X);
+
+ // running inference from the imported model instead of the IR
+ assert_that().onInferModel().importedFrom(X)
+ .inNotCompactMode().withGNAConfig(GNA_CONFIG_KEY(FIRMWARE_MODEL_IMAGE), "sue.dump").gna().dumpXNN().called();
+}
+
diff --git a/inference-engine/tests/unit/engines/gna/gna_hardware_precision_test.cpp b/inference-engine/tests/unit/engines/gna/gna_hardware_precision_test.cpp
new file mode 100644
index 000000000..b7dba213e
--- /dev/null
+++ b/inference-engine/tests/unit/engines/gna/gna_hardware_precision_test.cpp
@@ -0,0 +1,49 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include <gtest/gtest.h>
+#include <mock_icnn_network.hpp>
+#include <cpp/ie_cnn_net_reader.h>
+#include <gmock/gmock-generated-actions.h>
+#include "gna_matcher.hpp"
+
+using namespace std;
+using namespace InferenceEngine;
+using namespace ::testing;
+
+class GNAHWPrecisionTest : public GNATest {
+
+};
+
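+// Note: with I8 weights the GNA device pairs each bias with a scaling term
+// (intel_compound_bias_t), which is presumably why the bias precision in the
+// I8 test below differs from the plain I32 biases of the I16 configurations.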
+TEST_F(GNAHWPrecisionTest, defaultPrecisionIsInt16) {
+ assert_that().onInfer1AFModel().gna().propagate_forward().called_with().
+ nnet_input_precision(Precision::I16).
+ nnet_ouput_precision(Precision::I32).
+ nnet_weights_precision(Precision::I16).
+ nnet_biases_precision(Precision::I32);
+}
+
+TEST_F(GNAHWPrecisionTest, canPassInt8Precision) {
+ assert_that().onInfer1AFModel().withConfig(PRECISION, Precision::I8).
+ gna().propagate_forward().called_with().
+ nnet_input_precision(Precision::I16).
+ nnet_ouput_precision(Precision::I32).
+ nnet_weights_precision(Precision::I8).
+ nnet_biases_precision(Precision::fromType<intel_compound_bias_t>());
+}
+
+TEST_F(GNAHWPrecisionTest, canPassInt16Precision) {
+ assert_that().onInfer1AFModel().withConfig(PRECISION, Precision::I16).
+ gna().propagate_forward().called_with().
+ nnet_input_precision(Precision::I16).
+ nnet_ouput_precision(Precision::I32).
+ nnet_weights_precision(Precision::I16).
+ nnet_biases_precision(Precision::I32);
+}
+
+TEST_F(GNAHWPrecisionTest, failToCreatePluginWithUnsupportedPrecision) {
+ assert_that().creating().gna_plugin().withConfig(PRECISION, Precision::FP32).throws();
+} \ No newline at end of file
diff --git a/inference-engine/tests/unit/engines/gna/gna_matcher.cpp b/inference-engine/tests/unit/engines/gna/gna_matcher.cpp
new file mode 100644
index 000000000..c609e4e8f
--- /dev/null
+++ b/inference-engine/tests/unit/engines/gna/gna_matcher.cpp
@@ -0,0 +1,440 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <mock_icnn_network.hpp>
+#include "gna_matcher.hpp"
+#include <gna/gna_config.hpp>
+#include <gna-api-types-xnn.h>
+#include <gna_plugin/gna_executable_network.hpp>
+#include "gna_plugin.hpp"
+#include "gna_mock_api.hpp"
+#include "matchers/precision_matcher.hpp"
+#include "matchers/pwl_matcher.hpp"
+#include "matchers/copy_matcher.hpp"
+#include "matchers/diag_matcher.hpp"
+#include "matchers/pwl_quantization_metrics_matcher.hpp"
+#include "matchers/conv_matcher.hpp"
+#include "matchers/pool_matcher.hpp"
+
+#include <gmock/gmock-generated-actions.h>
+#include <gmock/gmock-more-actions.h>
+#include "gmock/gmock.h"
+
+using namespace std;
+using namespace InferenceEngine;
+using namespace GNAPluginNS;
+using namespace ::testing;
+
+class NullAllocator : public IAllocator {
+ void * ptr = nullptr;
+public:
+ NullAllocator() {
+ ptr = malloc(1);
+ }
+ ~NullAllocator() {
+ ::free(ptr);  // global free, not the IAllocator::free member
+ }
+ void * lock(void * handle, LockOp = LOCK_FOR_WRITE) noexcept override {
+ return ptr;
+ }
+ void unlock(void * handle) noexcept override {
+
+ }
+ void * alloc(size_t size) noexcept override {
+ return ptr;
+ }
+ virtual bool free(void* handle) noexcept {
+ return true;
+ }
+ virtual void Release() noexcept {
+ delete this;
+ }
+};
+
+void GNAPropagateMatcher::match() {
+ try {
+ // matching gna propagate forward call.
+ GNAPlugin plugin(_env.config);
+ size_t inputSize = 10;
+ size_t outputSize = 10;
+
+ auto loadNetworkFromIR = [&] () {
+ CNNNetReader net_reader;
+ ASSERT_NO_THROW_IE_EXCEPTION(net_reader.ReadNetwork(_env.model.data(), _env.model.length()));
+
+ auto weights_fake = make_shared<TBlob<uint8_t>>(Precision::U8, C, SizeVector({std::numeric_limits<uint32_t>::max()}), make_shared<NullAllocator>());
+ net_reader.SetWeights(weights_fake);
+
+ auto net_original = net_reader.getNetwork();
+ auto input_dims = net_original.getInputsInfo().begin()->second->getTensorDesc().getDims();
+ auto output = net_original.getOutputsInfo();
+ // sometimes a network is created without outputs, e.g. with only a memory output
+ auto output_dims = !output.empty() ? output.begin()->second->getTensorDesc().getDims() : input_dims;
+
+ inputSize = details::product(std::begin(input_dims), std::end(input_dims));
+ outputSize = details::product(std::begin(output_dims), std::end(output_dims));
+
+ size_t weightsSize = 0;
+ for (auto &layer : net_original) {
+ auto w = layer->blobs["weights"];
+ auto b = layer->blobs["biases"];
+
+ if (w) {
+ weightsSize += w->byteSize();
+ }
+ if (b) {
+ weightsSize += b->byteSize();
+ }
+ }
+ auto weights = make_shared_blob<uint8_t>(Precision::U8, C, {weightsSize});
+
+ weights->allocate();
+ GNATest::fillWeights(weights);
+ net_reader.SetWeights(weights);
+
+ net_reader.getNetwork().setTargetDevice(_env.target_device);
+
+ if (_env.cb) {
+ auto network = net_reader.getNetwork();
+ _env.cb(network);
+ }
+
+ plugin.LoadNetwork(net_reader.getNetwork());
+ };
+
+ auto loadNetworkFromAOT = [&] () {
+ plugin.ImportNetwork(_env.importedModelFileName);
+ };
+
+ TBlob<float>::Ptr input, output;
+ size_t in_N = 1;
+ size_t out_N = in_N;
+ size_t in_C;
+ size_t out_C;
+
+
+ auto loadNetwork = [&]() {
+ if (!_env.importedModelFileName.empty()) {
+ ASSERT_NO_FATAL_FAILURE(loadNetworkFromAOT());
+ } else {
+ ASSERT_NO_FATAL_FAILURE(loadNetworkFromIR());
+ }
+ in_C = _env.matchOutput ? _env.input_init.size() : inputSize;
+ out_C = _env.matchOutput ? _env.expected_output.size() : outputSize;
+
+ input.reset(new TBlob<float>(Precision::FP32, NC, {in_C, in_N}));
+ input->allocate();
+
+ if (_env.matchOutput) {
+ std::copy_n(_env.input_init.cbegin(), in_N * in_C, input->buffer().as<float *>());
+ }
+
+ output.reset(new TBlob<float>(Precision::FP32, NC, {out_C, out_N}));
+ output->allocate();
+ };
+
+
+ StrictMock<GNACppApi> mockApi;
+ std::vector<uint8_t> data;
+
+ if (_env.target_device == InferenceEngine::TargetDevice::eGNA &&
+ !_env.matchThrows) {
+
+ EXPECT_CALL(mockApi, GNAAlloc(_,_,_)).WillOnce(Invoke([&data](
+ intel_gna_handle_t nGNADevice, // handle to GNA accelerator
+ uint32_t sizeRequested,
+ uint32_t* sizeGranted
+ ) {
+ data.resize(sizeRequested);
+ *sizeGranted = sizeRequested;
+ return &data.front();
+ }));
+ EXPECT_CALL(mockApi, GNADeviceOpenSetThreads(_, _)).WillOnce(Return(1));
+
+ if (!_env.is_profiling_enabled) {
+ EXPECT_CALL(mockApi, GNAWait(_, _, _)).WillOnce(Return(GNA_NOERROR));
+ } else {
+ EXPECT_CALL(mockApi, GNAWaitPerfRes(_, _, _, _)).WillOnce(Return(GNA_NOERROR));
+ }
+
+ if (_env.is_setup_of_omp_threads_expected) {
+ EXPECT_CALL(mockApi, gmmSetThreads(_)).Times(1);
+ } else {
+ EXPECT_CALL(mockApi, gmmSetThreads(_)).Times(0);
+ }
+
+ std::unique_ptr<NNetComponentMatcher> combined(new NNetComponentMatcher());
+
+ for (auto & matchWhat : _env.whatToMatch) {
+ switch(matchWhat) {
+ case GnaPluginTestEnvironment::matchPrecision :
+ combined->add(new NNetPrecisionMatcher(_env.nnet_precision, INTEL_AFFINE));
+ break;
+ case GnaPluginTestEnvironment::matchProcType :
+ EXPECT_CALL(mockApi, GNAPropagateForward(_, _, _, _, _, Eq(_env.proc_type)))
+ .WillOnce(Return(GNA_NOERROR));
+ break;
+ case GnaPluginTestEnvironment::matchPwlInserted :
+ combined->add(new PWLMatcher(_env.matchInserted, _env.matchQuantity));
+ break;
+ case GnaPluginTestEnvironment::matchConvInserted:
+ combined->add(new ConvoluionLayerMatcher(_env.matchInserted, _env.matchQuantity));
+ break;
+ case GnaPluginTestEnvironment::matchMaxPoolingInserted:
+ combined->add(new PoolingLayerMatcher(_env.matchInserted, _env.matchQuantity, true));
+ break;
+ case GnaPluginTestEnvironment::matchPwlQuantizeMetrics :
+ combined->add(new PWLQuantizationMetricsMatcher(_env.type,
+ _env.quantization_precision_threshold,
+ _env.quantization_segments_threshold));
+ break;
+ case GnaPluginTestEnvironment::matchCopyInserted :
+ combined->add(new CopyLayerMatcher(_env.matchInserted, _env.matchQuantity));
+ break;
+ case GnaPluginTestEnvironment::matchDiagonalInserted :
+ combined->add(new DiagLayerMatcher(_env.matchInserted, _env.matchQuantity));
+ break;
+ case GnaPluginTestEnvironment::saveArgs :
+ EXPECT_CALL(mockApi, GNAPropagateForward(_, _, _, _, _, _))
+ .WillOnce(DoAll(SaveArgPointee<1>(savedNet), Return(GNA_NOERROR)));
+ break;
+ default:
+ EXPECT_CALL(mockApi, GNAPropagateForward(_, _, _, _, _, _))
+ .WillOnce(Return(GNA_NOERROR));
+ break;
+ }
+ }
+ if (combined && !combined->empty()) {
+ EXPECT_CALL(mockApi, GNAPropagateForward(_, ::testing::MakeMatcher(combined.release()), _, _, _,_)).WillOnce(Return(GNA_NOERROR));
+ }
+ }
+
+ loadNetwork();
+ plugin.Infer(*input, *output);
+ if (_env.matchOutput) {
+ std::vector<float> actual_output(output->size());
+
+ std::copy_n(output->cbuffer().as<float *>(), out_C * out_N, actual_output.begin());
+
+ ASSERT_TRUE(std::equal(_env.expected_output.begin(), _env.expected_output.end(), actual_output.begin()));
+ }
+
+ std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> perfMap;
+ plugin.GetPerformanceCounts(perfMap);
+
+ if (_env.is_profiling_enabled) {
+ ASSERT_FALSE(perfMap.empty());
+ } else {
+ ASSERT_TRUE(perfMap.empty());
+ }
+
+ }
+ catch(std::exception &ex) {
+ if (!_env.matchThrows) {
+ FAIL() << ex.what();
+ }
+ }
+ catch(...) {
+ if (!_env.matchThrows) {
+ FAIL() << "unknown exception thrown";
+ }
+ }
+
+}
+
+void GNAPluginCreationMatcher::match() {
+ if (_env.matchThrows) {
+ ASSERT_ANY_THROW(GNAPlugin(_env.config));
+ return;
+ }
+ GNAPlugin(_env.config);
+}
+
+
+void GNAPluginAOTMatcher::match() {
+ // matching the AOT path: LoadNetwork followed by Export.
+ MockICNNNetwork net;
+ CNNNetReader net_reader;
+ ASSERT_NO_THROW_IE_EXCEPTION(net_reader.ReadNetwork(_env.model.data(), _env.model.length()));
+
+ size_t weightsSize = 440 * 3;
+
+ auto weights = make_shared_blob<uint8_t>(Precision::U8, C, {weightsSize});
+ weights->allocate();
+ GNATest::fillWeights(weights);
+ net_reader.SetWeights(weights);
+
+ GNAPlugin plugin(_env.config);
+
+ TBlob<float> input(Precision::FP32, NC, {10, 1});
+ input.allocate();
+
+
+ TBlob<float> output(Precision::FP32, NC, {10, 1});
+ output.allocate();
+
+ net_reader.getNetwork().setTargetDevice(TargetDevice::eGNA);
+
+ if (_env.cb) {
+ auto network = net_reader.getNetwork();
+ _env.cb(network);
+ }
+
+ GNACppApi mockApi;
+ std::vector<uint8_t> data(10000);
+ EXPECT_CALL(mockApi, GNAAlloc(_,_,_)).WillOnce(DoAll(SetArgPointee<2>(10000), Return(&data.front())));
+ EXPECT_CALL(mockApi, GNADeviceOpenSetThreads(_, _)).WillOnce(Return(1));
+
+ plugin.LoadNetwork(net_reader.getNetwork());
+ plugin.Export(_env.exportedModelFileName);
+}
+
+
+void GNADumpXNNMatcher::load(GNAPlugin & plugin) {
+
+ // matching the GNA DumpXNN call.
+ plugin = GNAPlugin(_env.config);
+
+ auto loadNetworkFromIR = [&]() {
+ MockICNNNetwork net;
+ CNNNetReader net_reader;
+ ASSERT_NO_THROW_IE_EXCEPTION(net_reader.ReadNetwork(_env.model.data(), _env.model.length()));
+
+ size_t weightsSize = 440 * 3;
+
+ auto weights = make_shared_blob<uint8_t>(Precision::U8, C, {weightsSize});
+ weights->allocate();
+ GNATest::fillWeights(weights);
+ net_reader.SetWeights(weights);
+
+ net_reader.getNetwork().setTargetDevice(TargetDevice::eGNA);
+
+ if (_env.cb) {
+ auto network = net_reader.getNetwork();
+ _env.cb(network);
+ }
+
+ plugin.LoadNetwork(net_reader.getNetwork());
+ };
+
+ auto loadNetworkFromAOT = [&]() {
+ plugin.ImportNetwork(_env.importedModelFileName);
+ };
+
+ auto loadNetwork = [&]() {
+ if (!_env.importedModelFileName.empty()) {
+ loadNetworkFromAOT();
+ } else {
+ loadNetworkFromIR();
+ }
+ };
+
+ loadNetwork();
+}
+
+void GNADumpXNNMatcher::match() {
+
+ GNACppApi mockApi;
+ std::vector<uint8_t> data(10000);
+ if (!_env.matchThrows) {
+
+ EXPECT_CALL(mockApi, GNAAlloc(_,_,_)).WillOnce(DoAll(SetArgPointee<2>(10000), Return(&data.front())));
+ EXPECT_CALL(mockApi, GNADeviceOpenSetThreads(_, _)).WillOnce(Return(1));
+ intel_gna_model_header header = {};
+ header.model_size = 1;
+ EXPECT_CALL(mockApi, GNADumpXnn(_, _, _, _, _,_)).WillOnce(DoAll(SetArgPointee<3>(header), Return((void*)::operator new(1))));
+ EXPECT_CALL(mockApi, GNAFree(_)).WillOnce(Return(GNA_NOERROR));
+ EXPECT_CALL(mockApi, GNADeviceClose(_)).WillOnce(Return(GNA_NOERROR));
+ }
+
+ try {
+ // matching the GNA DumpXNN call.
+ GNAPluginNS::GNAPlugin plugin;
+ load(plugin);
+ }
+ catch(std::exception &ex) {
+ if (!_env.matchThrows) {
+ FAIL() << ex.what();
+ }
+ }
+ catch(...) {
+ if (!_env.matchThrows) {
+ FAIL() << "unknown exception thrown";
+ }
+ }
+
+}
+
+void GNAQueryStateMatcher::match() {
+
+ // TODO: avoid copy-paste
+ GNACppApi mockApi;
+ std::vector<uint8_t> data(10000);
+
+ std::shared_ptr<IExecutableNetworkInternal> executor;
+ auto loadNetworkFromIR = [&]() {
+ MockICNNNetwork net;
+ CNNNetReader net_reader;
+ ASSERT_NO_THROW_IE_EXCEPTION(net_reader.ReadNetwork(_env.model.data(), _env.model.length()));
+
+ size_t weightsSize = 440 * 3;
+
+ auto weights = make_shared_blob<uint8_t>(Precision::U8, C, {weightsSize});
+ weights->allocate();
+ GNATest::fillWeights(weights);
+ net_reader.SetWeights(weights);
+
+ net_reader.getNetwork().setTargetDevice(TargetDevice::eGNA);
+
+ if (_env.cb) {
+ auto network = net_reader.getNetwork();
+ _env.cb(network);
+ }
+
+ executor.reset(new GNAExecutableNetwork(net_reader.getNetwork(), _env.config));
+ };
+
+ auto loadNetworkFromAOT = [&]() {
+ executor.reset(new GNAExecutableNetwork(_env.importedModelFileName, _env.config));
+ };
+
+ auto loadNetwork = [&]() {
+ if (!_env.importedModelFileName.empty()) {
+ return loadNetworkFromAOT();
+ } else {
+ return loadNetworkFromIR();
+ }
+ };
+
+
+ EXPECT_CALL(mockApi, GNAAlloc(_,_,_)).WillOnce(DoAll(SetArgPointee<2>(10000), Return(&data.front())));
+ EXPECT_CALL(mockApi, GNADeviceOpenSetThreads(_, _)).WillOnce(Return(1));
+ EXPECT_CALL(mockApi, GNAFree(_)).WillOnce(Return(GNA_NOERROR));
+ EXPECT_CALL(mockApi, GNADeviceClose(_)).WillOnce(Return(GNA_NOERROR));
+
+ try {
+ loadNetwork();
+ if (GnaPluginTestEnvironment::kAnyNotNull == _env.numberOfStates) {
+ auto states = executor->QueryState();
+ ASSERT_NE(states.size(), 0);
+ // each state should at least be resettable
+ for (auto & state : states) {
+ state->Reset();
+ }
+ } else if (_env.numberOfStates >= 0) {
+ ASSERT_EQ(executor->QueryState().size(), _env.numberOfStates);
+ } else {
+ FAIL() << "number of memory states expectation not set";
+ }
+
+ }
+ catch(std::exception &ex) {
+ FAIL() << ex.what();
+ }
+ catch(...) {
+ FAIL() << "unknown exception thrown";
+ }
+} \ No newline at end of file
diff --git a/inference-engine/tests/unit/engines/gna/gna_matcher.hpp b/inference-engine/tests/unit/engines/gna/gna_matcher.hpp
new file mode 100644
index 000000000..b249aa2a8
--- /dev/null
+++ b/inference-engine/tests/unit/engines/gna/gna_matcher.hpp
@@ -0,0 +1,490 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+#include <limits>
+#include <inference_engine/graph_tools.hpp>
+#include "gtest/gtest.h"
+#include "inference_engine.hpp"
+#include "gna/gna_config.hpp"
+#include "gna_plugin.hpp"
+#include "gna-api.h"
+#include "test_irs.hpp"
+#include "dnn.h"
+
+
+#define withConfig(key, value) withGNAConfig(GNA_CONFIG_KEY(key), value)
+#define ASSERT_NO_THROW_IE_EXCEPTION(expr) \
+try { \
+ expr; \
+} catch(std::exception & e) { \
+ FAIL() << e.what(); \
+} catch(...) { \
+ FAIL() << "unknown exception"; \
+}
+
+/**
+ * GNA unit tests environment
+ */
+class GnaPluginTestEnvironment {
+ public:
+ struct NnetPrecision {
+ InferenceEngine::Precision input_precision;
+ InferenceEngine::Precision output_precision;
+ InferenceEngine::Precision weights_precision;
+ InferenceEngine::Precision biases_precision;
+ };
+ enum MatchWhat {
+ exactNNetStructure,
+ matchNone,
+ matchProcType,
+ matchPrecision,
+ matchPwlInserted,
+ matchConvInserted,
+ matchMaxPoolingInserted,
+ matchPwlQuantizeMetrics,
+ matchCopyInserted,
+ matchDiagonalInserted,
+ saveArgs
+ };
+ std::vector<MatchWhat> whatToMatch;
+ enum {
+ kUnset = -1,
+ kAnyNotNull = -2
+ };
+ InferenceEngine::TargetDevice target_device =
+ InferenceEngine::TargetDevice::eGNA;
+ int matchQuantity = kUnset;
+ int numberOfStates = kUnset;
+ bool matchInserted = true;
+ NnetPrecision nnet_precision;
+ float quantization_precision_threshold = 1.0f;
+ uint16_t quantization_segments_threshold = UINT16_MAX;
+ uint32_t type = 0;
+ std::string model;
+ std::string exportedModelFileName;
+ bool exportNetworkOnly = false;
+ std::function<void (InferenceEngine::CNNNetwork &)> cb;
+ std::map<std::string, std::string> config;
+ bool matchThrows = false;
+ uint32_t proc_type = static_cast<intel_gna_proc_t>(GNA_SOFTWARE & GNA_HARDWARE);
+ std::string importedModelFileName;
+ bool is_profiling_enabled = false;
+ bool matchOutput = false;
+ bool is_setup_of_omp_threads_expected = false;
+ std::vector<float> input_init;
+ std::vector<float> expected_output;
+};
+
+class GNATestBase {
+ public:
+ virtual ~GNATestBase() = default;
+};
+
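+/**
+ * @brief CRTP-style fluent configurator: every setter returns the derived
+ * matcher type (via dynamic_cast<T*>(this)), so configuration calls chain.
+ */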
+template <class T>
+class GNATestConfigurability : public GNATestBase {
+ protected:
+ bool needNextMatcher = true;
+ GnaPluginTestEnvironment _env;
+ GnaPluginTestEnvironment::MatchWhat & getMatcher() {
+ if (needNextMatcher) {
+ needNextMatcher = false;
+ _env.whatToMatch.push_back({});
+ }
+ return _env.whatToMatch.back();
+ }
+ public:
+ GNATestConfigurability(GnaPluginTestEnvironment env) : _env(env) {
+ }
+ T & And() {
+ needNextMatcher = true;
+ return *dynamic_cast<T*>(this);
+ }
+ template <class VType>
+ T & withGNAConfig(const std::string keyName, const VType &value) {
+ std::stringstream ss;
+ ss << value;
+ _env.config[keyName] = ss.str();
+ return *dynamic_cast<T*>(this);
+ }
+ T & withGNADeviceMode(std::string value) {
+ _env.config[GNA_CONFIG_KEY(DEVICE_MODE)] = value;
+ return *dynamic_cast<T*>(this);
+ }
+ T & withAcceleratorThreadsNumber(std::string value) {
+ _env.config[GNA_CONFIG_KEY(LIB_N_THREADS)] = value;
+ return *dynamic_cast<T*>(this);
+ }
+ T & throws() {
+ _env.matchThrows = true;
+ return *dynamic_cast<T*>(this);
+ }
+ T & profiling_counters() {
+ _env.is_profiling_enabled = true;
+ _env.config[CONFIG_KEY(PERF_COUNT)] = InferenceEngine::PluginConfigParams::YES;
+ return *dynamic_cast<T*>(this);
+ }
+
+ T & enable_omp_multithreading() {
+ _env.is_setup_of_omp_threads_expected = true;
+ _env.config[CONFIG_KEY(SINGLE_THREAD)] = InferenceEngine::PluginConfigParams::NO;
+ return *dynamic_cast<T*>(this);
+ }
+};
+
+/**
+ * @brief matches LoadNetwork + Infer + the resulting GNA propagate_forward call
+ */
+class GNAPropagateMatcher : public GNATestConfigurability<GNAPropagateMatcher> {
+ public:
+ using base = GNATestConfigurability<GNAPropagateMatcher>;
+ using base::base;
+ using base::getMatcher;
+
+ ~GNAPropagateMatcher() {
+ match();
+ }
+
+ GNAPropagateMatcher & called() {
+ // inserting default matcher that matches any propagate_forward call
+ getMatcher();
+ return *this;
+ }
+
+ GNAPropagateMatcher & called_with() {
+ return *this;
+ }
+
+ GNAPropagateMatcher & called_without() {
+ _env.matchInserted = false;
+ return *this;
+ }
+
+ GNAPropagateMatcher & called_with_input_and_expected_output(std::vector<float>& input_data,
+ std::vector<float>& expect) {
+ _env.matchOutput = true;
+ _env.input_init = input_data;
+ _env.expected_output = expect;
+ return *this;
+ }
+
+ GNAPropagateMatcher & once() {
+ _env.matchQuantity = 1;
+ return *this;
+ }
+
+ GNAPropagateMatcher & twice() {
+ _env.matchQuantity = 2;
+ return *this;
+ }
+
+ GNAPropagateMatcher & args(std::string args) {
+ return *this;
+ }
+
+ GNAPropagateMatcher & exact_nnet_structure(intel_nnet_type_t * pNet) {
+
+ getMatcher() = GnaPluginTestEnvironment::exactNNetStructure;
+ original_nnet = pNet;
+ return *this;
+ }
+
+ GNAPropagateMatcher & pwl_inserted_into_nnet() {
+ getMatcher() = GnaPluginTestEnvironment::matchPwlInserted;
+ return *this;
+ }
+
+ GNAPropagateMatcher & max_pooling_inserted_into_nnet() {
+ getMatcher() = GnaPluginTestEnvironment::matchMaxPoolingInserted;
+ return *this;
+ }
+
+ GNAPropagateMatcher & succeed() {
+ return *this;
+ }
+
+ GNAPropagateMatcher & convolution_inserted_into_nnet() {
+ getMatcher() = GnaPluginTestEnvironment::matchConvInserted;
+ return *this;
+ }
+
+
+ GNAPropagateMatcher & pwl_quantization_activation(uint32_t activation_type) {
+ getMatcher() = GnaPluginTestEnvironment::matchPwlQuantizeMetrics;
+ _env.type = activation_type;
+ return *this;
+ }
+
+ GNAPropagateMatcher & pwl_quantization_precision_threshold(float threshold) {
+ getMatcher() = GnaPluginTestEnvironment::matchPwlQuantizeMetrics;
+ _env.quantization_precision_threshold = threshold;
+ return *this;
+ }
+
+ GNAPropagateMatcher & pwl_quantization_segments_threshold(uint16_t threshold) {
+ getMatcher() = GnaPluginTestEnvironment::matchPwlQuantizeMetrics;
+ _env.quantization_segments_threshold = threshold;
+ return *this;
+ }
+
+ GNAPropagateMatcher & diagonal_inserted_into_nnet() {
+ getMatcher() = GnaPluginTestEnvironment::matchDiagonalInserted;
+ return *this;
+ }
+
+ GNAPropagateMatcher & copy_inserted_into_nnet() {
+ getMatcher() = GnaPluginTestEnvironment::matchCopyInserted;
+ return *this;
+ }
+
+ GNAPropagateMatcher & nnet_input_precision(const InferenceEngine::Precision &precision) {
+ getMatcher() = GnaPluginTestEnvironment::matchPrecision;
+ _env.nnet_precision.input_precision = precision;
+ return *this;
+ }
+ GNAPropagateMatcher & nnet_ouput_precision(const InferenceEngine::Precision &precision) {
+ getMatcher() = GnaPluginTestEnvironment::matchPrecision;
+ _env.nnet_precision.output_precision = precision;
+ return *this;
+ }
+ GNAPropagateMatcher & nnet_weights_precision(const InferenceEngine::Precision &precision) {
+ getMatcher() = GnaPluginTestEnvironment::matchPrecision;
+ _env.nnet_precision.weights_precision = precision;
+ return *this;
+ }
+ GNAPropagateMatcher & nnet_biases_precision(const InferenceEngine::Precision &precision) {
+ getMatcher() = GnaPluginTestEnvironment::matchPrecision;
+ _env.nnet_precision.biases_precision = precision;
+ return *this;
+ }
+
+ GNAPropagateMatcher & proc_type(uint32_t proc_type) {
+ getMatcher() = GnaPluginTestEnvironment::matchProcType;
+ _env.proc_type = proc_type;
+ return *this;
+ }
+
+ GNAPropagateMatcher & to(intel_nnet_type_t *savedNet) {
+ this->savedNet = savedNet;
+ return *this;
+ }
+
+ GNAPropagateMatcher & onCPU() {
+ _env.target_device = InferenceEngine::TargetDevice::eCPU;
+ return *this;
+ }
+ protected:
+ void match();
+ intel_nnet_type_t * original_nnet = nullptr;
+ intel_nnet_type_t * savedNet = nullptr;
+};
+
+
+/**
+ * @brief matches the GNAPlugin creation-only case
+ */
+class GNAPluginCreationMatcher : public GNATestConfigurability<GNAPluginCreationMatcher> {
+ public:
+ using base = GNATestConfigurability<GNAPluginCreationMatcher>;
+ using base::base;
+
+ GNAPluginCreationMatcher & gna_plugin() {
+ return *this;
+ }
+ ~GNAPluginCreationMatcher() {
+ match();
+ }
+ protected:
+ void match();
+};
+
+/**
+ * @brief matches the AOT export path: LoadNetwork followed by Export
+ */
+class GNAPluginAOTMatcher : public GNATestConfigurability<GNAPluginAOTMatcher> {
+ public:
+ using base = GNATestConfigurability<GNAPluginAOTMatcher>;
+ using base::base;
+
+ ~GNAPluginAOTMatcher() {
+ match();
+ }
+ protected:
+ void match();
+};
+
+/**
+ * @brief xnn api tests
+ */
+class GNADumpXNNMatcher : public GNATestConfigurability<GNADumpXNNMatcher> {
+ public:
+ using base = GNATestConfigurability<GNADumpXNNMatcher>;
+ using base::base;
+
+ ~GNADumpXNNMatcher() {
+ if (match_in_dctor) {
+ match();
+ }
+ }
+ GNADumpXNNMatcher& called() {
+ return *this;
+ }
+ protected:
+
+ bool match_in_dctor = true;
+ void load(GNAPluginNS::GNAPlugin & plugin);
+ void match();
+};
+
+/**
+ * @brief query-state api tests
+ */
+class GNAQueryStateMatcher : public GNADumpXNNMatcher {
+ public:
+ using base = GNADumpXNNMatcher;
+ using base::base;
+
+ ~GNAQueryStateMatcher() {
+ if (match_in_dctor) {
+ match();
+ match_in_dctor = false;
+ }
+ }
+ void isEmpty() {
+ _env.numberOfStates = 0;
+ }
+ void isNotEmpty() {
+ _env.numberOfStates = GnaPluginTestEnvironment::kAnyNotNull;
+ }
+
+ protected:
+ void match();
+};
+
+
+
+/**
+ * @brief base for test fixture
+ */
+class GNATest : public ::testing::Test, public GNATestConfigurability<GNATest> {
+ using base = GNATestConfigurability<GNATest>;
+ using base::_env;
+ std::list<std::vector<uint8_t>> dataUsedInMatchers;
+ std::list<std::shared_ptr<GNATestBase>> returnedMatchers;
+
+ public:
+ template <class T>
+ T & storage() {
+ dataUsedInMatchers.push_back(std::vector<uint8_t >(sizeof(T)));
+ return *reinterpret_cast<T*> (&dataUsedInMatchers.back().front());
+ }
+ GNATest() : base(GnaPluginTestEnvironment()) {}
+ GNATest & as() {
+ return *this;
+ }
+ GNATest & model() {
+ return *this;
+ }
+ GNATest & assert_that() {
+ return *this;
+ }
+ GNATest & export_network(std::string modelName) {
+ _env.model = modelName;
+ _env.exportNetworkOnly = true;
+ return *this;
+ }
+ GNATest & save_args() {
+ getMatcher() = GnaPluginTestEnvironment::saveArgs;
+ return *this;
+ }
+
+ GNATest & onInfer1AFModel() {
+ _env.model = GNATestIRs::Fc2DOutputModel();
+ return *this;
+ }
+ GNATest & onLoad(std::string _model) {
+ _env.model = _model;
+ return *this;
+ }
+ GNATest & afterLoadingModel(std::string _model) {
+ _env.model = _model;
+ return *this;
+ }
+
+ GNAQueryStateMatcher & queryState() {
+ returnedMatchers.push_back(std::make_shared<GNAQueryStateMatcher>(_env));
+ // clearing env;
+ _env = GnaPluginTestEnvironment();
+ return dynamic_cast<GNAQueryStateMatcher&>(*returnedMatchers.back());
+ }
+
+ /** importing indicates that no inference has happened at all */
+ GNAPropagateMatcher & importingModelFrom(std::string fileName) {
+ _env.importedModelFileName = fileName;
+ returnedMatchers.push_back(std::make_shared<GNAPropagateMatcher>(_env));
+ // clearing env;
+ _env = GnaPluginTestEnvironment();
+ return dynamic_cast<GNAPropagateMatcher&>(*returnedMatchers.back());
+ }
+ GNATest & importedFrom(std::string fileName) {
+ _env.importedModelFileName = fileName;
+ return *this;
+ }
+ GNATest & onInferModel(std::string _model = "",
+ std::function<void (InferenceEngine::CNNNetwork &)> _cb = [](InferenceEngine::CNNNetwork & net){}) {
+ _env.model = _model;
+ _env.cb = _cb;
+ return *this;
+ }
+ GNATest & gna() {
+ return *this;
+ }
+ GNATest & from() {
+ return *this;
+ }
+ GNATest & inNotCompactMode() {
+ _env.config[GNA_CONFIG_KEY(COMPACT_MODE)] = CONFIG_VALUE(NO);
+ return *this;
+ }
+ GNATest & withUniformPWLAlgo() {
+ base::_env.config[GNA_CONFIG_KEY(PWL_UNIFORM_DESIGN)] = CONFIG_VALUE(YES);
+ return *this;
+ }
+ GNAPropagateMatcher& propagate_forward() {
+ returnedMatchers.push_back(std::make_shared<GNAPropagateMatcher>(_env));
+ //clearing env;
+ _env = GnaPluginTestEnvironment();
+ return dynamic_cast<GNAPropagateMatcher&>(*returnedMatchers.back());
+ }
+ GNADumpXNNMatcher& dumpXNN() {
+ returnedMatchers.push_back(std::make_shared<GNADumpXNNMatcher>(_env));
+ //clearing env;
+ _env = GnaPluginTestEnvironment();
+ return dynamic_cast<GNADumpXNNMatcher&>(*returnedMatchers.back());
+ }
+ GNATest & withNanScaleFactor() {
+ base::_env.config[GNA_CONFIG_KEY(SCALE_FACTOR)] = std::to_string(std::numeric_limits<float>::quiet_NaN());
+ return *this;
+ }
+ GNATest & withInfScaleFactor() {
+ base::_env.config[GNA_CONFIG_KEY(SCALE_FACTOR)] = std::to_string(std::numeric_limits<float>::infinity());
+ return *this;
+ }
+ GNAPluginCreationMatcher creating() {
+ return _env;
+ }
+
+ GNAPluginAOTMatcher & to(std::string fileName) {
+ _env.exportedModelFileName = fileName;
+ returnedMatchers.push_back(std::make_shared<GNAPluginAOTMatcher>(_env));
+ //clearing env;
+ _env = GnaPluginTestEnvironment();
+ return dynamic_cast<GNAPluginAOTMatcher&>(*returnedMatchers.back());
+ }
+
+ static void fillWeights(InferenceEngine::Blob::Ptr weights, float value = 1) {
+ std::fill_n(weights->buffer().as<float*>(), weights->byteSize()/sizeof(float), value);
+ }
+};
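+
+// Illustrative composition of the DSL above (SomeModel stands for any model
+// from GNATestIRs; this mirrors the tests in this patch):
+//
+//     assert_that().onInferModel(SomeModel()).inNotCompactMode()
+//         .gna().propagate_forward().called_with().proc_type(GNA_SOFTWARE);
+//
+// The terminal matcher runs its checks in the destructor, once the full
+// expression has been configured.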
diff --git a/inference-engine/tests/unit/engines/gna/gna_memory_test.cpp b/inference-engine/tests/unit/engines/gna/gna_memory_test.cpp
new file mode 100644
index 000000000..aaf0f5776
--- /dev/null
+++ b/inference-engine/tests/unit/engines/gna/gna_memory_test.cpp
@@ -0,0 +1,440 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+#include <gtest/gtest.h>
+#include "gna_plugin/gna_memory.hpp"
+
+using namespace GNAPluginNS;
+
+class GNAMemoryTest : public ::testing::Test {
+
+ protected:
+ GNAMemory<std::allocator<uint8_t>> mem;
+
+ void SetUp() override {
+ }
+};
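+
+// GNAMemory is a two-phase arena: push/reserve/bind calls only queue requests,
+// and commit() later performs a single allocation and patches every registered
+// pointer, which is why the pointers below stay null until commit() runs.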
+
+TEST_F(GNAMemoryTest, canStoreActualBlob) {
+ float input [] = {1,2,3};
+ float* pFuture = nullptr;
+ size_t len = sizeof(input);
+
+ mem.push_ptr(&pFuture, input, len);
+ mem.commit();
+
+ ASSERT_NE(pFuture, nullptr);
+ ASSERT_NE(pFuture, input);
+ ASSERT_EQ(pFuture[0], 1);
+ ASSERT_EQ(pFuture[1], 2);
+ ASSERT_EQ(pFuture[2], 3);
+}
+
+TEST_F(GNAMemoryTest, canStore2Blobs) {
+ float input [] = {1,2,3,4};
+ float* pFuture = nullptr;
+ float* pFuture2 = nullptr;
+
+ mem.push_ptr(&pFuture, input, 3*4);
+ mem.push_ptr(&pFuture2, input+1, 3*4);
+ mem.commit();
+
+ ASSERT_NE(pFuture, input);
+ ASSERT_NE(pFuture2, input);
+ ASSERT_EQ(pFuture + 3, pFuture2);
+
+ ASSERT_EQ(pFuture[0], 1);
+ ASSERT_EQ(pFuture[1], 2);
+ ASSERT_EQ(pFuture[2], 3);
+ ASSERT_EQ(pFuture[3], 2);
+ ASSERT_EQ(pFuture[4], 3);
+ ASSERT_EQ(pFuture[5], 4);
+}
+
+TEST_F(GNAMemoryTest, canStoreBlobsALIGNED) {
+ float input [] = {1,2,3,4,5,6,7,8};
+ float* pFuture = nullptr;
+
+ mem.push_ptr(&pFuture, input, 3*4, 8);
+ mem.commit();
+
+ ASSERT_EQ(16, mem.getTotalBytes());
+
+ ASSERT_NE(pFuture, input);
+ ASSERT_NE(pFuture, nullptr);
+
+ ASSERT_EQ(pFuture[0], 1);
+ ASSERT_EQ(pFuture[1], 2);
+ ASSERT_EQ(pFuture[2], 3);
+ // the element past the copied region should not match the source data
+ ASSERT_NE(pFuture[3], 4);
+}
+
+TEST_F(GNAMemoryTest, canStore2BlobsALIGNED) {
+ float input [] = {1,2,3,4,5,6,7,8};
+ float* pFuture = nullptr;
+ float* pFuture2 = nullptr;
+
+ mem.push_ptr(&pFuture, input, 3*4, 8);
+ mem.push_ptr(&pFuture2, input, 3*4, 16);
+ mem.commit();
+
+ ASSERT_EQ(32, mem.getTotalBytes());
+
+ ASSERT_NE(pFuture, nullptr);
+
+ ASSERT_EQ(pFuture[0], 1);
+ ASSERT_EQ(pFuture[1], 2);
+ ASSERT_EQ(pFuture[2], 3);
+ // the second blob starts at its 16-byte aligned offset
+ ASSERT_EQ(pFuture[4], 1);
+ ASSERT_EQ(pFuture[5], 2);
+ ASSERT_EQ(pFuture[6], 3);
+
+}
+
+TEST_F(GNAMemoryTest, canReserveData) {
+
+ float* pFuture = nullptr;
+ mem.reserve_ptr(&pFuture, 3*4);
+ mem.commit();
+
+ ASSERT_NE(pFuture, nullptr);
+}
+
+TEST_F(GNAMemoryTest, canReserveDataByVoid) {
+ mem.reserve_ptr(nullptr, 3*4);
+ ASSERT_NO_THROW(mem.commit());
+}
+
+
+TEST_F(GNAMemoryTest, canReserveAndPushData) {
+
+ float input[] = {1, 2, 3};
+ float *pFuture = nullptr;
+ float* pFuture2 = nullptr;
+ size_t len = sizeof(input);
+
+ mem.push_ptr(&pFuture, input, len);
+ mem.reserve_ptr(&pFuture2, 3*4);
+ mem.commit();
+
+ ASSERT_NE(pFuture, nullptr);
+ ASSERT_NE(pFuture2, nullptr);
+ ASSERT_NE(pFuture, input);
+ ASSERT_NE(pFuture2, pFuture);
+
+ pFuture2[0] = -1;
+ pFuture2[1] = -1;
+ pFuture2[2] = -1;
+
+ ASSERT_EQ(pFuture[0], 1);
+ ASSERT_EQ(pFuture[1], 2);
+ ASSERT_EQ(pFuture[2], 3);
+}
+
+TEST_F(GNAMemoryTest, canBindAndResolve) {
+
+ float input[] = {1, 2, 3};
+ float *pFuture = nullptr;
+ float *pFuture2 = nullptr;
+ float *pFuture3 = nullptr;
+ size_t len = sizeof(input);
+
+ mem.bind_ptr(&pFuture3, &pFuture);
+ mem.push_ptr(&pFuture, input, len);
+ mem.bind_ptr(&pFuture2, &pFuture);
+
+ mem.commit();
+
+ ASSERT_NE(pFuture, input);
+ ASSERT_NE(pFuture2, nullptr);
+ ASSERT_EQ(pFuture2, pFuture);
+ ASSERT_EQ(pFuture3, pFuture);
+
+ ASSERT_EQ(pFuture2[0], 1);
+ ASSERT_EQ(pFuture2[1], 2);
+ ASSERT_EQ(pFuture2[2], 3);
+}
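+
+// bind_ptr registers an alias: after commit() the bound pointer resolves to
+// the same (optionally offset) address as its target, regardless of the order
+// in which the requests were queued.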
+
+TEST_F(GNAMemoryTest, canBindTransitivelyAndResolve) {
+
+ float input[] = {1, 2, 3};
+ float *pFuture = nullptr;
+ float *pFuture3 = nullptr;
+ float *pFuture4 = nullptr;
+ size_t len = sizeof(input);
+
+ mem.bind_ptr(&pFuture4, &pFuture3);
+ mem.bind_ptr(&pFuture3, &pFuture);
+ mem.push_ptr(&pFuture, input, len);
+
+ mem.commit();
+
+ ASSERT_NE(pFuture, input);
+ ASSERT_EQ(pFuture3, pFuture);
+ ASSERT_EQ(pFuture4, pFuture);
+
+ ASSERT_NE(pFuture4, nullptr);
+
+ ASSERT_EQ(pFuture4[0], 1);
+ ASSERT_EQ(pFuture4[1], 2);
+ ASSERT_EQ(pFuture4[2], 3);
+}
+
+TEST_F(GNAMemoryTest, canBindTransitivelyWithOffsetsAndResolve) {
+
+ float input[] = {1, 2, 3};
+ float *pFuture = nullptr;
+ float *pFuture3 = nullptr;
+ float *pFuture4 = nullptr;
+ size_t len = sizeof(input);
+
+ mem.bind_ptr(&pFuture4, &pFuture3, 4);
+ mem.bind_ptr(&pFuture3, &pFuture, 4);
+ mem.push_ptr(&pFuture, input, len);
+
+ mem.commit();
+
+ ASSERT_NE(pFuture, input);
+ ASSERT_EQ(pFuture3, pFuture + 1);
+ ASSERT_EQ(pFuture4, pFuture + 2);
+
+ ASSERT_NE(pFuture, nullptr);
+
+ ASSERT_EQ(pFuture[0], 1);
+ ASSERT_EQ(pFuture[1], 2);
+ ASSERT_EQ(pFuture[2], 3);
+}
+
+TEST_F(GNAMemoryTest, canBindWithOffsetAndResolve) {
+
+ float input[] = {1, 2, 3};
+ float *pFuture = nullptr;
+ float *pFuture2 = nullptr;
+ float *pFuture3 = nullptr;
+ size_t len = sizeof(input);
+
+ mem.bind_ptr(&pFuture3, &pFuture, 4);
+ mem.push_ptr(&pFuture, input, len);
+ mem.bind_ptr(&pFuture2, &pFuture);
+
+ mem.commit();
+
+ ASSERT_NE(pFuture, input);
+ ASSERT_NE(pFuture2, nullptr);
+ ASSERT_EQ(pFuture2, pFuture);
+ ASSERT_NE(pFuture3, nullptr);
+ ASSERT_EQ(pFuture3, pFuture + 1);
+
+ ASSERT_EQ(pFuture2[0], 1);
+ ASSERT_EQ(pFuture2[1], 2);
+ ASSERT_EQ(pFuture2[2], 3);
+ ASSERT_EQ(pFuture3[0], 2);
+}
+
+
+TEST_F(GNAMemoryTest, canPushLocal) {
+
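+ // pFuture points at itself: its value is exactly the address commit() must
+ // patch, so no separate destination variable is needed.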
+ float* pFuture = (float*)&pFuture;
+
+ {
+ std::vector<float> input = {1.0f, 2.0f, 3.0f, 4.0f};
+ mem.push_local_ptr(pFuture, &*input.begin(), 4 * 4, 1);
+ }
+
+ // poison the stack so a stale read of the out-of-scope vector would be caught
+ float input [] = {11,21,31,41};
+ mem.commit();
+
+ ASSERT_FLOAT_EQ(pFuture[0], 1);
+ ASSERT_FLOAT_EQ(pFuture[1], 2);
+ ASSERT_FLOAT_EQ(pFuture[2], 3);
+ ASSERT_FLOAT_EQ(pFuture[3], 4);
+}
+
+TEST_F(GNAMemoryTest, canPushValue) {
+
+ float* pFuture = (float*)&pFuture;
+ float* pFuture2 = (float*)&pFuture2;
+
+ {
+ mem.push_value(pFuture, 3.f, 2);
+ mem.push_value(pFuture2, 13.f, 2);
+ }
+
+ mem.commit();
+
+ ASSERT_FLOAT_EQ(pFuture[0], 3);
+ ASSERT_FLOAT_EQ(pFuture[1], 3);
+ ASSERT_FLOAT_EQ(pFuture[2], 13);
+ ASSERT_FLOAT_EQ(pFuture[3], 13);
+}
+
+TEST_F(GNAMemoryTest, canPushReadOnlyValue) {
+
+ float* pFuture = (float*)&pFuture;
+ float* pFuture2 = (float*)&pFuture2;
+
+ {
+ mem.push_value(pFuture, 3.f, 2);
+ mem.readonly().push_value(pFuture2, 13.f, 2);
+ }
+
+ mem.commit();
+
+ ASSERT_FLOAT_EQ(pFuture[0], 3);
+ ASSERT_FLOAT_EQ(pFuture[1], 3);
+ ASSERT_FLOAT_EQ(pFuture[2], 13);
+ ASSERT_FLOAT_EQ(pFuture[3], 13);
+}
+
+TEST_F(GNAMemoryTest, canCalculateReadWriteSectionSize) {
+
+ mem.push_value(nullptr, 3.f, 2);
+ mem.readonly().push_value(nullptr, 13.f, 2);
+ mem.commit();
+
+ ASSERT_EQ(mem.getTotalBytes(), 4 * sizeof(float));
+ ASSERT_EQ(mem.getRWBytes(), 2 * sizeof(float));
+}
+
+TEST_F(GNAMemoryTest, canCalculateReadWriteSectionSizeWithAlignment) {
+
+ GNAMemory<std::allocator<uint8_t>> memAligned(64);
+
+ memAligned.push_value(nullptr, 3.f, 2);
+ memAligned.readonly().push_value(nullptr, 13.f, 2);
+ memAligned.commit();
+
+ ASSERT_EQ(memAligned.getTotalBytes(), 128);
+ ASSERT_EQ(memAligned.getRWBytes(), 64);
+}
+
+TEST_F(GNAMemoryTest, canSetUpReadWriteSectionPtr) {
+
+ float* pFuture2 = (float*)&pFuture2;
+ float* pFuture1 = (float*)&pFuture1;
+ float* pFuture3 = (float*)&pFuture3;
+
+
+ mem.readonly().push_value(pFuture1, 3.f, 2);
+ mem.push_value(pFuture2, 13.f, 3);
+ mem.readonly().push_value(pFuture3, 32.f, 4);
+ mem.commit();
+
+ ASSERT_EQ(mem.getTotalBytes(), (2+3+4) * sizeof(float));
+ ASSERT_EQ(mem.getRWBytes(), 3 * sizeof(float));
+
+ ASSERT_LT(&pFuture2[0], &pFuture1[0]);
+ ASSERT_LT(&pFuture1[0], &pFuture3[0]);
+
+ ASSERT_FLOAT_EQ(pFuture1[0], 3.f);
+ ASSERT_FLOAT_EQ(pFuture1[1], 3.f);
+
+ ASSERT_FLOAT_EQ(pFuture2[0], 13.f);
+ ASSERT_FLOAT_EQ(pFuture2[1], 13.f);
+ ASSERT_FLOAT_EQ(pFuture2[2], 13.f);
+
+ ASSERT_FLOAT_EQ(pFuture3[0], 32.f);
+ ASSERT_FLOAT_EQ(pFuture3[1], 32.f);
+ ASSERT_FLOAT_EQ(pFuture3[2], 32.f);
+ ASSERT_FLOAT_EQ(pFuture3[3], 32.f);
+}
+
+
+TEST_F(GNAMemoryTest, canUpdateSizeOfPushRequestWithBindRequest) {
+ float input[] = {1, 2, 3};
+
+ float *pFuture = nullptr;
+ float *pFuture2 = nullptr;
+ float *pFuture3 = nullptr;
+
+ size_t len = sizeof(input);
+
+ mem.push_ptr(&pFuture, input, len);
+ mem.bind_ptr(&pFuture2, &pFuture, len, len);
+ mem.bind_ptr(&pFuture3, &pFuture2, 2 * len, len);
+
+ mem.commit();
+
+ ASSERT_EQ(mem.getTotalBytes(), 4 * len);
+ ASSERT_NE(pFuture, nullptr);
+ ASSERT_EQ(pFuture2, pFuture + 3);
+ ASSERT_EQ(pFuture3, pFuture + 9);
+
+ ASSERT_FLOAT_EQ(pFuture[0], 1);
+ ASSERT_FLOAT_EQ(pFuture[1], 2);
+ ASSERT_FLOAT_EQ(pFuture[2], 3);
+ ASSERT_FLOAT_EQ(pFuture[3], 0);
+ ASSERT_FLOAT_EQ(pFuture[4], 0);
+ ASSERT_FLOAT_EQ(pFuture[5], 0);
+ ASSERT_FLOAT_EQ(pFuture[6], 0);
+ ASSERT_FLOAT_EQ(pFuture[7], 0);
+ ASSERT_FLOAT_EQ(pFuture[8], 0);
+}
+
+TEST_F(GNAMemoryTest, canUpdateSizeOfPushRequestWithBindRequestWhenPush) {
+ float input[] = {1, 2, 3};
+ float input2[] = {6, 7, 8};
+
+ float *pFutureInput2 = nullptr;
+ float *pFuture = nullptr;
+ float *pFuture2 = nullptr;
+
+ size_t len = sizeof(input);
+
+ mem.push_ptr(&pFuture, input, len);
+ mem.bind_ptr(&pFuture2, &pFuture, len, len);
+ mem.push_ptr(&pFutureInput2, input2, len);
+
+ mem.commit();
+
+ ASSERT_EQ(mem.getTotalBytes(), 3 * len);
+ ASSERT_NE(pFuture, nullptr);
+ ASSERT_NE(pFutureInput2, nullptr);
+ ASSERT_EQ(pFuture2, pFuture + 3);
+
+ ASSERT_FLOAT_EQ(pFuture[0], 1);
+ ASSERT_FLOAT_EQ(pFuture[1], 2);
+ ASSERT_FLOAT_EQ(pFuture[2], 3);
+ ASSERT_FLOAT_EQ(pFuture[3], 0);
+ ASSERT_FLOAT_EQ(pFuture[4], 0);
+
+ ASSERT_FLOAT_EQ(pFutureInput2[0], 6);
+ ASSERT_FLOAT_EQ(pFutureInput2[1], 7);
+ ASSERT_FLOAT_EQ(pFutureInput2[2], 8);
+}
+
+TEST_F(GNAMemoryTest, canUpdateSizeOfPushRequestWithBindRequestWhenAlloc) {
+ float input[] = {1, 2, 3};
+
+ float *pFutureInput = nullptr;
+ float *pFuture = nullptr;
+ float *pFuture2 = nullptr;
+
+ size_t len = sizeof(input);
+
+ mem.reserve_ptr(&pFuture, len);
+ mem.bind_ptr(&pFuture2, &pFuture, len, len);
+ mem.push_ptr(&pFutureInput, input, len);
+
+ mem.commit();
+
+ ASSERT_EQ(mem.getTotalBytes(), 3 * len);
+ ASSERT_NE(pFuture, nullptr);
+ ASSERT_NE(pFutureInput, nullptr);
+ ASSERT_EQ(pFuture2, pFuture + 3);
+
+ ASSERT_FLOAT_EQ(pFuture[0], 0);
+ ASSERT_FLOAT_EQ(pFuture[1], 0);
+ ASSERT_FLOAT_EQ(pFuture[2], 0);
+ ASSERT_FLOAT_EQ(pFuture[3], 0);
+ ASSERT_FLOAT_EQ(pFuture[4], 0);
+
+ ASSERT_FLOAT_EQ(pFutureInput[0], 1);
+ ASSERT_FLOAT_EQ(pFutureInput[1], 2);
+ ASSERT_FLOAT_EQ(pFutureInput[2], 3);
+} \ No newline at end of file
diff --git a/inference-engine/tests/unit/engines/gna/gna_mock_api.hpp b/inference-engine/tests/unit/engines/gna/gna_mock_api.hpp
new file mode 100644
index 000000000..230c5ab94
--- /dev/null
+++ b/inference-engine/tests/unit/engines/gna/gna_mock_api.hpp
@@ -0,0 +1,70 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+#include <gmock/gmock-generated-function-mockers.h>
+
+class GNACppApi {
+
+ public:
+ GNACppApi();
+ ~GNACppApi();
+ MOCK_METHOD10(GNAScoreGaussians, intel_gna_status_t(
+ //intel_gna_handle_t nGNADevice, // handle to GNA accelerator
+ //const intel_feature_type_t* pFeatureType,
+ const intel_feature_t* pFeatureData,
+ const intel_gmm_type_t* pModelType,
+ const intel_gmm_t* pModelData,
+ const uint32_t* pActiveGMMIndices,
+ uint32_t nActiveGMMIndices,
+ uint32_t uMaximumScore,
+ intel_gmm_mode_t nGMMMode,
+ uint32_t* pScores,
+ uint32_t* pReqId,
+ intel_gna_proc_t nAccelerationType));
+
+
+ MOCK_METHOD6(GNAPropagateForward, intel_gna_status_t (
+ intel_gna_handle_t nGNADevice, // handle to GNA accelerator
+ const intel_nnet_type_t* pNeuralNetwork,
+ const uint32_t* pActiveIndices,
+ uint32_t nActiveIndices,
+ uint32_t* pReqId,
+ intel_gna_proc_t nAccelerationType));
+
+ MOCK_METHOD3(GNAAlloc, void *(
+ intel_gna_handle_t nGNADevice, // handle to GNA accelerator
+ uint32_t sizeRequested,
+ uint32_t* sizeGranted));
+
+ MOCK_METHOD1(GNAFree, intel_gna_status_t (intel_gna_handle_t nGNADevice));
+
+ MOCK_METHOD1(GNADeviceOpen, intel_gna_handle_t (intel_gna_status_t* status));
+
+ MOCK_METHOD2(GNADeviceOpenSetThreads, intel_gna_handle_t (intel_gna_status_t* status, uint8_t n_threads));
+ MOCK_METHOD1(GNADeviceClose, intel_gna_status_t (intel_gna_handle_t nGNADevice));
+
+ MOCK_METHOD3(GNAWait, intel_gna_status_t(
+ intel_gna_handle_t nGNADevice, // handle to GNA accelerator
+ uint32_t nTimeoutMilliseconds,
+ uint32_t reqId // IN score request ID
+ ));
+
+ MOCK_METHOD4(GNAWaitPerfRes, intel_gna_status_t(
+ intel_gna_handle_t nGNADevice, // handle to GNA accelerator
+ uint32_t nTimeoutMilliseconds,
+ uint32_t reqId, // IN score request ID
+ intel_gna_perf_t* nGNAPerfResults
+ ));
+
+ MOCK_METHOD6(GNADumpXnn, void* (
+ const intel_nnet_type_t* neuralNetwork,
+ const uint32_t* activeIndices,
+ uint32_t activeIndicesCount,
+ intel_gna_model_header* modelHeader,
+ intel_gna_status_t* status,
+ intel_gna_alloc_cb customAlloc));
+
+ MOCK_METHOD1(gmmSetThreads, intel_gna_handle_t (uint8_t num));
+};
diff --git a/inference-engine/tests/unit/engines/gna/gna_proc_type_test.cpp b/inference-engine/tests/unit/engines/gna/gna_proc_type_test.cpp
new file mode 100644
index 000000000..de17de735
--- /dev/null
+++ b/inference-engine/tests/unit/engines/gna/gna_proc_type_test.cpp
@@ -0,0 +1,40 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include <gtest/gtest.h>
+#include <mock_icnn_network.hpp>
+#include <gmock/gmock-generated-actions.h>
+#include <gna/gna_config.hpp>
+#include "gna_plugin.hpp"
+#include "gna_mock_api.hpp"
+#include "gna_matcher.hpp"
+
+using namespace std;
+using namespace InferenceEngine;
+using namespace GNAPluginNS;
+using namespace ::testing;
+
+class GNAProcTypeTest : public GNATest {
+};
+
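+// The DEVICE_MODE config strings map onto intel_gna_proc_t values; the default
+// "software exact" mode is encoded as GNA_SOFTWARE & GNA_HARDWARE.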
+TEST_F(GNAProcTypeTest, defaultProcTypeIsSWEXACT) {
+ assert_that().onInfer1AFModel().gna().propagate_forward().called_with().proc_type(GNA_SOFTWARE & GNA_HARDWARE);
+}
+
+TEST_F(GNAProcTypeTest, canPassHWProcTypeToGNA) {
+ assert_that().onInfer1AFModel().withGNADeviceMode("GNA_HW").gna().propagate_forward().called_with().proc_type(GNA_HARDWARE);
+}
+
+TEST_F(GNAProcTypeTest, canPassSWProcTypeToGNA) {
+ assert_that().onInfer1AFModel().withGNADeviceMode("GNA_SW").gna().propagate_forward().called_with().proc_type(GNA_SOFTWARE);
+}
+
+TEST_F(GNAProcTypeTest, canPassSWEXACTProcTypeToGNA) {
+ assert_that().onInfer1AFModel().withGNADeviceMode("GNA_SW_EXACT").gna().
+ propagate_forward().called_with().proc_type(GNA_SOFTWARE & GNA_HARDWARE);
+} \ No newline at end of file
diff --git a/inference-engine/tests/unit/engines/gna/gna_pwl_test.cpp b/inference-engine/tests/unit/engines/gna/gna_pwl_test.cpp
new file mode 100644
index 000000000..408deec15
--- /dev/null
+++ b/inference-engine/tests/unit/engines/gna/gna_pwl_test.cpp
@@ -0,0 +1,214 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+#include <gtest/gtest.h>
+#include "gna_matcher.hpp"
+
+class PWLAproximationTest : public GNATest {
+ protected:
+ void SetUp() override {
+ }
+};
+using namespace GNATestIRs;
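+
+// Each activation below is approximated by a piecewise-linear function; the
+// tests bound either the worst-case approximation error (precision threshold)
+// or the number of PWL segments, for the recursive and uniform algorithms.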
+
+// Recursive Algorithm
+// Precision Threshold
+
+TEST_F(PWLAproximationTest, forTanhOnRecursiveAlgoWithPrecisionThresholdIsSuccess) {
+ assert_that().onInferModel(TanhActivationModel())
+ .inNotCompactMode()
+ .propagate_forward()
+ .called_with()
+ .pwl_quantization_activation(DnnActivationType::kActTanh)
+ .pwl_quantization_precision_threshold(0.0053);
+}
+
+TEST_F(PWLAproximationTest, forSigmoidOnRecursiveAlgoWithPrecisionThresholdIsSuccess) {
+ assert_that().onInferModel(SigmoidActivationModel())
+ .inNotCompactMode()
+ .propagate_forward()
+ .called_with()
+ .pwl_quantization_activation(DnnActivationType::kActSigmoid)
+ .pwl_quantization_precision_threshold(0.0027);
+}
+
+TEST_F(PWLAproximationTest, forReLUOnRecursiveAlgoWithPrecisionThresholdIsSuccess) {
+ assert_that().onInferModel(ReLUActivationModel())
+ .inNotCompactMode()
+ .propagate_forward()
+ .called_with()
+ .pwl_quantization_activation(DnnActivationType::kActRelu)
+ .pwl_quantization_precision_threshold(0.0001);
+}
+
+TEST_F(PWLAproximationTest, forLeakyReLUOnRecursiveAlgoWithPrecisionThresholdIsSuccess) {
+ assert_that().onInferModel(LeakyReLUActivationModel())
+ .inNotCompactMode()
+ .propagate_forward()
+ .called_with()
+ .pwl_quantization_activation(DnnActivationType::kActLeakyRelu)
+ .pwl_quantization_precision_threshold(0.0003);
+}
+
+TEST_F(PWLAproximationTest, DISABLED_forIdentityOnRecursiveAlgoWithPrecisionThresholdIsSuccess) {
+ assert_that().onInferModel(IdentityActivationModel())
+ .inNotCompactMode()
+ .propagate_forward()
+ .called_with()
+ .pwl_quantization_activation(DnnActivationType::kActIdentity)
+ .pwl_quantization_precision_threshold(0.0003);
+}
+
+TEST_F(PWLAproximationTest, forClampOnRecursiveAlgoWithPrecisionThresholdIsSuccess) {
+ assert_that().onInferModel(ClampActivationModel())
+ .inNotCompactMode()
+ .propagate_forward()
+ .called_with()
+ .pwl_quantization_activation(DnnActivationType::kActKaldiLstmClipping)
+ .pwl_quantization_precision_threshold(0.0001);
+}
+
+// Uniform Algorithm
+// Precision Threshold
+
+TEST_F(PWLAproximationTest, forTanhOnUniformAlgoWithPrecisionThresholdIsSuccess) {
+ assert_that().onInferModel(TanhActivationModel())
+ .inNotCompactMode()
+ .withUniformPWLAlgo()
+ .propagate_forward()
+ .called_with()
+ .pwl_quantization_activation(DnnActivationType::kActTanh)
+ .pwl_quantization_precision_threshold(0.0009);
+}
+
+TEST_F(PWLAproximationTest, forSigmoidOnUniformAlgoWithPrecisionThresholdIsSuccess) {
+ assert_that().onInferModel(SigmoidActivationModel())
+ .inNotCompactMode()
+ .withUniformPWLAlgo()
+ .propagate_forward()
+ .called_with()
+ .pwl_quantization_activation(DnnActivationType::kActSigmoid)
+ .pwl_quantization_precision_threshold(0.0004);
+}
+
+TEST_F(PWLAproximationTest, DISABLED_forIdentityOnUniformAlgoWithPrecisionThresholdIsSuccess) {
+ assert_that().onInferModel(IdentityActivationModel())
+ .inNotCompactMode()
+ .withUniformPWLAlgo()
+ .propagate_forward()
+ .called_with()
+ .pwl_quantization_activation(DnnActivationType::kActIdentity)
+ .pwl_quantization_precision_threshold(0.0003);
+}
+
+TEST_F(PWLAproximationTest, forClampOnUniformAlgoWithPrecisionThresholdIsSuccess) {
+ assert_that().onInferModel(ClampActivationModel())
+ .inNotCompactMode()
+ .withUniformPWLAlgo()
+ .propagate_forward()
+ .called_with()
+ .pwl_quantization_activation(DnnActivationType::kActKaldiLstmClipping)
+ .pwl_quantization_precision_threshold(0.0001);
+}
+
+// Recursive Algorithm
+// Segment Threshold
+
+TEST_F(PWLAproximationTest, forSigmoidOnRecursiveAlgoWithSegmentThresholdIsSuccess) {
+ assert_that().onInferModel(SigmoidActivationModel())
+ .inNotCompactMode()
+ .propagate_forward()
+ .called_with()
+ .pwl_quantization_activation(DnnActivationType::kActSigmoid)
+ .pwl_quantization_segments_threshold(12);
+}
+
+TEST_F(PWLAproximationTest, forTanhOnRecursiveAlgoWithSegmentThresholdIsSuccess) {
+ assert_that().onInferModel(TanhActivationModel())
+ .inNotCompactMode()
+ .propagate_forward()
+ .called_with()
+ .pwl_quantization_activation(DnnActivationType::kActTanh)
+ .pwl_quantization_segments_threshold(12);
+}
+
+TEST_F(PWLAproximationTest, forReLUonRecursiveAlgoWithSegmentThresholdIsSuccess) {
+ assert_that().onInferModel(ReLUActivationModel())
+ .inNotCompactMode()
+ .propagate_forward()
+ .called_with()
+ .pwl_quantization_activation(DnnActivationType::kActRelu)
+ .pwl_quantization_segments_threshold(2);
+}
+
+TEST_F(PWLAproximationTest, forLeakyReLUonRecursiveAlgoWithSegmentThresholdIsSuccess) {
+ assert_that().onInferModel(LeakyReLUActivationModel())
+ .inNotCompactMode()
+ .propagate_forward()
+ .called_with()
+ .pwl_quantization_activation(DnnActivationType::kActLeakyRelu)
+ .pwl_quantization_segments_threshold(2);
+}
+
+TEST_F(PWLAproximationTest, DISABLED_forIdentityOnRecursiveAlgoWithSegmentThresholdIsSuccess) {
+ assert_that().onInferModel(IdentityActivationModel())
+ .inNotCompactMode()
+ .propagate_forward()
+ .called_with()
+ .pwl_quantization_activation(DnnActivationType::kActIdentity)
+ .pwl_quantization_segments_threshold(3);
+}
+
+TEST_F(PWLAproximationTest, forClampOnRecursiveAlgoWithSegmentThresholdIsSuccess) {
+ assert_that().onInferModel(ClampActivationModel())
+ .inNotCompactMode()
+ .propagate_forward()
+ .called_with()
+ .pwl_quantization_activation(DnnActivationType::kActKaldiLstmClipping)
+ .pwl_quantization_segments_threshold(3);
+}
+
+// Uniform Algorithm
+// Segment Threshold
+
+TEST_F(PWLAproximationTest, forSigmoidonUniformAlgoWithSegmentThresholdIsSuccess) {
+ assert_that().onInferModel(SigmoidActivationModel())
+ .inNotCompactMode()
+ .withUniformPWLAlgo()
+ .propagate_forward()
+ .called_with()
+ .pwl_quantization_activation(DnnActivationType::kActSigmoid)
+ .pwl_quantization_segments_threshold(65);
+}
+
+TEST_F(PWLAproximationTest, forTanhonUniformAlgoWithSegmentThresholdIsSuccess) {
+ assert_that().onInferModel(TanhActivationModel())
+ .inNotCompactMode()
+ .withUniformPWLAlgo()
+ .propagate_forward()
+ .called_with()
+ .pwl_quantization_activation(DnnActivationType::kActTanh)
+ .pwl_quantization_segments_threshold(65);
+}
+
+TEST_F(PWLAproximationTest, DISABLED_forIdentityOnUniformAlgoWithSegmentThresholdIsSuccess) {
+ assert_that().onInferModel(IdentityActivationModel())
+ .inNotCompactMode()
+ .withUniformPWLAlgo()
+ .propagate_forward()
+ .called_with()
+ .pwl_quantization_activation(DnnActivationType::kActIdentity)
+ .pwl_quantization_segments_threshold(3);
+}
+
+TEST_F(PWLAproximationTest, forClampOnUniformAlgoWithSegmentThresholdIsSuccess) {
+ assert_that().onInferModel(ClampActivationModel())
+ .inNotCompactMode()
+ .withUniformPWLAlgo()
+ .propagate_forward()
+ .called_with()
+ .pwl_quantization_activation(DnnActivationType::kActKaldiLstmClipping)
+ .pwl_quantization_segments_threshold(3);
+}
diff --git a/inference-engine/tests/unit/engines/gna/gna_query_state_tests.cpp b/inference-engine/tests/unit/engines/gna/gna_query_state_tests.cpp
new file mode 100644
index 000000000..f61aecd47
--- /dev/null
+++ b/inference-engine/tests/unit/engines/gna/gna_query_state_tests.cpp
@@ -0,0 +1,25 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+#include <gtest/gtest.h>
+#include "gna_matcher.hpp"
+
+class QueryStateTest : public GNATest {
+ protected:
+ void SetUp() override {
+ }
+};
+using namespace GNATestIRs;
+
+// QueryState should expose the memory states declared through Memory layer pairs in the IR.
+
+TEST_F(QueryStateTest, returnEmptyCollectionOfStatesIfNoMemoryInIR) {
+ assert_that().afterLoadingModel(TanhActivationModel()).queryState().isEmpty();
+}
+
+TEST_F(QueryStateTest, returnNonEmptyCollectionOfStatesForMemoryIR) {
+ assert_that().afterLoadingModel(affineToMemoryModel()).queryState().isNotEmpty();
+}
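+
+// Note: TanhActivationModel declares no Memory layers, so no states are
+// exposed; affineToMemoryModel contains a Memory_27/Memory_28 pair (see
+// test_irs.cpp), which is reported as a queryable state.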
diff --git a/inference-engine/tests/unit/engines/gna/i16_quantisation_test.cpp b/inference-engine/tests/unit/engines/gna/i16_quantisation_test.cpp
new file mode 100644
index 000000000..c8767b0b5
--- /dev/null
+++ b/inference-engine/tests/unit/engines/gna/i16_quantisation_test.cpp
@@ -0,0 +1,381 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+#include <gtest/gtest.h>
+#include <inference_engine/layer_transform.hpp>
+#include "gna_plugin/quantization/model_quantizer.hpp"
+#include "gna_plugin/quantization/layer_quantizer.hpp"
+#include "gna_matcher.hpp"
+
+using namespace InferenceEngine;
+using namespace GNAPluginNS;
+using namespace GNATestIRs;
+
+class I16QuantisationTest : public GNATest {
+ protected:
+ LayersQuantizer<QuantI16> lc = LayersQuantizer<QuantI16>(1.0f);
+
+ InferenceEngine::CNNLayerPtr quantize(InferenceEngine::CNNLayerPtr lp) {
+ auto newLayer = InferenceEngine::injectData<QuantizedLayerParams>(lp);
+ transformLayer(newLayer, lc);
+ return newLayer;
+ };
+
+
+ void SetUp() override {
+ }
+
+};
+
+template <class T>
+T setWeights(T blob) {
+ blob->allocate();
+ // the actual quantisation algorithm is invoked here, so provide weights that will be quantized with a scale factor of 1
+ for (auto && w : *blob) {
+ w = MAX_VAL_2B_WEIGHT;
+ }
+ return blob;
+}
+
+template <>
+TBlob<uint8_t>::Ptr setWeights(TBlob<uint8_t>::Ptr blob) {
+ blob->allocate();
+ auto buf = blob->buffer();
+ auto ptr = buf.as<float*>();
+
+ for (int i = 0; i != blob->byteSize() / 4; i++) {
+ ptr[i] = MAX_VAL_2B_WEIGHT;
+ }
+ return blob;
+}
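+
+// Note on the setWeights helpers above: the uint8_t blob is filled through a
+// float* view, so byteSize()/4 is the number of float elements. With every
+// weight set to MAX_VAL_2B_WEIGHT (16384, the int16 weight cap used by the GNA
+// quantizer) the weight scale factor becomes 16384 / max|w| = 1, keeping the
+// quantized weights directly comparable to the originals.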
+
+
+// TODO: add test for FC weights after quantization
+TEST_F(I16QuantisationTest, canQuantizeFCLayer){
+
+ auto fc = std::make_shared<FullyConnectedLayer>(LayerParams{"name", "type", Precision::FP32});
+ fc->_out_num = 9;
+ fc->_weights = setWeights(make_shared_blob<float>(Precision::FP32, {1, 1}));
+ fillWeights(fc->_weights);
+ fc->_biases = make_shared_blob<float>(Precision::FP32, Layout::NC, {1, 1});
+ fc->_biases->allocate();
+ fillWeights(fc->_biases);
+
+ std::shared_ptr<Data> outData = std::make_shared<Data>("data", SizeVector({1, 1}), Precision::FP32, Layout::NC);
+ fc->outData.push_back(outData);
+ fc->insData.push_back(outData);
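+ // note: the same Data object is reused as both output and input here - a
+ // shortcut that is sufficient for this quantization smoke test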
+
+
+ ASSERT_NO_THROW(quantize(fc));
+}
+
+TEST_F(I16QuantisationTest, canQuantizeActivation){
+
+ auto sigmoid = std::make_shared<GenericLayer >(LayerParams{"name", "type", Precision::FP32});
+ sigmoid->params["value"] = 2;
+ sigmoid->type = "Activation";
+
+ ASSERT_NO_THROW(quantize(sigmoid));
+}
+
+TEST_F(I16QuantisationTest, outputAffinePrecisionIs32Bits){
+
+ ModelQuantizer<QuantI16> q;
+
+ CNNNetReader net_reader;
+ ASSERT_NO_THROW(net_reader.ReadNetwork(Fc2DOutputModel().data(), Fc2DOutputModel().length()));
+
+ auto weights = make_shared_blob<uint8_t>(Precision::U8, C, {440});
+ weights->allocate();
+ fillWeights(weights);
+ net_reader.SetWeights(weights);
+
+ auto newNet = q.quantize(net_reader.getNetwork(), 1000);
+ InputsDataMap inputs;
+ newNet->getInputsInfo(inputs);
+ auto affineDataPtr = inputs.begin()->second->getInputData()->inputTo.begin()->second->outData.front();
+
+ ASSERT_EQ(affineDataPtr->precision, Precision::I32);
+}
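+
+// note: in I16 mode 16-bit inputs and 16-bit weights are accumulated into
+// 32-bit sums, which is why the affine output above is expected to be I32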
+
+
+TEST_F(I16QuantisationTest, canQuantizeLstmLikeTopology) {
+ ModelQuantizer<QuantI16> q;
+
+ CNNNetReader net_reader;
+ ASSERT_NO_THROW(net_reader.ReadNetwork(affineToMemoryModel().data(), affineToMemoryModel().length()));
+
+ auto weights = setWeights(make_shared_blob<uint8_t >(Precision::U8, C, {440}));
+ net_reader.SetWeights(weights);
+
+ ASSERT_NO_THROW(q.quantize(net_reader.getNetwork(), 1000));
+}
+
+TEST_F(I16QuantisationTest, DISABLED_outputScaleFactorForAffineIsCorrect){
+
+ ModelQuantizer<QuantI16> q;
+
+ CNNNetReader net_reader;
+ ASSERT_NO_THROW(net_reader.ReadNetwork(Fc2DOutputModel().data(), Fc2DOutputModel().length()));
+
+ auto weights = make_shared_blob<uint8_t >(Precision::U8, C, {440});
+ weights->allocate();
+ fillWeights(weights, 100);
+ net_reader.SetWeights(weights);
+
+ auto newNet = q.quantize(net_reader.getNetwork(), 1000);
+ InputsDataMap inputs;
+ newNet->getInputsInfo(inputs);
+ auto affineLayerPtr = inputs.begin()->second->getInputData()->inputTo.begin()->second;
+
+ auto quantParams = getInjectedData<QuantizedLayerParams>(affineLayerPtr);
+
+
+ ASSERT_FLOAT_EQ(quantParams->_dst_quant.scale, 100);
+ ASSERT_FLOAT_EQ(quantParams->_weights_quant.scale, 100);
+}
+
+TEST_F(I16QuantisationTest, OnlyAffine_NoActivationInsertion) {
+ assert_that()
+ .onInferModel(Fc2DOutputModel())
+ .inNotCompactMode()
+ .gna().propagate_forward().called_without().pwl_inserted_into_nnet();
+}
+
+TEST_F(I16QuantisationTest, OnlyAffine_NoActivationInsertion_ProfilingEnabled) {
+ assert_that()
+ .onInferModel(Fc2DOutputModel())
+ .inNotCompactMode()
+ .gna().propagate_forward().called_without().pwl_inserted_into_nnet().profiling_counters();
+}
+
+TEST_F(I16QuantisationTest, OnlyAffineWithNanScaleFactorFails) {
+ gna()
+ .onInferModel(Fc2DOutputModel())
+ .withNanScaleFactor()
+ .propagate_forward().throws();
+}
+
+TEST_F(I16QuantisationTest, OnlyAffineWithInfScaleFactorFails) {
+ gna()
+ .onInferModel(Fc2DOutputModel())
+ .withInfScaleFactor()
+ .propagate_forward().throws();
+}
+
+TEST_F(I16QuantisationTest, AffineToMemoryWillResultInActivationInsertion) {
+ assert_that()
+ .onInferModel(affineToMemoryModel())
+ .inNotCompactMode()
+ .gna().propagate_forward().called_with().pwl_inserted_into_nnet();
+}
+
+TEST_F(I16QuantisationTest, EltwiseToMemoryWithNoOutputActivationInsertion) {
+ assert_that().onInferModel(eltwiseToMemoryModelNoOutput(), [](CNNNetwork & net){
+ net.addOutput("Eltwise_8");
+ }).inNotCompactMode().gna().propagate_forward().called_with().pwl_inserted_into_nnet();
+}
+
+TEST_F(I16QuantisationTest, EltwiseToMemory_ActivationInsertion) {
+ assert_that().onInferModel(eltwiseToMemoryModel())
+ .inNotCompactMode().gna().propagate_forward().called_with().pwl_inserted_into_nnet();
+}
+
+
+TEST_F(I16QuantisationTest, SplitFollowedByActivation_DummyDiagonalAffineInsertion) {
+ assert_that().onInferModel(activationAfterSplitModel())
+ .inNotCompactMode().gna().propagate_forward().called_with().diagonal_inserted_into_nnet();
+}
+
+TEST_F(I16QuantisationTest, SplitFollowedByFCAndEltwiseOnCPU) {
+ std::vector<float> input_data = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
+ std::vector<float> expected_result = {12.0, 12.0, 12.0, 12.0, 12.0,
+ 12.0, 12.0, 12.0, 12.0, 12.0};
+ assert_that().onInferModel(FCWithPaddingAfterSplitModel())
+ .inNotCompactMode().gna().propagate_forward().onCPU()
+ .called_with_input_and_expected_output(input_data, expected_result);
+}
+
+TEST_F(I16QuantisationTest, SliceFollowedByFCAndEltwiseOnCPU) {
+ std::vector<float> input_data = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
+ std::vector<float> expected_result = {14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0};
+ assert_that().onInferModel(FCWithPaddingAfterSliceModel())
+ .inNotCompactMode().gna().propagate_forward().onCPU()
+ .called_with_input_and_expected_output(input_data, expected_result);
+}
+
+TEST_F(I16QuantisationTest, SliceFollowedByAlignedFCAndEltwiseOnCPU) {
+ std::vector<float> input_data = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
+ std::vector<float> expected_result = {18.0, 18.0, 18.0, 18.0};
+ assert_that().onInferModel(SliceModelWithAlignedOutputs())
+ .inNotCompactMode().gna().propagate_forward().onCPU()
+ .called_with_input_and_expected_output(input_data, expected_result);
+}
+
+TEST_F(I16QuantisationTest, SliceFollowedBy2FCsAnd2EltwisesOnCPU) {
+ std::vector<float> input_data = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
+ std::vector<float> expected_result = {27.0, 27.0, 27.0, 27.0, 27.0, 27.0, 27.0, 27.0};
+ assert_that().onInferModel(twoFCWithPaddingAfterSliceModel())
+ .inNotCompactMode().gna().propagate_forward().onCPU()
+ .called_with_input_and_expected_output(input_data, expected_result);
+}
+
+TEST_F(I16QuantisationTest, EltwiseSumm_onlyOneIdentityInsertion) {
+ assert_that().onInferModel(eltwiseSummModel())
+ .inNotCompactMode().gna().propagate_forward().called_with().pwl_inserted_into_nnet().once();
+}
+
+
+TEST_F(I16QuantisationTest, canDetectLeakyRelu) {
+ assert_that().onInferModel(TFLeakyReluModel())
+ .inNotCompactMode().gna().propagate_forward().called_with().pwl_inserted_into_nnet();
+}
+
+TEST_F(I16QuantisationTest, MaxPool_followedAfterActivation) {
+ assert_that().onInferModel(maxpoolAfterRelu())
+ .inNotCompactMode().gna().propagate_forward().called_with()
+ .convolution_inserted_into_nnet()
+ .And()
+ .pwl_inserted_into_nnet()
+ .And()
+ .max_pooling_inserted_into_nnet();
+}
+
+TEST_F(I16QuantisationTest, EltwiseMull_willInsertTwoIdentities) {
+ assert_that().onInferModel(eltwiseMulModel())
+ .inNotCompactMode().gna().propagate_forward().called_with().pwl_inserted_into_nnet().twice();
+}
+
+TEST_F(I16QuantisationTest, ConcatPropagateForwardWithSuccessOnCPU) {
+ std::vector<float> input_data = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
+ std::vector<float> expected_result = {121.0, 121.0, 121.0, 121.0, 121.0,
+ 121.0, 121.0, 121.0, 121.0, 121.0,
+ 121.0, 121.0, 121.0, 121.0, 121.0,
+ 121.0, 121.0, 121.0, 121.0, 121.0};
+
+ assert_that().onInferModel(concatModel())
+ .inNotCompactMode().gna().propagate_forward().onCPU()
+ .called_with_input_and_expected_output(input_data, expected_result);
+}
+
+TEST_F(I16QuantisationTest, DoubleConcatPropagateForwardWithSuccessOnCPU) {
+ std::vector<float> input_data = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
+ std::vector<float> expected_result = {141.0, 141.0, 141.0, 141.0, 141.0,
+ 141.0, 141.0, 141.0, 141.0, 141.0,
+ 141.0, 141.0, 141.0, 141.0, 141.0,
+ 141.0, 141.0, 141.0, 141.0, 141.0,
+ 141.0, 141.0, 141.0, 141.0, 141.0,
+ 141.0, 141.0, 141.0, 141.0, 141.0,
+ 141.0, 141.0, 141.0, 141.0, 141.0,
+ 141.0, 141.0, 141.0, 141.0, 141.0};
+
+ assert_that().onInferModel(doubleConcatModel())
+ .inNotCompactMode().gna().propagate_forward().onCPU()
+ .called_with_input_and_expected_output(input_data, expected_result);
+}
+
+TEST_F(I16QuantisationTest, ScaleShift_Affine_WillResultInIdentityInsertion) {
+ assert_that().onInferModel(scaleShiftAffineModel())
+ .inNotCompactMode().gna().propagate_forward().called_with().pwl_inserted_into_nnet().once();
+}
+
+TEST_F(I16QuantisationTest, ClampFollowedByTanh_ResultInDiagonalInsertion) {
+ assert_that().onInferModel(clampFollowedByTanhModel())
+ .inNotCompactMode().gna().propagate_forward().called_with().diagonal_inserted_into_nnet().twice();
+}
+
+TEST_F(I16QuantisationTest, EltwiseWithMemoryAndActivationInput_ResultInDiagonalInsertion) {
+ assert_that().onInferModel(eltwiseWithMemoryAndActivationInputModel())
+ .inNotCompactMode().gna().propagate_forward().called_with().diagonal_inserted_into_nnet().once();
+}
+
+TEST_F(I16QuantisationTest, AffineWith2AffineOutputs_ResultInOnlyOneIdentityInsertion) {
+ // one Identity activation from first FC, and one Identity activation for eltwise
+ assert_that().onInferModel(AffineWith2AffineOutputsModel())
+ .inNotCompactMode().gna().propagate_forward().called_with().pwl_inserted_into_nnet().twice();
+}
+
+// TODO: this mode is not required in real-life scenarios so far
+TEST_F(I16QuantisationTest, DISABLED_AffineWithOutputToMemoryAndToAnotherNode_ResultInCopyInsertion) {
+ assert_that().onInferModel(affineToMemoryModel()).inNotCompactMode().gna().propagate_forward().
+ called_with().copy_inserted_into_nnet();
+}
+
+TEST_F(I16QuantisationTest, CropWithoutOffsetPropagateForwardWithSuccessOnCPU) {
+ std::vector<float> input_data = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
+ std::vector<float> expected_result = {11.0, 11.0, 11.0, 11.0, 11.0,
+ 11.0, 11.0, 11.0, 11.0, 11.0};
+
+ assert_that().onInferModel(cropWithoutOffsetModel())
+ .inNotCompactMode().gna().propagate_forward().onCPU()
+ .called_with_input_and_expected_output(input_data, expected_result);
+}
+
+TEST_F(I16QuantisationTest, CropWithAlignedOffsetPropagateForwardWithSuccessOnCPU) {
+ std::vector<float> input_data = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
+ std::vector<float> expected_result = {3.0, 3.0, 3.0, 3.0, 3.0,
+ 3.0, 3.0, 3.0, 3.0, 3.0};
+
+ assert_that().onInferModel(cropWithAlignedOffsetModel())
+ .inNotCompactMode().gna().propagate_forward().onCPU()
+ .called_with_input_and_expected_output(input_data, expected_result);
+}
+
+TEST_F(I16QuantisationTest, CropWithOffsetPropagateForwardWithSuccessOnCPU) {
+ std::vector<float> input_data = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+ 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0};
+ std::vector<float> expected_result = {7.0, 7.0, 7.0, 7.0, 7.0,
+ 7.0, 7.0, 7.0, 7.0, 7.0};
+
+ assert_that().onInferModel(cropWithOffsetModel())
+ .inNotCompactMode().gna().propagate_forward().onCPU()
+ .called_with_input_and_expected_output(input_data, expected_result);
+}
+
+TEST_F(I16QuantisationTest, CropWithMaxOffsetPropagateForwardWithSuccessOnCPU) {
+ std::vector<float> input_data = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
+ std::vector<float> expected_result = {1.0, 1.0, 1.0, 1.0, 1.0,
+ 1.0, 1.0, 1.0, 1.0, 1.0};
+
+ assert_that().onInferModel(cropWithMaxOffsetModel())
+ .inNotCompactMode().gna().propagate_forward().onCPU()
+ .called_with_input_and_expected_output(input_data, expected_result);
+}
+
+TEST_F(I16QuantisationTest, CropWithOffsetAfterFCPropagateForwardWithSuccessOnCPU) {
+ std::vector<float> input_data = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
+ std::vector<float> expected_result = {111.0, 111.0, 111.0, 111.0, 111.0,
+ 111.0, 111.0, 111.0, 111.0, 111.0};
+
+ assert_that().onInferModel(cropWithOffsetExtendedModel())
+ .inNotCompactMode().gna().propagate_forward().onCPU()
+ .called_with_input_and_expected_output(input_data, expected_result);
+}
+
+TEST_F(I16QuantisationTest, CopySimpleCasePropagateForwardWithSuccessOnCPU) {
+ std::vector<float> input_data = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
+ std::vector<float> expected_result = {12.0, 12.0, 12.0, 12.0, 12.0,
+ 12.0, 12.0, 12.0, 12.0, 12.0,
+ 11.0, 11.0, 11.0, 11.0, 11.0,
+ 11.0, 11.0, 11.0, 11.0, 11.0};
+
+ assert_that().onInferModel(copyModel())
+ .inNotCompactMode().gna().propagate_forward().onCPU()
+ .called_with_input_and_expected_output(input_data, expected_result);
+}
diff --git a/inference-engine/tests/unit/engines/gna/matchers/conv_matcher.hpp b/inference-engine/tests/unit/engines/gna/matchers/conv_matcher.hpp
new file mode 100644
index 000000000..4d5947093
--- /dev/null
+++ b/inference-engine/tests/unit/engines/gna/matchers/conv_matcher.hpp
@@ -0,0 +1,34 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include"gna-api.h"
+#include "nnet_base_matcher.hpp"
+#include "quantization/quantization.h"
+
+class ConvoluionLayerMatcher : public ::testing::MatcherInterface<const intel_nnet_type_t*> {
+ bool matchInserted;
+ int matchQuantity;
+ public:
+ ConvoluionLayerMatcher(bool matchInserted, int matchQuantity) : matchInserted(matchInserted), matchQuantity(matchQuantity) {}
+ bool MatchAndExplain(const intel_nnet_type_t *foo, ::testing::MatchResultListener *listener) const override {
+ if (foo == nullptr)
+ return false;
+ for(int i = 0; i < foo->nLayers; i++) {
+ if (foo->pLayers[i].nLayerKind != INTEL_CONVOLUTIONAL) continue;
+
+ auto conv = (intel_convolutional_layer_t*)foo->pLayers[i].pLayerStruct;
+
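+ // the first convolutional layer found decides the match;
+ // matchQuantity is currently not evaluated by this matcher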
+ return matchInserted;
+ }
+ return !matchInserted;
+ };
+ void DescribeTo(::std::ostream *os) const override {
+ *os << "should "<< (matchInserted ? "" : "not ") << "have Convolution primitive as part of nnet structure";
+ }
+};
+
+
+
diff --git a/inference-engine/tests/unit/engines/gna/matchers/copy_matcher.hpp b/inference-engine/tests/unit/engines/gna/matchers/copy_matcher.hpp
new file mode 100644
index 000000000..c947ecd9e
--- /dev/null
+++ b/inference-engine/tests/unit/engines/gna/matchers/copy_matcher.hpp
@@ -0,0 +1,32 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+#include "nnet_base_matcher.hpp"
+class CopyLayerMatcher : public ::testing::MatcherInterface<const intel_nnet_type_t*> {
+ bool matchInserted;
+ const int matchQuantity;
+ public:
+ CopyLayerMatcher(bool matchInserted, int matchQuantity) : matchInserted(matchInserted), matchQuantity(matchQuantity) {}
+ bool MatchAndExplain(const intel_nnet_type_t *foo, ::testing::MatchResultListener *listener) const override {
+ if (foo == nullptr)
+ return false;
+ for(int i = 0; i < foo->nLayers; i++) {
+ if (foo->pLayers[i].nLayerKind != INTEL_COPY) continue;
+ return matchInserted;
+ }
+ return !matchInserted;
+ };
+ void DescribeTo(::std::ostream *os) const override {
+ *os << "should "<< (matchInserted ? "" : "not ") << "have Copy primitive as part of nnet structure";
+ }
+};
+
+inline ::testing::Matcher<const intel_nnet_type_t*> HasCopyLayer(bool matchInserted = false, int matchQuantity = -1) {
+ std::unique_ptr<NNetComponentMatcher> c (new NNetComponentMatcher());
+ c->add(new CopyLayerMatcher(matchInserted, matchQuantity));
+ return ::testing::MakeMatcher(c.release());
+}
+
+
diff --git a/inference-engine/tests/unit/engines/gna/matchers/diag_matcher.hpp b/inference-engine/tests/unit/engines/gna/matchers/diag_matcher.hpp
new file mode 100644
index 000000000..cd6c2469e
--- /dev/null
+++ b/inference-engine/tests/unit/engines/gna/matchers/diag_matcher.hpp
@@ -0,0 +1,51 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+#include"gna-api.h"
+#include "nnet_base_matcher.hpp"
+#include "quantization/quantization.h"
+
+class DiagLayerMatcher : public ::testing::MatcherInterface<const intel_nnet_type_t*> {
+ bool matchInserted;
+ int matchQuantity;
+ public:
+ DiagLayerMatcher(bool matchInserted, int matchQuantity) : matchInserted(matchInserted), matchQuantity(matchQuantity) {}
+ bool MatchAndExplain(const intel_nnet_type_t *foo, ::testing::MatchResultListener *listener) const override {
+ if (foo == nullptr)
+ return false;
+ for(int i = 0; i < foo->nLayers; i++) {
+ if (foo->pLayers[i].nLayerKind != INTEL_AFFINE_DIAGONAL) continue;
+ // diagonal layer has to have 1 for weights and 0 for biases
+
+ auto diag = (intel_affine_func_t*)foo->pLayers[i].pLayerStruct;
+
+ bool bWeightsOK = true;
+ for (int j = 0; j < foo->pLayers[i].nOutputRows; j++) {
+ auto weights = (int16_t*)diag->pWeights;
+ auto biases = (int32_t*)diag->pBiases;
+ // identity matrix transformed to 16384 values
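+ // (a 1.0f identity weight quantized with scale factor MAX_VAL_2B_WEIGHT
+ // is stored as the int16 value 16384, with a zero bias)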
+ if (weights[j] != MAX_VAL_2B_WEIGHT || biases[j] != 0) {
+ bWeightsOK = false;
+ break;
+ }
+ }
+ if (!bWeightsOK) continue;
+
+ return matchInserted;
+ }
+ return !matchInserted;
+ };
+ void DescribeTo(::std::ostream *os) const override {
+ *os << "should "<< (matchInserted ? "" : "not ") << "have Identity Diagonal Primitive primitive as part of nnet structure";
+ }
+};
+
+inline ::testing::Matcher<const intel_nnet_type_t*> HasDiagonalLayer(bool matchInserted = false, int matchQuantity = -1) {
+ std::unique_ptr<NNetComponentMatcher> c (new NNetComponentMatcher());
+ c->add(new DiagLayerMatcher(matchInserted, matchQuantity));
+ return ::testing::MakeMatcher(c.release());
+}
+
+
diff --git a/inference-engine/tests/unit/engines/gna/matchers/nnet_base_matcher.hpp b/inference-engine/tests/unit/engines/gna/matchers/nnet_base_matcher.hpp
new file mode 100644
index 000000000..7c1f69b15
--- /dev/null
+++ b/inference-engine/tests/unit/engines/gna/matchers/nnet_base_matcher.hpp
@@ -0,0 +1,86 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+
+class NNetComponentMatcher : public ::testing::MatcherInterface<const intel_nnet_type_t*> {
+ std::vector<std::shared_ptr<::testing::MatcherInterface<const intel_nnet_type_t*>>> matchers;
+ mutable int failIdx = -1;
+ mutable std::stringstream reason;
+ int bitness;
+ public:
+ NNetComponentMatcher(int bitness = 16) : bitness(bitness) {}
+ NNetComponentMatcher& add(::testing::MatcherInterface<const intel_nnet_type_t*> * p) {
+ matchers.push_back(std::shared_ptr<::testing::MatcherInterface<const intel_nnet_type_t*>>(p));
+ return *this;
+ }
+ bool empty() const {
+ return matchers.empty();
+ }
+ bool MatchAndExplain(const intel_nnet_type_t* foo, ::testing::MatchResultListener* listener) const override {
+ if (foo == nullptr)
+ return false;
+ reason.str("");
+ // checking pointers are set
+ for (int i=0; i < foo->nLayers; i++) {
+ if (nullptr == foo->pLayers[i].pInputs ||
+ nullptr == foo->pLayers[i].pOutputs) {
+ reason << "input/output pointers in pLayers[" << i << "] shouldn't be null NULL";
+ return false;
+ }
+ if (foo->pLayers[i].nBytesPerInput * 8 != bitness) {
+ reason << "numberOfBytes per input in pLayers[" << i << "] should be " << (bitness/8) << ", but was "
+ << foo->pLayers[i].nBytesPerInput;
+ return false;
+ }
+
+ if (foo->pLayers[i].nBytesPerOutput * 8 != bitness) {
+ // a narrower output is fine if it only feeds biases,
+ // or if it is a de facto network output, i.e. it is not consumed as an input by any layer
+ for (int j=0; j < foo->nLayers; j++) {
+ // violation: the narrow output is consumed as another layer's input
+ if (foo->pLayers[j].pInputs == foo->pLayers[i].pOutputs) {
+ reason << "numberOfBytes per output int pLayers[" << i << "] should be " << (bitness/8) << ", but was "
+ << foo->pLayers[i].nBytesPerOutput << "cannot use this output as inputs for layer :" << j;
+ return false;
+ }
+ if (foo->pLayers[j].nLayerKind == INTEL_AFFINE ||
+ foo->pLayers[j].nLayerKind == INTEL_AFFINE_DIAGONAL) {
+ auto pAffine = reinterpret_cast<intel_affine_func_t*>(foo->pLayers[j].pLayerStruct);
+
+ if (pAffine->pWeights == foo->pLayers[i].pOutputs) {
+ reason << "numberOfBytes per output int pLayers[" << i << "] should be " << (bitness/8) << ", but was "
+ << foo->pLayers[i].nBytesPerOutput << "cannot use this output as weights for affine layer :" << j;
+ return false;
+ }
+ }
+ }
+ }
+ }
+
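+ // run the composed matchers in order, remembering which one failed so
+ // that DescribeTo can print the failing matcher's description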
+ int i = 0;
+ for (auto && matcher : matchers) {
+ bool res = matcher->MatchAndExplain(foo, listener);
+ if (!res) {
+ failIdx = i;
+ return false;
+ }
+ i++;
+ }
+ return true;
+ }
+
+ void DescribeTo(::std::ostream* os) const override {
+
+ if (failIdx != -1) {
+ matchers[failIdx]->DescribeTo(os);
+ return;
+ }
+
+ *os << reason.str();
+ }
+
+};
+
diff --git a/inference-engine/tests/unit/engines/gna/matchers/pool_matcher.hpp b/inference-engine/tests/unit/engines/gna/matchers/pool_matcher.hpp
new file mode 100644
index 000000000..009e61c7c
--- /dev/null
+++ b/inference-engine/tests/unit/engines/gna/matchers/pool_matcher.hpp
@@ -0,0 +1,37 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include"gna-api.h"
+#include "nnet_base_matcher.hpp"
+#include "quantization/quantization.h"
+
+class PoolingLayerMatcher : public ::testing::MatcherInterface<const intel_nnet_type_t*> {
+ bool matchInserted;
+ int matchQuantity;
+ bool bMaxPool;
+ public:
+ PoolingLayerMatcher(bool matchInserted, int matchQuantity, bool bMaxPool)
+ : matchInserted(matchInserted), matchQuantity(matchQuantity), bMaxPool(bMaxPool) {}
+ bool MatchAndExplain(const intel_nnet_type_t *foo, ::testing::MatchResultListener *listener) const override {
+ if (foo == nullptr)
+ return false;
+ for(int i = 0; i < foo->nLayers; i++) {
+ if (foo->pLayers[i].nLayerKind != INTEL_CONVOLUTIONAL) continue;
+
+ auto conv = (intel_convolutional_layer_t*)foo->pLayers[i].pLayerStruct;
+ if (conv->poolType != INTEL_MAX_POOLING) continue;
+
+ return matchInserted;
+ }
+ return !matchInserted;
+ };
+ void DescribeTo(::std::ostream *os) const override {
+ *os << "should "<< (matchInserted ? "" : "not ") << "have MaxPooling primitive as part of nnet structure";
+ }
+};
+
+
+
diff --git a/inference-engine/tests/unit/engines/gna/matchers/precision_matcher.hpp b/inference-engine/tests/unit/engines/gna/matchers/precision_matcher.hpp
new file mode 100644
index 000000000..9dfdc8780
--- /dev/null
+++ b/inference-engine/tests/unit/engines/gna/matchers/precision_matcher.hpp
@@ -0,0 +1,54 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+#include "nnet_base_matcher.hpp"
+
+class NNetPrecisionMatcher : public ::testing::MatcherInterface<const intel_nnet_type_t*> {
+ GnaPluginTestEnvironment::NnetPrecision nnetPrecision;
+ intel_layer_kind_t layerKind = (intel_layer_kind_t)-1;
+ public:
+ explicit NNetPrecisionMatcher(GnaPluginTestEnvironment::NnetPrecision nnetPrecision,
+ intel_layer_kind_t layerKind = (intel_layer_kind_t)-1) : nnetPrecision(nnetPrecision), layerKind(layerKind) {}
+ bool MatchAndExplain(const intel_nnet_type_t* foo, ::testing::MatchResultListener* listener) const override {
+
+ auto ioPrecision = (foo->pLayers->nBytesPerInput == nnetPrecision.input_precision.size()) &&
+ (foo->pLayers->nBytesPerOutput == nnetPrecision.output_precision.size());
+ if (!ioPrecision) {
+ return false;
+ }
+ if (layerKind != (intel_layer_kind_t)-1) {
+ if (foo->pLayers->nLayerKind != layerKind) {
+ return false;
+ }
+ switch (layerKind) {
+ case INTEL_AFFINE : {
+ auto affine = (intel_affine_layer_t *) (foo->pLayers->pLayerStruct);
+
+ return affine->affine.nBytesPerBias == nnetPrecision.biases_precision.size() &&
+ affine->affine.nBytesPerWeight == nnetPrecision.weights_precision.size();
+ }
+ default :
+ return false;
+ }
+
+ }
+ return true;
+ }
+
+ void DescribeTo(::std::ostream* os) const override {
+ *os << "intel_nnet_layer_t nBytesPerInput equals " << nnetPrecision.input_precision.size() << std::endl;
+ *os << "intel_nnet_layer_t nBytesPerOutput equals " << nnetPrecision.output_precision.size() << std::endl;
+ *os << "intel_nnet_layer_t nBytesPerWeights equals " << nnetPrecision.weights_precision.size() << std::endl;
+ *os << "intel_nnet_layer_t nBytesPerBises equals " << nnetPrecision.biases_precision.size() << std::endl;
+ *os << "foo->pLayers->nLayerKind INTEL_AFFINE" ;
+ }
+};
+
+inline ::testing::Matcher<const intel_nnet_type_t*> BitnessOfNNetEq(GnaPluginTestEnvironment::NnetPrecision nnetPrecision,
+ intel_layer_kind_t component) {
+ std::unique_ptr<NNetComponentMatcher> c (new NNetComponentMatcher());
+ c->add(new NNetPrecisionMatcher(nnetPrecision, component));
+ return ::testing::MakeMatcher(c.release());
+}
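+
+// Usage sketch (illustrative; the chosen precisions are assumptions, not part
+// of this file): checking that an affine layer was quantized to 2-byte I/O,
+// 2-byte weights and 4-byte biases:
+//
+// GnaPluginTestEnvironment::NnetPrecision prec;
+// prec.input_precision = InferenceEngine::Precision::I16;
+// prec.output_precision = InferenceEngine::Precision::I16;
+// prec.weights_precision = InferenceEngine::Precision::I16;
+// prec.biases_precision = InferenceEngine::Precision::I32;
+// EXPECT_THAT(nnet, BitnessOfNNetEq(prec, INTEL_AFFINE));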
diff --git a/inference-engine/tests/unit/engines/gna/matchers/pwl_matcher.hpp b/inference-engine/tests/unit/engines/gna/matchers/pwl_matcher.hpp
new file mode 100644
index 000000000..9060cd516
--- /dev/null
+++ b/inference-engine/tests/unit/engines/gna/matchers/pwl_matcher.hpp
@@ -0,0 +1,61 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+#include "nnet_base_matcher.hpp"
+
+class PWLMatcher : public ::testing::MatcherInterface<const intel_nnet_type_t*> {
+ bool matchInserted;
+ const int matchQuantity;
+ mutable int timesInserted = 0;
+ public:
+ PWLMatcher(bool inserted, int matchQuantity) : matchInserted(inserted), matchQuantity(matchQuantity) {}
+
+ bool MatchAndExplain(const intel_nnet_type_t *foo, ::testing::MatchResultListener *listener) const override {
+ if (foo == nullptr)
+ return false;
+ timesInserted = 0;
+ for(int i = 0; i < foo->nLayers; i++) {
+ if (foo->pLayers[i].nLayerKind != INTEL_AFFINE &&
+ foo->pLayers[i].nLayerKind != INTEL_AFFINE_DIAGONAL &&
+ foo->pLayers[i].nLayerKind != INTEL_CONVOLUTIONAL) continue;
+ auto affine = reinterpret_cast<intel_affine_layer_t*>(foo->pLayers[i].pLayerStruct);
+ if (affine == nullptr) continue;
+
+ bool hasPwl = affine->pwl.nSegments != 0 && affine->pwl.pSegments != nullptr;
+
+ if (hasPwl) {
+ if (matchQuantity == -1)
+ return matchInserted;
+ else
+ timesInserted++;
+ }
+ }
+ if (matchInserted) {
+ if (matchQuantity != -1) {
+ return timesInserted == matchQuantity;
+ }
+ return timesInserted != 0;
+ }
+
+ return timesInserted == 0;
+ };
+ void DescribeTo(::std::ostream *os) const override {
+ if (!matchInserted) {
+ *os << "should not have a PWL layer as part of the nnet structure, but one was found " << timesInserted << " times";
+ } else {
+ if (matchQuantity == -1) {
+ *os << "should have a PWL layer as part of the nnet structure, but none was found";
+ } else {
+ *os << "should have a PWL layer as part of the nnet structure " << matchQuantity << " times, but it was found only " << timesInserted << " times";
+ }
+ }
+ }
+};
+
+inline ::testing::Matcher<const intel_nnet_type_t*> HasPwlLayer(bool inserted = true, int matchQuantity = -1) {
+ std::unique_ptr<NNetComponentMatcher> c (new NNetComponentMatcher());
+ c->add(new PWLMatcher(inserted, matchQuantity));
+ return ::testing::MakeMatcher(c.release());
+}
diff --git a/inference-engine/tests/unit/engines/gna/matchers/pwl_quantization_metrics_matcher.hpp b/inference-engine/tests/unit/engines/gna/matchers/pwl_quantization_metrics_matcher.hpp
new file mode 100644
index 000000000..cccd94069
--- /dev/null
+++ b/inference-engine/tests/unit/engines/gna/matchers/pwl_quantization_metrics_matcher.hpp
@@ -0,0 +1,139 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+#include <cmath>
+#include <numeric>
+
+#include "nnet_base_matcher.hpp"
+#include "dnn.h"
+#include "pwl.h"
+#include "iostream"
+
+class PWLQuantizationMetricsMatcher : public ::testing::MatcherInterface<const intel_nnet_type_t*> {
+ const float rmse_threshold;
+ const uint32_t activation_type;
+ const uint16_t segment_threshold;
+ public:
+ PWLQuantizationMetricsMatcher(uint32_t type, float precision_threshold, uint16_t segments) :
+ activation_type(type),
+ rmse_threshold(precision_threshold),
+ segment_threshold(segments) {}
+
+ bool MatchAndExplain(const intel_nnet_type_t *nnet, ::testing::MatchResultListener *listener) const override {
+ float rmse = 0.f;
+ const float test_arg_scale_factor = 16384;
+
+ if (nnet == nullptr)
+ return false;
+
+ for(int i = 0; i < nnet->nLayers; ++i) {
+ if (nnet->pLayers[i].nLayerKind != INTEL_AFFINE &&
+ nnet->pLayers[i].nLayerKind != INTEL_AFFINE_DIAGONAL &&
+ nnet->pLayers[i].nLayerKind != INTEL_CONVOLUTIONAL) continue;
+
+ auto affine = reinterpret_cast<intel_affine_layer_t*>(nnet->pLayers[i].pLayerStruct);
+
+ if (affine == nullptr ||
+ affine->pwl.nSegments == 0 ||
+ affine->pwl.pSegments == nullptr) continue;
+
+ if (affine->pwl.nSegments > segment_threshold) {
+ return false;
+ }
+
+ int32_t domain = 0;
+ std::function<float(float)> activation_func = nullptr;
+ switch (activation_type) {
+ case kActSigmoid:
+ domain = 10000;
+ activation_func = [](float x)-> float {
+ float exp_value = exp(static_cast<double>(-x));
+ return 1 / (1 + exp_value);};
+ break;
+ case kActTanh:
+ domain = 5000;
+ activation_func = [](float x)-> float {return tanh(x);};
+ break;
+ case kActIdentity:
+ domain = 1000;
+ activation_func = [](float x)-> float {return x;};
+ break;
+ case kActRelu:
+ domain = 1000;
+ activation_func = [](float x)-> float {return relu(x);};
+ break;
+ case kActLeakyRelu:
+ domain = 1000;
+ activation_func = [](float x)-> float {return leaky_relu(x);};
+ break;
+ case kActKaldiLstmClipping:
+ domain = 16000;
+ activation_func = [](float x)-> float {
+ return clipping(x,
+ KALDI_LSTM_CLIP_LOWER,
+ KALDI_LSTM_CLIP_UPPER);};
+ break;
+ default:
+ domain = 50000;
+ activation_func = [](float x)-> float {return 0;};
+ }
+
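+ // the loop below samples arg = i/1000.0, i.e. the activation is probed
+ // over [-domain/1000, domain/1000) in milli-unit steps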
+ std::vector<double> y_deviation(2*domain);
+ std::vector<intel_pwl_segment_t*> segments_vector(affine->pwl.nSegments);
+ std::iota(segments_vector.begin(), segments_vector.begin()+affine->pwl.nSegments,
+ affine->pwl.pSegments);
+
+ auto current_segment = segments_vector.begin();
+ auto deviation_itr = y_deviation.begin();
+
+ for(int i=-domain; i<domain; ++i) {
+ float value = 0.0;
+ const float arg = i/1000.0;
+ while(current_segment != segments_vector.end() &&
+ arg > static_cast<int32_t>((*current_segment)->xBase & XBASEMASK) / test_arg_scale_factor) {
+ ++current_segment;
+ }
+ auto prev_segment = std::prev(current_segment,1);
+ value = activation_func(arg);
+
+ float base_arg = static_cast<int32_t>((*prev_segment)->xBase & XBASEMASK) / test_arg_scale_factor;
+ float base_value = static_cast<int32_t>((*prev_segment)->yBase) / ACTIVATION_SCALE_FACTOR;
+
+ uint32_t slope_scale_index = (*prev_segment)->xBase & ~XBASEMASK;
+
+ uint64_t slope_scale = static_cast<uint64_t>(1) << (8 * (1 + slope_scale_index));
+ float slope =
+ test_arg_scale_factor*(static_cast<float>((*prev_segment)->slope ) / (slope_scale*ACTIVATION_SCALE_FACTOR));
+
+ float quant_value = (arg - base_arg)*slope + base_value;
+
+ *deviation_itr = std::pow(std::abs(value - quant_value), 2);
+ ++deviation_itr;
+ }
+
+ // sort ascending so that precision is not lost when accumulating
+ std::sort(y_deviation.begin(), y_deviation.end());
+ double sum = std::accumulate(y_deviation.begin(), y_deviation.end(), 0.0);
+ rmse = std::sqrt(sum / static_cast<float>(y_deviation.size()));
+ }
+
+ return rmse_threshold > rmse;
+ };
+ void DescribeTo(::std::ostream *os) const override {
+ *os << "Has the activation layer type " << activation_type <<" rmse less that threshold "<< rmse_threshold
+ << " or segments count less that threshold " << segment_threshold
+ << " ?";
+ }
+};
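+
+// Worked example (using test_arg_scale_factor == 16384 from above): a segment
+// with xBase == 16384 has its breakpoint at arg = 16384 / 16384 = 1.0; the low
+// bits of xBase (~XBASEMASK) select a slope scale of 2^(8*(1+index)), and the
+// segment approximates f(arg) as (arg - base_arg) * slope + base_value.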
+
+inline ::testing::Matcher<const intel_nnet_type_t*> PrecisionOfQuantizedPwlMetrics(uint32_t type,
+ float threshold,
+ uint16_t segments) {
+ std::unique_ptr<NNetComponentMatcher> c (new NNetComponentMatcher());
+ c->add(new PWLQuantizationMetricsMatcher(type, threshold, segments));
+ return ::testing::MakeMatcher(c.release());
+}
diff --git a/inference-engine/tests/unit/engines/gna/test_irs.cpp b/inference-engine/tests/unit/engines/gna/test_irs.cpp
new file mode 100644
index 000000000..f9a035341
--- /dev/null
+++ b/inference-engine/tests/unit/engines/gna/test_irs.cpp
@@ -0,0 +1,2678 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "test_irs.hpp"
+
+namespace GNATestIRs {
+
+std::string FCOnlyModel() {
+ return R"V0G0N(
+<Net Name="FullyConnected_Only" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+
+ <layer name="FullyConnected" id="1" type="InnerProduct" precision="FP32">
+
+ <fc out-size="10" />
+
+ <biases offset="0" size="40" />
+ <weights offset="40" size="400" />
+
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="1" to-port="0" />
+ </edges>
+</Net>
+)V0G0N";
+}
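+
+// Note: in the FC models here the bias blob occupies bytes [0, 40) (10 floats)
+// and the weight blob bytes [40, 440) (10x10 floats) - hence the 440-byte U8
+// weights blobs allocated by the tests above.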
+
+std::string Fc2DOutputModel() {
+ return R"V0G0N(
+<Net Name="FullyConnected_Only" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+
+ <layer name="FullyConnected" id="1" type="InnerProduct" precision="FP32">
+
+ <fc out-size="10" />
+
+ <biases offset="0" size="40" />
+ <weights offset="40" size="400" />
+
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="1" to-port="0" />
+ </edges>
+</Net>
+)V0G0N";
+}
+
+std::string affineToMemoryModel() {
+ return R"V0G0N(
+<Net Name="FullyConnected_ToMemory" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+ <layer name="FullyConnected" id="1" type="InnerProduct" precision="FP32">
+
+ <fc out-size="10" />
+
+ <biases offset="0" size="40" />
+ <weights offset="40" size="400" />
+
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+ <layer name="Eltwise_8" type="Eltwise" id="11" precision="FP32">
+ <data operation="sum" />
+ <input>
+ <port id="0">
+ <!--connected to FullyConnected-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ <port id="1">
+ <!--connected to Memory_28-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="2">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+ <layer name="Memory_27" type="Memory" id="27" precision="FP32">
+ <data id="r_27-28" index="0" size="2" />
+ <input>
+ <port id="60">
+ <!--connected to FullyConnected-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ </layer>
+
+ <layer name="Memory_28" type="Memory" id="28" precision="FP32">
+ <data id="r_27-28" index="1" size="2" />
+ <output>
+ <port id="59">
+ <!--connected to , Eltwise_8-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="1" to-port="0" />
+ <edge from-layer="1" from-port="1" to-layer="27" to-port="60" />
+ <edge from-layer="1" from-port="1" to-layer="11" to-port="1" />
+ <edge from-layer="28" from-port="59" to-layer="11" to-port="0" />
+ </edges>
+</Net>
+)V0G0N";
+}
+std::string eltwiseToMemoryModelNoOutput() {
+ return R"V0G0N(
+<Net Name="FullyConnected_ToMemory" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+
+ <layer name="Eltwise_8" type="Eltwise" id="11" precision="FP32">
+ <data operation="sum" />
+ <input>
+ <port id="0">
+ <!--connected to FullyConnected-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ <port id="1">
+ <!--connected to Memory_28-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="2">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+
+ <layer name="Memory_27" type="Memory" id="27" precision="FP32">
+ <data id="r_27-28" index="0" size="2" />
+ <input>
+ <port id="60">
+ <!--connected to Eltwise_8-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ </layer>
+
+ <layer name="Memory_28" type="Memory" id="28" precision="FP32">
+ <data id="r_27-28" index="1" size="2" />
+ <output>
+ <port id="59">
+ <!--connected to , Eltwise_8-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="11" to-port="1" />
+ <edge from-layer="11" from-port="2" to-layer="27" to-port="60" />
+ <edge from-layer="28" from-port="59" to-layer="11" to-port="0" />
+ </edges>
+</Net>
+)V0G0N";
+}
+std::string eltwiseToMemoryModel() {
+ return R"V0G0N(
+<Net Name="FullyConnected_ToMemory" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Eltwise_8" type="Eltwise" id="11" precision="FP32">
+ <data operation="sum" />
+ <input>
+ <port id="0">
+ <!--connected to Memory_28-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ <port id="1">
+ <!--connected to input-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="2">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+ <layer name="Eltwise_9" type="Eltwise" id="12" precision="FP32">
+ <data operation="sum" />
+ <input>
+ <port id="0">
+ <!--connected to Memory_28-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ <port id="1">
+ <!--connected to Eltwise_8-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="2">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+ <layer name="Memory_27" type="Memory" id="27" precision="FP32">
+ <data id="r_27-28" index="0" size="2" />
+ <input>
+ <port id="60">
+ <!--connected to Eltwise_8-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ </layer>
+
+ <layer name="Memory_28" type="Memory" id="28" precision="FP32">
+ <data id="r_27-28" index="1" size="2" />
+ <output>
+ <port id="59">
+ <!--connected to , Eltwise_8-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ <port id="5010">
+ <!--connected to , Eltwise_9-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="11" to-port="1" />
+ <edge from-layer="11" from-port="2" to-layer="27" to-port="60" />
+ <edge from-layer="11" from-port="2" to-layer="12" to-port="1" />
+ <edge from-layer="28" from-port="59" to-layer="11" to-port="0" />
+ <edge from-layer="28" from-port="5010" to-layer="12" to-port="0" />
+ </edges>
+</Net>
+)V0G0N";
+}
+
+std::string activationAfterSplitModel() {
+ return R"V0G0N(
+ <Net Name="activationAfterSplit" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Eltwise_8" type="Eltwise" id="11" precision="FP32">
+ <data operation="sum" />
+ <input>
+ <port id="0">
+ <!--connected to split-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ <port id="1">
+ <!--connected to tanh_28-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="2">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+ <layer name="Split_1" type="Split" id="12" precision="FP32">
+ <data axis="1" />
+ <input>
+ <port id="0">
+ <!--connected to input-->
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <!--connected to tanh-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ <port id="2">
+ <!--connected to eltwise-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Activation_38" type="Activation" id="38" precision="FP32">
+ <data type="tanh" />
+ <input>
+ <port id="82">
+ <!--connected to Eltwise_37-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="83">
+ <!--connected to , Eltwise_41-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="12" to-port="0" />
+ <edge from-layer="12" from-port="1" to-layer="11" to-port="0" />
+ <edge from-layer="12" from-port="2" to-layer="38" to-port="82" />
+ <edge from-layer="38" from-port="83" to-layer="11" to-port="1" />
+ </edges>
+ </Net>
+ )V0G0N";
+}
+
+std::string FCWithPaddingAfterSplitModel() {
+ return R"V0G0N(
+ <Net Name="FCWithPaddingAfterSplitModel" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Split_1" type="Split" id="1" precision="FP32">
+ <data axis="1" />
+ <input>
+ <port id="0">
+ <!--connected to input-->
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <!--connected to eltwise-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ <port id="2">
+ <!--connected to fc-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="FullyConnected" id="11" type="InnerProduct" precision="FP32">
+ <fc out-size="10" />
+ <biases offset="0" size="40" />
+ <weights offset="40" size="400" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Eltwise_8" type="Eltwise" id="21" precision="FP32">
+ <data operation="sum" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="2">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="1" to-port="0" />
+ <edge from-layer="1" from-port="1" to-layer="21" to-port="0" />
+ <edge from-layer="1" from-port="2" to-layer="11" to-port="0" />
+ <edge from-layer="11" from-port="1" to-layer="21" to-port="1" />
+ </edges>
+ </Net>
+ )V0G0N";
+}
+
+std::string twoFCWithPaddingAfterSliceModel() {
+ return R"V0G0N(
+ <Net Name="twoFCWithPaddingAfterSliceModel" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Slice_1" type="Slice" id="1" precision="FP32">
+ <data axis="1" slice_point="8" slice_dim="1"/>
+ <input>
+ <port id="0">
+ <!--connected to input-->
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <!--connected to eltwise-->
+ <dim>1</dim>
+ <dim>8</dim>
+ </port>
+ <port id="2">
+ <!--connected to fc-->
+ <dim>1</dim>
+ <dim>12</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="FullyConnected1" id="11" type="InnerProduct" precision="FP32">
+ <fc out-size="8" />
+ <biases offset="0" size="32" />
+ <weights offset="32" size="384" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>12</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>8</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="FullyConnected2" id="12" type="InnerProduct" precision="FP32">
+ <fc out-size="8" />
+ <biases offset="0" size="32" />
+ <weights offset="32" size="384" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>12</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>8</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Eltwise_1" type="Eltwise" id="21" precision="FP32">
+ <data operation="sum" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>8</dim>
+ </port>
+ <port id="1">
+ <dim>1</dim>
+ <dim>8</dim>
+ </port>
+ </input>
+ <output>
+ <port id="2">
+ <dim>1</dim>
+ <dim>8</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Eltwise_2" type="Eltwise" id="22" precision="FP32">
+ <data operation="sum" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>8</dim>
+ </port>
+ <port id="1">
+ <dim>1</dim>
+ <dim>8</dim>
+ </port>
+ </input>
+ <output>
+ <port id="2">
+ <dim>1</dim>
+ <dim>8</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="1" to-port="0" />
+ <edge from-layer="1" from-port="1" to-layer="21" to-port="0" />
+ <edge from-layer="1" from-port="2" to-layer="11" to-port="0" />
+ <edge from-layer="1" from-port="2" to-layer="12" to-port="0" />
+ <edge from-layer="11" from-port="1" to-layer="21" to-port="1" />
+ <edge from-layer="21" from-port="2" to-layer="22" to-port="0" />
+ <edge from-layer="12" from-port="1" to-layer="22" to-port="1" />
+ </edges>
+ </Net>
+ )V0G0N";
+}
+
+std::string FCWithPaddingAfterSliceModel() {
+ return R"V0G0N(
+ <Net Name="FCWithPaddingAfterSliceModel" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Slice_1" type="Slice" id="1" precision="FP32">
+ <data axis="1" slice_point="8" slice_dim="1"/>
+ <input>
+ <port id="0">
+ <!--connected to input-->
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <!--connected to eltwise-->
+ <dim>1</dim>
+ <dim>8</dim>
+ </port>
+ <port id="2">
+ <!--connected to fc-->
+ <dim>1</dim>
+ <dim>12</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="FullyConnected" id="11" type="InnerProduct" precision="FP32">
+ <fc out-size="8" />
+ <biases offset="0" size="32" />
+ <weights offset="32" size="384" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>12</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>8</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Eltwise_8" type="Eltwise" id="21" precision="FP32">
+ <data operation="sum" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>8</dim>
+ </port>
+ <port id="1">
+ <dim>1</dim>
+ <dim>8</dim>
+ </port>
+ </input>
+ <output>
+ <port id="2">
+ <dim>1</dim>
+ <dim>8</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="1" to-port="0" />
+ <edge from-layer="1" from-port="1" to-layer="21" to-port="0" />
+ <edge from-layer="1" from-port="2" to-layer="11" to-port="0" />
+ <edge from-layer="11" from-port="1" to-layer="21" to-port="1" />
+ </edges>
+ </Net>
+ )V0G0N";
+}
+
+std::string SliceModelWithAlignedOutputs() {
+ return R"V0G0N(
+ <Net Name="SliceModelWithAlignedOutputs" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Slice_1" type="Slice" id="1" precision="FP32">
+ <data axis="1" slice_point="8" slice_dim="1"/>
+ <input>
+ <port id="0">
+ <!--connected to input-->
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <!--connected to fc-->
+ <dim>1</dim>
+ <dim>16</dim>
+ </port>
+ <port id="2">
+ <!--connected to eltwise-->
+ <dim>1</dim>
+ <dim>4</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="FullyConnected" id="11" type="InnerProduct" precision="FP32">
+ <fc out-size="4" />
+ <biases offset="0" size="16" />
+ <weights offset="16" size="512" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>16</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>4</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Eltwise_8" type="Eltwise" id="21" precision="FP32">
+ <data operation="sum" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>4</dim>
+ </port>
+ <port id="1">
+ <dim>1</dim>
+ <dim>4</dim>
+ </port>
+ </input>
+ <output>
+ <port id="2">
+ <dim>1</dim>
+ <dim>4</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="1" to-port="0" />
+ <edge from-layer="1" from-port="1" to-layer="11" to-port="0" />
+ <edge from-layer="1" from-port="2" to-layer="21" to-port="0" />
+ <edge from-layer="11" from-port="1" to-layer="21" to-port="1" />
+ </edges>
+ </Net>
+ )V0G0N";
+}
+
+std::string eltwiseSummModel() {
+ return R"V0G0N(
+ <Net Name="activationAfterSplit" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="FullyConnected" id="2" type="InnerProduct" precision="FP32">
+
+ <fc out-size="10" />
+
+ <biases offset="0" size="40" />
+ <weights offset="40" size="400" />
+
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+ <layer name="FullyConnected_1" id="3" type="InnerProduct" precision="FP32">
+
+ <fc out-size="10" />
+
+ <biases offset="0" size="40" />
+ <weights offset="40" size="400" />
+
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+ <layer name="Eltwise_8" type="Eltwise" id="11" precision="FP32">
+ <data operation="sum" />
+ <input>
+ <port id="0">
+ <!--connected to FC1-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ <port id="1">
+ <!--connected to FC2-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="2">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="2" to-port="0" />
+ <edge from-layer="0" from-port="0" to-layer="3" to-port="0" />
+ <edge from-layer="2" from-port="1" to-layer="11" to-port="0" />
+ <edge from-layer="3" from-port="1" to-layer="11" to-port="1" />
+ </edges>
+ </Net>
+ )V0G0N";
+}
+
+
+std::string eltwiseMulModel() {
+ return R"V0G0N(
+ <Net Name="eltwiseMul" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="FullyConnected" id="2" type="InnerProduct" precision="FP32">
+
+ <fc out-size="10" />
+
+ <biases offset="0" size="40" />
+ <weights offset="40" size="400" />
+
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+ <layer name="FullyConnected_1" id="3" type="InnerProduct" precision="FP32">
+
+ <fc out-size="10" />
+
+ <biases offset="0" size="40" />
+ <weights offset="40" size="400" />
+
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+ <layer name="Eltwise_8" type="Eltwise" id="11" precision="FP32">
+ <data operation="mul" />
+ <input>
+ <port id="0">
+ <!--connected to FC1-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ <port id="1">
+ <!--connected to FC2-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="2">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="2" to-port="0" />
+ <edge from-layer="0" from-port="0" to-layer="3" to-port="0" />
+ <edge from-layer="2" from-port="1" to-layer="11" to-port="0" />
+ <edge from-layer="3" from-port="1" to-layer="11" to-port="1" />
+ </edges>
+ </Net>
+ )V0G0N";
+}
+
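+// A ScaleShift layer placed directly in front of a fully connected layer.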
+std::string scaleShiftAffineModel() {
+ return R"V0G0N(
+<Net Name="FullyConnected_Only" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+ <layer name="ScaleShift_21" type="ScaleShift" id="21" precision="FP32">
+ <input>
+ <port id="46">
+ <!--connected to input-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="45">
+ <!--connected to FullyConnected-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ <weights offset="0" size="40" precision="FP32" />
+ </layer>
+
+ <layer name="FullyConnected" id="1" type="InnerProduct" precision="FP32">
+
+ <fc out-size="10" />
+
+ <biases offset="0" size="40" />
+ <weights offset="40" size="400" />
+
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="21" to-port="46" />
+ <edge from-layer="21" from-port="45" to-layer="1" to-port="0" />
+ </edges>
+</Net>
+)V0G0N";
+
+}
+
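+// A Clamp activation followed immediately by a tanh activation.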
+std::string clampFollowedByTanhModel() {
+ return R"V0G0N(
+<Net Name="clampFollowedByTanhModel" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+ <layer name="Clamp_20" type="Clamp" id="20" precision="FP32">
+ <data max="50" min="-50" />
+ <input>
+ <port id="43">
+ <!--connected to Eltwise_19-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="44">
+ <!--connected to Activation_38-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+ <layer name="Activation_38" type="Activation" id="38" precision="FP32">
+ <data type="tanh" />
+ <input>
+ <port id="82">
+ <!--connected to Eltwise_37-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="83">
+ <!--network output-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="20" to-port="43" />
+ <edge from-layer="20" from-port="44" to-layer="38" to-port="82" />
+ </edges>
+</Net>
+)V0G0N";
+
+}
+
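+// Eltwise sum fed by a tanh activation and by a Memory (state) read; the activation
+// output is also written back through the paired Memory layer.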
+std::string eltwiseWithMemoryAndActivationInputModel() {
+ return R"V0G0N(
+ <Net Name="activationAfterSplit" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+ <layer name="Memory_27" type="Memory" id="27" precision="FP32">
+ <data id="r_27-28" index="0" size="2" />
+ <input>
+ <port id="60">
+ <!--connected to Activation_38-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ </layer>
+
+ <layer name="Memory_28" type="Memory" id="28" precision="FP32">
+ <data id="r_27-28" index="1" size="2" />
+ <output>
+ <port id="59">
+ <!--connected to Eltwise_8-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+ <layer name="FullyConnected" id="2" type="InnerProduct" precision="FP32">
+
+ <fc out-size="10" />
+
+ <biases offset="0" size="40" />
+ <weights offset="40" size="400" />
+
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+ <layer name="Activation_38" type="Activation" id="38" precision="FP32">
+ <data type="tanh" />
+ <input>
+ <port id="82">
+ <!--connected to Eltwise_37-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="83">
+ <!--connected to Eltwise_8, Memory_27-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+ <layer name="Eltwise_8" type="Eltwise" id="11" precision="FP32">
+ <data operation="sum" />
+ <input>
+ <port id="0">
+ <!--connected to Activation_38-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ <port id="1">
+ <!--connected to Memory_28-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="2">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="2" to-port="0" />
+ <edge from-layer="2" from-port="1" to-layer="38" to-port="82" />
+ <edge from-layer="38" from-port="83" to-layer="11" to-port="0" />
+ <edge from-layer="28" from-port="59" to-layer="11" to-port="1" />
+ <edge from-layer="38" from-port="83" to-layer="27" to-port="60" />
+ </edges>
+ </Net>
+ )V0G0N";
+
+}
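+
+// A single affine layer whose output feeds two further affine layers; their results are summed.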
+std::string AffineWith2AffineOutputsModel() {
+ return R"V0G0N(
+ <Net Name="eltwiseMul" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="FullyConnected" id="2" type="InnerProduct" precision="FP32">
+
+ <fc out-size="10" />
+
+ <biases offset="0" size="40" />
+ <weights offset="40" size="400" />
+
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+ <layer name="FullyConnected_1" id="3" type="InnerProduct" precision="FP32">
+
+ <fc out-size="10" />
+
+ <biases offset="0" size="40" />
+ <weights offset="40" size="400" />
+
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+ <layer name="FullyConnected_5" id="4" type="InnerProduct" precision="FP32">
+
+ <fc out-size="10" />
+
+ <biases offset="0" size="40" />
+ <weights offset="40" size="400" />
+
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Eltwise_8" type="Eltwise" id="11" precision="FP32">
+ <data operation="sum" />
+ <input>
+ <port id="0">
+ <!--connected to FullyConnected_5-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ <port id="1">
+ <!--connected to FullyConnected_1-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="2">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="2" to-port="0" />
+ <edge from-layer="2" from-port="1" to-layer="3" to-port="0" />
+ <edge from-layer="2" from-port="1" to-layer="4" to-port="0" />
+ <edge from-layer="4" from-port="1" to-layer="11" to-port="0" />
+ <edge from-layer="3" from-port="1" to-layer="11" to-port="1" />
+ </edges>
+ </Net>
+ )V0G0N";
+
+}
+
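+// The models below each consist of the input followed by a single Activation layer,
+// one activation kind per model.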
+std::string SigmoidActivationModel() {
+ return R"V0G0N(
+<Net Name="InputLayerWithSigmoidActivation" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Sig_Activation" id="2" type="Activation" precision="FP32">
+ <data type="sigmoid" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="2" to-port="0" />
+ </edges>
+</Net>
+)V0G0N";
+}
+
+std::string TanhActivationModel() {
+ return R"V0G0N(
+<Net Name="InputLayerWithTanhActivation" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Tanh_Activation" id="2" type="Activation" precision="FP32">
+ <data type="tanh" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="2" to-port="0" />
+ </edges>
+</Net>
+)V0G0N";
+}
+
+std::string ReLUActivationModel() {
+ return R"V0G0N(
+<Net Name="InputLayerWithReLUActivation" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>1</dim>
+ <dim>10</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="ReLU_Activation" type="Activation" id="2" precision="FP32">
+ <data type="ReLU" negative_slope="0.000000" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>1</dim>
+ <dim>10</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="4">
+ <dim>1</dim>
+ <dim>1</dim>
+ <dim>10</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="2" to-port="0" />
+ </edges>
+</Net>
+)V0G0N";
+}
+
+std::string LeakyReLUActivationModel() {
+ return R"V0G0N(
+<Net Name="InputLayerWithLeakyReLUActivation" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>1</dim>
+ <dim>10</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="LeakyReLU_Activation" type="Activation" id="2" precision="FP32">
+ <data type="ReLU" negative_slope="0.010000" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>1</dim>
+ <dim>10</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="4">
+ <dim>1</dim>
+ <dim>1</dim>
+ <dim>10</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="2" to-port="0" />
+ </edges>
+</Net>
+)V0G0N";
+}
+
+std::string ClampActivationModel() {
+ return R"V0G0N(
+<Net Name="InputLayerWithClippingActivation" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Clamp_Activation" id="2" type="Activation" precision="FP32">
+ <data type="clamp" min="-5" max="5" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="2" to-port="0" />
+ </edges>
+</Net>
+)V0G0N";
+}
+
+std::string IdentityActivationModel() {
+ return R"V0G0N(
+<Net Name="InputLayerWithIdentityActivation" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Identity_Activation" id="2" type="Activation" precision="FP32">
+ <data type="identity" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="2" to-port="0" />
+ </edges>
+</Net>
+)V0G0N";
+}
+
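+// Split into two branches (ReLU and FullyConnected) that are concatenated and passed
+// through a final fully connected layer.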
+std::string concatModel() {
+ return R"V0G0N(
+ <Net Name="concatinationModel" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Split1" type="Split" id="1" precision="FP32">
+ <data axis="1" />
+ <input>
+ <port id="0">
+ <!--connected to input-->
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <!--connected to eltwise-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ <port id="2">
+ <!--connected to fc-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="ReLU1" id="11" type="Activation" precision="FP32">
+ <data type="ReLU" negative_slope="0.000000" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="FullyConnected1" id="12" type="InnerProduct" precision="FP32">
+ <fc out-size="10" />
+ <biases offset="0" size="40" />
+ <weights offset="40" size="400" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="concat1" id="21" precision="FP32" type="Concat">
+ <data axis="1"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="2">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="FullyConnected2" id="31" type="InnerProduct" precision="FP32">
+ <fc out-size="20" />
+ <biases offset="0" size="80" />
+ <weights offset="80" size="1600" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="1" to-port="0" />
+ <edge from-layer="1" from-port="1" to-layer="11" to-port="0" />
+ <edge from-layer="1" from-port="2" to-layer="12" to-port="0" />
+ <edge from-layer="11" from-port="1" to-layer="21" to-port="0" />
+ <edge from-layer="12" from-port="1" to-layer="21" to-port="1" />
+ <edge from-layer="21" from-port="2" to-layer="31" to-port="0" />
+ </edges>
+ </Net>
+ )V0G0N";
+}
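+
+// LeakyReLU in its TensorFlow decomposition: relu(x) - 0.2 * relu(-x), built from
+// ReLU, Power and Eltwise layers after a convolution.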
+std::string TFLeakyReluModel() {
+ return R"V0G0N(
+ <?xml version="1.0" ?>
+ <net batch="1" name="model" version="2">
+ <layers>
+ <layer id="0" name="Placeholder" precision="FP32" type="Input">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>126</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="1" name="conv1_node/Conv2D" precision="FP32" type="Convolution">
+ <data dilation-x="1" dilation-y="1" group="1" kernel-x="5" kernel-y="1" output="128" pad-x="0" pad-y="0" stride="1,1,1,1" stride-x="1" stride-y="1"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>126</dim>
+ </port>
+ </input>
+ <output>
+ <port id="3">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </output>
+ <blobs>
+ <weights offset="0" size="327680"/>
+ <biases offset="327680" size="512"/>
+ </blobs>
+ </layer>
+ <layer id="2" name="conv1_node/Relu" precision="FP32" type="ReLU">
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="3" name="conv1_node/Neg" precision="FP32" type="Power">
+ <data power="1" scale="-1" shift="0"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="4" name="conv1_node/Relu_1" precision="FP32" type="ReLU">
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="5" name="conv1_node/mul" precision="FP32" type="Power">
+ <data power="1" scale="0.20000000298023224" shift="0"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="47" name="conv1_node/sub/negate_86" precision="FP32" type="Power">
+ <data power="1" scale="-1" shift="0"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="48" name="conv1_node/sub/add_87" precision="FP32" type="Eltwise">
+ <data operation="sum"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ <port id="1">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </input>
+ <output>
+ <port id="2">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
+ <edge from-layer="1" from-port="3" to-layer="2" to-port="0"/>
+ <edge from-layer="1" from-port="3" to-layer="3" to-port="0"/>
+ <edge from-layer="3" from-port="1" to-layer="4" to-port="0"/>
+ <edge from-layer="4" from-port="1" to-layer="5" to-port="0"/>
+ <edge from-layer="5" from-port="1" to-layer="47" to-port="0"/>
+ <edge from-layer="2" from-port="1" to-layer="48" to-port="0"/>
+ <edge from-layer="47" from-port="1" to-layer="48" to-port="1"/>
+ </edges>
+ </net>
+ )V0G0N";
+}
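+
+// Two of the decomposed LeakyReLU blocks above chained through convolutions,
+// terminated by a max pooling layer.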
+std::string maxpoolAfterRelu() {
+ return R"V0G0N(
+<?xml version="1.0" ?>
+<net batch="1" name="model" version="2">
+ <layers>
+ <layer id="0" name="Placeholder" precision="FP32" type="Input">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>126</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="1" name="conv1_node/Conv2D" precision="FP32" type="Convolution">
+ <data dilation-x="1" dilation-y="1" group="1" kernel-x="5" kernel-y="1" output="128" pad-x="0" pad-y="0" stride="1,1,1,1" stride-x="1" stride-y="1"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>126</dim>
+ </port>
+ </input>
+ <output>
+ <port id="3">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </output>
+ <blobs>
+ <weights offset="0" size="327680"/>
+ <biases offset="327680" size="512"/>
+ </blobs>
+ </layer>
+ <layer id="2" name="conv1_node/Relu" precision="FP32" type="ReLU">
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="3" name="conv1_node/Neg" precision="FP32" type="Power">
+ <data power="1" scale="-1" shift="0"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="4" name="conv1_node/Relu_1" precision="FP32" type="ReLU">
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="5" name="conv1_node/mul" precision="FP32" type="Power">
+ <data power="1" scale="0.20000000298023224" shift="0"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="47" name="conv1_node/sub/negate_86" precision="FP32" type="Power">
+ <data power="1" scale="-1" shift="0"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="48" name="conv1_node/sub/add_87" precision="FP32" type="Eltwise">
+ <data operation="sum"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ <port id="1">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </input>
+ <output>
+ <port id="2">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="6" name="conv2_node/Conv2D" precision="FP32" type="Convolution">
+ <data dilation-x="1" dilation-y="1" group="1" kernel-x="5" kernel-y="1" output="128" pad-x="0" pad-y="0" stride="1,1,1,1" stride-x="1" stride-y="1"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>122</dim>
+ </port>
+ </input>
+ <output>
+ <port id="3">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>118</dim>
+ </port>
+ </output>
+ <blobs>
+ <weights offset="328192" size="327680"/>
+ <biases offset="655872" size="512"/>
+ </blobs>
+ </layer>
+ <layer id="7" name="conv2_node/Relu" precision="FP32" type="ReLU">
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>118</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>118</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="8" name="conv2_node/Neg" precision="FP32" type="Power">
+ <data power="1" scale="-1" shift="0"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>118</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>118</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="9" name="conv2_node/Relu_1" precision="FP32" type="ReLU">
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>118</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>118</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="10" name="conv2_node/mul" precision="FP32" type="Power">
+ <data power="1" scale="0.20000000298023224" shift="0"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>118</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>118</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="53" name="conv2_node/sub/negate_92" precision="FP32" type="Power">
+ <data power="1" scale="-1" shift="0"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>118</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>118</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="54" name="conv2_node/sub/add_93" precision="FP32" type="Eltwise">
+ <data operation="sum"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>118</dim>
+ </port>
+ <port id="1">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>118</dim>
+ </port>
+ </input>
+ <output>
+ <port id="2">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>118</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="11" name="pool1_node/MaxPool" precision="FP32" type="Pooling">
+ <data exclude-pad="true" kernel-x="2" kernel-y="1" pad-x="0" pad-y="0" pool-method="max" stride="1,1,1,2" stride-x="2" stride-y="1"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>118</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>128</dim>
+ <dim>1</dim>
+ <dim>59</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
+ <edge from-layer="1" from-port="3" to-layer="2" to-port="0"/>
+ <edge from-layer="1" from-port="3" to-layer="3" to-port="0"/>
+ <edge from-layer="3" from-port="1" to-layer="4" to-port="0"/>
+ <edge from-layer="4" from-port="1" to-layer="5" to-port="0"/>
+ <edge from-layer="5" from-port="1" to-layer="47" to-port="0"/>
+ <edge from-layer="2" from-port="1" to-layer="48" to-port="0"/>
+ <edge from-layer="47" from-port="1" to-layer="48" to-port="1"/>
+ <edge from-layer="48" from-port="2" to-layer="6" to-port="0"/>
+ <edge from-layer="6" from-port="3" to-layer="7" to-port="0"/>
+ <edge from-layer="6" from-port="3" to-layer="8" to-port="0"/>
+ <edge from-layer="8" from-port="1" to-layer="9" to-port="0"/>
+ <edge from-layer="9" from-port="1" to-layer="10" to-port="0"/>
+ <edge from-layer="10" from-port="1" to-layer="53" to-port="0"/>
+ <edge from-layer="7" from-port="1" to-layer="54" to-port="0"/>
+ <edge from-layer="53" from-port="1" to-layer="54" to-port="1"/>
+ <edge from-layer="54" from-port="2" to-layer="11" to-port="0"/>
+ </edges>
+ </net>
+
+ )V0G0N";
+}
+
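+// Nested Split/Concat pairs: the outer split feeds a ReLU branch and an inner split;
+// the inner branches are concatenated, then concatenated again with the ReLU branch
+// before a final fully connected layer.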
+std::string doubleConcatModel() {
+ return R"V0G0N(
+ <Net Name="concatinationModel" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>40</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Split1" type="Split" id="1" precision="FP32">
+ <data axis="1" />
+ <input>
+ <port id="0">
+ <!--connected to input-->
+ <dim>1</dim>
+ <dim>40</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <!--connected to relu-->
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ <port id="2">
+ <!--connected to split-->
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="ReLU1" id="11" type="Activation" precision="FP32">
+ <data type="ReLU" negative_slope="0.000000" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Split2" type="Split" id="12" precision="FP32">
+ <data axis="1" />
+ <input>
+ <port id="0">
+ <!--connected to split-->
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <!--connected to relu-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ <port id="2">
+ <!--connected to fc-->
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="ReLU2" id="21" type="Activation" precision="FP32">
+ <data type="ReLU" negative_slope="0.000000" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="FullyConnected1" id="22" type="InnerProduct" precision="FP32">
+ <fc out-size="10" />
+ <biases offset="0" size="40" />
+ <weights offset="40" size="400" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="concat1" id="31" precision="FP32" type="Concat">
+ <data axis="1"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="2">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="concat2" id="41" precision="FP32" type="Concat">
+ <data axis="1"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ <port id="1">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </input>
+ <output>
+ <port id="2">
+ <dim>1</dim>
+ <dim>40</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="FullyConnected2" id="51" type="InnerProduct" precision="FP32">
+ <fc out-size="40" />
+ <biases offset="400" size="160" />
+ <weights offset="560" size="6960" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>40</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>40</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="1" to-port="0" />
+ <edge from-layer="1" from-port="1" to-layer="11" to-port="0" />
+ <edge from-layer="1" from-port="2" to-layer="12" to-port="0" />
+ <edge from-layer="11" from-port="1" to-layer="41" to-port="0" />
+ <edge from-layer="12" from-port="1" to-layer="21" to-port="0" />
+ <edge from-layer="12" from-port="2" to-layer="22" to-port="0" />
+ <edge from-layer="21" from-port="1" to-layer="31" to-port="0" />
+ <edge from-layer="22" from-port="1" to-layer="31" to-port="1" />
+ <edge from-layer="31" from-port="2" to-layer="41" to-port="1" />
+ <edge from-layer="41" from-port="2" to-layer="51" to-port="0" />
+ </edges>
+ </Net>
+ )V0G0N";
+}
+
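+// The crop models below differ only in the crop offset (none, aligned, unaligned,
+// maximal) and in whether the crop input is produced by another layer.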
+std::string cropWithoutOffsetModel() {
+ return R"V0G0N(
+ <Net Name="cropWithoutOffsetModel" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Crop1" type="Crop" id="1" precision="FP32">
+ <data axis="1" dim="10" offset="0"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="FullyConnected1" id="2" type="InnerProduct" precision="FP32">
+ <fc out-size="10" />
+ <biases offset="0" size="40" />
+ <weights offset="40" size="400" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="1" to-port="0" />
+ <edge from-layer="1" from-port="1" to-layer="2" to-port="0" />
+ </edges>
+ </Net>
+ )V0G0N";
+}
+
+std::string cropWithAlignedOffsetModel() {
+ return R"V0G0N(
+ <Net Name="cropWithAlignedOffsetModel" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Crop1" type="Crop" id="1" precision="FP32">
+ <data axis="1" dim="10" offset="8"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="FullyConnected1" id="2" type="InnerProduct" precision="FP32">
+ <fc out-size="12" />
+ <biases offset="0" size="40" />
+ <weights offset="40" size="400" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="1" to-port="0" />
+ <edge from-layer="1" from-port="1" to-layer="2" to-port="0" />
+ </edges>
+ </Net>
+ )V0G0N";
+}
+
+std::string cropWithOffsetModel() {
+ return R"V0G0N(
+ <Net Name="cropWithOffsetModel" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Crop1" type="Crop" id="1" precision="FP32">
+ <data axis="1" dim="10" offset="5"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="FullyConnected1" id="2" type="InnerProduct" precision="FP32">
+ <fc out-size="10" />
+ <biases offset="0" size="40" />
+ <weights offset="40" size="400" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="1" to-port="0" />
+ <edge from-layer="1" from-port="1" to-layer="2" to-port="0" />
+ </edges>
+ </Net>
+ )V0G0N";
+}
+
+std::string cropWithMaxOffsetModel() {
+ return R"V0G0N(
+ <Net Name="cropWithOffsetModel" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Crop1" type="Crop" id="1" precision="FP32">
+ <data axis="1" dim="10" offset="10"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="FullyConnected1" id="2" type="InnerProduct" precision="FP32">
+ <fc out-size="10" />
+ <biases offset="0" size="40" />
+ <weights offset="40" size="400" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="1" to-port="0" />
+ <edge from-layer="1" from-port="1" to-layer="2" to-port="0" />
+ </edges>
+ </Net>
+ )V0G0N";
+}
+
+std::string cropWithOffsetExtendedModel() {
+ return R"V0G0N(
+ <Net Name="cropWithOffsetExtendedModel" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="FullyConnected1" id="1" type="InnerProduct" precision="FP32">
+ <fc out-size="20" />
+ <biases offset="0" size="80" />
+ <weights offset="80" size="1920" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Crop1" type="Crop" id="11" precision="FP32">
+ <data axis="1" dim="10" offset="5"/>
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="FullyConnected2" id="12" type="InnerProduct" precision="FP32">
+ <fc out-size="10" />
+ <biases offset="1920" size="40" />
+ <weights offset="1960" size="640" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>10</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="1" to-port="0" />
+ <edge from-layer="1" from-port="1" to-layer="11" to-port="0" />
+ <edge from-layer="11" from-port="1" to-layer="12" to-port="0" />
+ </edges>
+ </Net>
+ )V0G0N";
+}
+
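+// The input reaches an element-wise sum both through a Copy layer and through a
+// fully connected layer.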
+std::string copyModel() {
+ return R"V0G0N(
+ <Net Name="cropWithOffsetExtendedModel" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="input_1" type="input" id="0" precision="FP32">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="FullyConnected1" id="1" type="InnerProduct" precision="FP32">
+ <fc out-size="20" />
+ <biases offset="0" size="80" />
+ <weights offset="80" size="1920" />
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Copy1" id="2" type="Copy" precision="FP32">
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </input>
+ <output>
+ <port id="1">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Eltwise_1" type="Eltwise" id="11" precision="FP32">
+ <data operation="sum" />
+ <input>
+ <port id="0">
+ <!--connected to FullyConnected-->
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ <port id="1">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </input>
+ <output>
+ <port id="2">
+ <dim>1</dim>
+ <dim>20</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="1" to-port="0" />
+ <edge from-layer="0" from-port="0" to-layer="2" to-port="0" />
+ <edge from-layer="1" from-port="1" to-layer="11" to-port="0" />
+ <edge from-layer="2" from-port="1" to-layer="11" to-port="1" />
+ </edges>
+ </Net>
+ )V0G0N";
+}
+} // namespace GNATestIRs
diff --git a/inference-engine/tests/unit/engines/gna/test_irs.hpp b/inference-engine/tests/unit/engines/gna/test_irs.hpp
new file mode 100644
index 000000000..c7b4b0c66
--- /dev/null
+++ b/inference-engine/tests/unit/engines/gna/test_irs.hpp
@@ -0,0 +1,43 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <string>
+
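+// Serialized IR v2 model snippets used by the GNA plugin unit tests. Each function
+// returns the model XML; weight blobs are supplied separately by the test fixtures.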
+namespace GNATestIRs {
+
+std::string FCOnlyModel();
+std::string Fc2DOutputModel();
+std::string affineToMemoryModel();
+std::string eltwiseToMemoryModel();
+std::string eltwiseToMemoryModelNoOutput();
+std::string activationAfterSplitModel();
+std::string FCWithPaddingAfterSplitModel();
+std::string SliceModelWithAlignedOutputs();
+std::string FCWithPaddingAfterSliceModel();
+std::string twoFCWithPaddingAfterSliceModel();
+std::string eltwiseSummModel();
+std::string eltwiseMulModel();
+std::string concatModel();
+std::string doubleConcatModel();
+std::string scaleShiftAffineModel();
+std::string clampFollowedByTanhModel();
+std::string eltwiseWithMemoryAndActivationInputModel();
+std::string AffineWith2AffineOutputsModel();
+std::string SigmoidActivationModel();
+std::string TanhActivationModel();
+std::string ReLUActivationModel();
+std::string LeakyReLUActivationModel();
+std::string ClampActivationModel();
+std::string IdentityActivationModel();
+std::string maxpoolAfterRelu();
+std::string TFLeakyReluModel();
+std::string cropWithoutOffsetModel();
+std::string cropWithAlignedOffsetModel();
+std::string cropWithOffsetModel();
+std::string cropWithMaxOffsetModel();
+std::string cropWithOffsetExtendedModel();
+std::string copyModel();
+} // namespace GNATestIRs
diff --git a/inference-engine/tests/unit/engines/mkldnn/constant_propagation_test.cpp b/inference-engine/tests/unit/engines/mkldnn/constant_propagation_test.cpp
index d608d5610..5d817f8b1 100644
--- a/inference-engine/tests/unit/engines/mkldnn/constant_propagation_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/constant_propagation_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/engines/mkldnn/convert_desc_test.cpp b/inference-engine/tests/unit/engines/mkldnn/convert_desc_test.cpp
index 01395da5b..ddd244489 100644
--- a/inference-engine/tests/unit/engines/mkldnn/convert_desc_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/convert_desc_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/engines/mkldnn/dump_test.cpp b/inference-engine/tests/unit/engines/mkldnn/dump_test.cpp
new file mode 100644
index 000000000..042f7ac9f
--- /dev/null
+++ b/inference-engine/tests/unit/engines/mkldnn/dump_test.cpp
@@ -0,0 +1,136 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include "ie_blob.h"
+#include "blob_factory.hpp"
+#include "utils/blob_dump.h"
+
+using namespace InferenceEngine;
+using namespace MKLDNNPlugin;
+
+TEST(MKLDNNDumpTests, UnallocatedBlob_NoDump) {
+ SizeVector dims {2,3,4,5};
+ Blob::Ptr blob = make_blob_with_precision({Precision::U8, dims, NHWC});
+
+ std::stringstream buff;
+
+ EXPECT_THROW({
+ BlobDumper(blob).dump(buff);
+ }, details::InferenceEngineException);
+}
+
+TEST(MKLDNNDumpTests, EmptyBlob_NoDump) {
+ SizeVector dims {2,3,4,5};
+ Blob::Ptr blob;
+
+ std::stringstream buff;
+
+ EXPECT_THROW({
+ BlobDumper(blob).dump(buff);
+ }, details::InferenceEngineException);
+}
+
+TEST(MKLDNNDumpTests, Ser) {
+ SizeVector dims {2,3,4,5};
+ Blob::Ptr blob = make_blob_with_precision({Precision::U8, dims, NHWC});
+ blob->allocate();
+
+ std::stringstream buff;
+ BlobDumper(blob).dump(buff);
+
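+ // The binary dump writes a header in addition to the blob contents, so the
+ // serialized stream must be strictly larger than the raw payload.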
+ ASSERT_GT(buff.str().size(), blob->byteSize());
+}
+
+TEST(MKLDNNDumpTests, SerDeser) {
+ SizeVector dims {2,3,4,5};
+ Blob::Ptr blob = make_blob_with_precision({Precision::U8, dims, NCHW});
+ blob->allocate();
+
+ std::stringstream buff;
+
+ BlobDumper(blob).dump(buff);
+ Blob::Ptr deser_blob = BlobDumper::read(buff).get();
+
+ ASSERT_EQ(deser_blob->dims(), blob->dims());
+ ASSERT_EQ(deser_blob->precision(), blob->precision());
+
+ std::vector<uint8_t> data(blob->buffer().as<uint8_t*>(), blob->buffer().as<uint8_t*>() + blob->size());
+ std::vector<uint8_t> deser_data(deser_blob->buffer().as<uint8_t*>(), deser_blob->buffer().as<uint8_t*>()
+ + deser_blob->size());
+ ASSERT_EQ(deser_data, data);
+}
+
+TEST(MKLDNNDumpTests, SerDeserWithScales) {
+ SizeVector dims {2,3,4,5};
+ auto blob = make_blob_with_precision({Precision::U8, dims, NCHW});
+ blob->allocate();
+
+ auto scls = make_blob_with_precision({Precision::FP32, {3}, C});
+ scls->allocate();
+
+ std::stringstream buff;
+
+ BlobDumper(blob).withScales(scls).dump(buff);
+ auto deser = BlobDumper::read(buff);
+ auto deser_blob = deser.get();
+ auto deser_scls = deser.getScales();
+
+ ASSERT_EQ(deser_blob->dims(), blob->dims());
+ ASSERT_EQ(deser_blob->precision(), blob->precision());
+
+ std::vector<uint8_t> data(blob->buffer().as<uint8_t*>(), blob->buffer().as<uint8_t*>() + blob->size());
+ std::vector<uint8_t> deser_data(deser_blob->buffer().as<uint8_t*>(), deser_blob->buffer().as<uint8_t*>()
+ + deser_blob->size());
+ ASSERT_EQ(deser_data, data);
+
+ std::vector<uint8_t> scls_data(scls->buffer().as<uint8_t*>(), scls->buffer().as<uint8_t*>() + scls->size());
+ std::vector<uint8_t> deser_scls_data(deser_scls->buffer().as<uint8_t*>(), deser_scls->buffer().as<uint8_t*>()
+ + deser_scls->size());
+ ASSERT_EQ(deser_scls_data, scls_data);
+}
+
+
+TEST(MKLDNNDumpTests, SerU8AsTxt) {
+ SizeVector dims {2,3,4,5};
+
+ Blob::Ptr blob = make_blob_with_precision({Precision::U8, dims, NCHW});
+ blob->allocate();
+
+ Blob::Ptr scls = make_blob_with_precision({Precision::FP32, {dims[1]}, C});
+ scls->allocate();
+
+ std::stringstream buff;
+ BlobDumper(blob).withScales(scls).dumpAsTxt(buff);
+
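+ // Text dumps begin with a one-line header (precision, rank, shape, element count),
+ // followed by one line per element.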
+ std::string deser_header, ref_header = "U8 4D shape: 2 3 4 5 (120)";
+ std::getline(buff, deser_header);
+ ASSERT_EQ(deser_header, ref_header);
+
+ auto num_line = std::count(std::istreambuf_iterator<char>(buff),
+ std::istreambuf_iterator<char>(), '\n');
+ ASSERT_EQ(num_line, blob->size());
+}
+
+TEST(MKLDNNDumpTests, SerAsTxt) {
+ SizeVector dims {2,3};
+
+ Blob::Ptr blob = make_blob_with_precision({Precision::FP32, dims, NC});
+ blob->allocate();
+
+ Blob::Ptr scls = make_blob_with_precision({Precision::FP32, {dims[1]}, C});
+ scls->allocate();
+
+ std::stringstream buff;
+ BlobDumper(blob).withScales(scls).dumpAsTxt(buff);
+
+ std::string deser_header, ref_header = "FP32 2D shape: 2 3 (6)";
+ std::getline(buff, deser_header);
+ ASSERT_EQ(deser_header, ref_header);
+
+ auto num_line = std::count(std::istreambuf_iterator<char>(buff),
+ std::istreambuf_iterator<char>(), '\n');
+ ASSERT_EQ(num_line, blob->size());
+}
\ No newline at end of file
diff --git a/inference-engine/tests/unit/engines/mkldnn/dumper_test.cpp b/inference-engine/tests/unit/engines/mkldnn/dumper_test.cpp
new file mode 100644
index 000000000..383a1e7f1
--- /dev/null
+++ b/inference-engine/tests/unit/engines/mkldnn/dumper_test.cpp
@@ -0,0 +1,99 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include "mkldnn_graph.h"
+#include "mkldnn_graph_dumper.h"
+#include "ie_blob.h"
+#include "ie_util_internal.hpp"
+#include "details/ie_cnn_network_tools.h"
+#include "xml_net_builder.hpp"
+#include "graph_tools.hpp"
+
+#include <string>
+#include <map>
+
+using namespace InferenceEngine;
+using namespace MKLDNNPlugin;
+using std::string;
+using std::map;
+
+class NetGen : testing::V2NetBuilder {
+ string model;
+ TBlob<uint8_t>::Ptr weights;
+
+public:
+ NetGen(): testing::V2NetBuilder(buildNetworkWithOneInput(
+ "SomeNet", {2,3,16,16}, "FP32")) {
+ using prm_t = map<string, string>;
+
+ testing::InOutData inout = {{{2,3,16,16}},{{2,16,16,16}}};
+
+ prm_t conv_prm = {
+ {"stride-x", std::to_string(1)},
+ {"stride-y", std::to_string(1)},
+ {"pad-x", std::to_string(1)},
+ {"pad-y", std::to_string(1)},
+ {"kernel-x", std::to_string(3)},
+ {"kernel-y", std::to_string(3)},
+ {"output", std::to_string(16)},
+ {"group", std::to_string(1)}
+ };
+ size_t wght = 3*16*3*3*sizeof(float);
+ size_t bias = 16*sizeof(float);
+
+ prm_t relu_prm = {{"negative_slope", std::to_string(0)}};
+
+ addLayer("Convolution", "FP32", &conv_prm, {{{2,3,16,16}},{{2,16,16,16}}}, wght, bias);
+ addLayer("Relu", "FP32", &relu_prm, {{{2,16,16,16}},{{2,16,16,16}}});
+
+ model = finish();
+
+ weights.reset(new TBlob<uint8_t>({Precision::U8, {wght+bias}, C}));
+ weights->allocate();
+ }
+
+ CNNNetwork net() {
+ CNNNetReader net_reader;
+ net_reader.ReadNetwork(model.data(), model.length());
+ net_reader.SetWeights(weights);
+
+ return net_reader.getNetwork();
+ }
+};
+
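+// Convolution and ReLU are expected to be fused into a single Conv_Activ node, so the
+// dumped net should contain four layers: Input, Conv_Activ, Reorder and Output.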
+TEST(MKLDNNLayersTests, DumpSimpleGraph) {
+ auto net = NetGen().net();
+ MKLDNNGraph graph;
+ MKLDNNExtensionManager::Ptr extMgr;
+ graph.CreateGraph(net, extMgr);
+
+ auto dump_net = dump_graph_as_ie_net(graph);
+ auto layers = details::CNNNetSortTopologically(*dump_net);
+
+ ASSERT_EQ(layers.size(), 4);
+ ASSERT_EQ(layers[0]->type, "Input");
+ ASSERT_EQ(layers[1]->type, "Conv_Activ");
+ ASSERT_EQ(layers[2]->type, "Reorder");
+ ASSERT_EQ(layers[3]->type, "Output");
+}
+
+TEST(MKLDNNLayersTests, DumpSimpleGraphToDot) {
+ auto net = NetGen().net();
+ MKLDNNGraph graph;
+ MKLDNNExtensionManager::Ptr extMgr;
+ graph.CreateGraph(net, extMgr);
+
+ std::stringstream buff;
+ dump_graph_as_dot(graph, buff);
+
+ std::string dot = buff.str();
+ std::cout << dot;
+ ASSERT_EQ(std::count(dot.begin(), dot.end(), '{'), 1); // 1-graph
+ ASSERT_EQ(std::count(dot.begin(), dot.end(), '}'), 1);
+ ASSERT_EQ(std::count(dot.begin(), dot.end(), '['), 10); // 4-node 3-data 3-shape
+ ASSERT_EQ(std::count(dot.begin(), dot.end(), ']'), 10);
+ ASSERT_EQ(std::count(dot.begin(), dot.end(), '>'), 6); // connection
+}
\ No newline at end of file
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/fake_layer.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/fake_layer.cpp
index 988ab4481..4e22a72bf 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/fake_layer.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/fake_layer.cpp
@@ -1,17 +1,101 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
-#include <extension/ext_base.hpp>
-#include <extension/ext_base.cpp>
#include <extension/ext_list.hpp>
+#include <extension/ext_base.cpp>
+
+#include <string>
+#include <map>
+#include <memory>
+#include <algorithm>
+
+using namespace InferenceEngine;
+using namespace Extensions;
+
+struct TestExtensionsHolder {
+ std::map<std::string, Cpu::ext_factory> list;
+ std::map<std::string, IShapeInferImpl::Ptr> si_list;
+};
+
+
+class FakeExtensions : public IExtension {
+ public:
+
+ void SetLogCallback(InferenceEngine::IErrorListener &listener) noexcept override {};
+
+ void Unload() noexcept override {};
+
+ void Release() noexcept override {
+ delete this;
+ };
+
+ static std::shared_ptr<TestExtensionsHolder> GetExtensionsHolder() {
+ static std::shared_ptr<TestExtensionsHolder> localHolder;
+ if (localHolder == nullptr) {
+ localHolder = std::shared_ptr<TestExtensionsHolder>(new TestExtensionsHolder());
+ }
+ return localHolder;
+ }
+
+ static void AddExt(std::string name, Cpu::ext_factory factory) {
+ GetExtensionsHolder()->list[name] = factory;
+ }
+
+ void GetVersion(const Version *&versionInfo) const noexcept override {
+ static Version ExtensionDescription = {
+ {1, 0}, // extension API version
+ "1.0",
+ "ie-cpu-ext" // extension description message
+ };
+
+ versionInfo = &ExtensionDescription;
+ }
+
+ StatusCode getPrimitiveTypes(char **&types, unsigned int &size, ResponseDesc *resp) noexcept override {
+ collectTypes(types, size, GetExtensionsHolder()->list);
+ return OK;
+ };
+ StatusCode getFactoryFor(ILayerImplFactory *&factory, const CNNLayer *cnnLayer, ResponseDesc *resp) noexcept override {
+ auto &factories = GetExtensionsHolder()->list;
+ if (factories.find(cnnLayer->type) == factories.end()) {
+ std::string errorMsg = std::string("Factory for ") + cnnLayer->type + " wasn't found!";
+ errorMsg.copy(resp->msg, sizeof(resp->msg) - 1);
+ return NOT_FOUND;
+ }
+ factory = factories[cnnLayer->type](cnnLayer);
+ return OK;
+ }
+ StatusCode getShapeInferTypes(char **&types, unsigned int &size, ResponseDesc *resp) noexcept override {
+ collectTypes(types, size, GetExtensionsHolder()->si_list);
+ return OK;
+ };
-namespace InferenceEngine {
-namespace Extensions {
-namespace Cpu {
+ StatusCode getShapeInferImpl(IShapeInferImpl::Ptr &impl, const char *type, ResponseDesc *resp) noexcept override {
+ auto &factories = GetExtensionsHolder()->si_list;
+ if (factories.find(type) == factories.end()) {
+ std::string errorMsg = std::string("Shape Infer Implementation for ") + type + " wasn't found!";
+ if (resp) errorMsg.copy(resp->msg, sizeof(resp->msg) - 1);
+ return NOT_FOUND;
+ }
+ impl = factories[type];
+ return OK;
+ }
-class FakeLayerPLNImpl: public ExtLayerBase {
+ template<class T>
+ void collectTypes(char **&types, unsigned int &size, const std::map<std::string, T> &factories) {
+ types = new char *[factories.size()];
+ unsigned count = 0;
+ for (auto it = factories.begin(); it != factories.end(); it++, count++) {
+ types[count] = new char[it->first.size() + 1];
+ std::copy(it->first.begin(), it->first.end(), types[count]);
+ types[count][it->first.size()] = '\0';
+ }
+ size = count;
+ }
+};
+
+ class FakeLayerPLNImpl: public Cpu::ExtLayerBase {
public:
explicit FakeLayerPLNImpl(const CNNLayer* layer) {
try {
@@ -27,7 +111,7 @@ public:
}
};
-class FakeLayerBLKImpl: public ExtLayerBase {
+class FakeLayerBLKImpl: public Cpu::ExtLayerBase {
public:
explicit FakeLayerBLKImpl(const CNNLayer* layer) {
try {
@@ -48,9 +132,24 @@ public:
}
};
-REG_FACTORY_FOR(ImplFactory<FakeLayerPLNImpl>, FakeLayerPLN);
-REG_FACTORY_FOR(ImplFactory<FakeLayerBLKImpl>, FakeLayerBLK);
+template<typename Ext>
+class FakeRegisterBase {
+ public:
+ explicit FakeRegisterBase(const std::string& type) {
+ FakeExtensions::AddExt(type,
+ [](const CNNLayer* layer) -> InferenceEngine::ILayerImplFactory* {
+ return new Ext(layer);
+ });
+ }
+};
+
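+// Instantiates a static FakeRegisterBase whose constructor registers the factory in
+// the test extensions holder during static initialization.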
+#define REG_FAKE_FACTORY_FOR(__prim, __type) \
+static FakeRegisterBase<__prim> __reg__##__type(#__type)
+
+REG_FAKE_FACTORY_FOR(Cpu::ImplFactory<FakeLayerPLNImpl>, FakeLayerPLN);
+REG_FAKE_FACTORY_FOR(Cpu::ImplFactory<FakeLayerBLKImpl>, FakeLayerBLK);
+
-}
-}
-}
+InferenceEngine::IExtensionPtr make_FakeExtensions() {
+ return InferenceEngine::IExtensionPtr(new FakeExtensions());
+}
\ No newline at end of file
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/gather_tests.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/gather_tests.cpp
new file mode 100644
index 000000000..b4300fba6
--- /dev/null
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/gather_tests.cpp
@@ -0,0 +1,695 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <gmock/gmock-spec-builders.h>
+#include "mkldnn_plugin/mkldnn_graph.h"
+
+#include "test_graph.hpp"
+
+#include "single_layer_common.hpp"
+#include <mkldnn_plugin/mkldnn_extension_utils.h>
+#include <extension/ext_list.hpp>
+#include "tests_common.hpp"
+
+
+using namespace ::testing;
+using namespace std;
+using namespace mkldnn;
+
+
+struct gather_test_params {
+ std::string inIdxPrecision;
+ InferenceEngine::SizeVector inIdx;
+ InferenceEngine::SizeVector inDict;
+ int axis;
+ InferenceEngine::SizeVector out;
+
+ size_t num_prim_desc;
+ int selectedType;
+
+ std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
+};
+
+
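+// Clamp *idx to the half-open range [min, max): values below min saturate to min,
+// values at or above max saturate to max - 1.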
+inline void clipping(int *idx, const int min, const int max) {
+ (*idx) = ((*idx) > min) ? (*idx) : min;
+ (*idx) = ((*idx) < max) ? (*idx) : (max - 1);
+ return;
+}
+
+template <typename data_t>
+void ref_gather(InferenceEngine::TBlob<data_t> &srcIdx, InferenceEngine::TBlob<float> &srcDct, InferenceEngine::TBlob<float> &dst, size_t axis) {
+ size_t i, j;
+ const data_t *src_dataIdx = srcIdx.data();
+ float* src_dataDict = srcDct.data();
+ float *dst_data = dst.data();
+ size_t src_size = srcIdx.size();
+
+ std::vector<size_t> dims = srcDct.getTensorDesc().getDims();
+ std::vector<size_t> dims_actual;
+
+ // Remove redundant dimensions
+ for (size_t i = 0; i < dims.size(); i++) {
+ if (dims[i] > 1) {
+ for (size_t j = i; j < dims.size(); j++)
+ dims_actual.push_back(dims[j]);
+ break;
+ }
+ }
+
+ // Find number of dictionaries, index range and data length
+ size_t numDictionaries = 1;
+ for (i = 0; i < axis; i++)
+ numDictionaries *= dims_actual[i];
+ size_t indexRange = dims_actual[axis];
+ size_t dataLength = 1;
+ for (i = axis + 1; i < dims_actual.size(); i++)
+ dataLength *= dims_actual[i];
+
+ // The gathering process
+ for (i = 0; i < src_size; i++) {
+ int idx = static_cast<int>(src_dataIdx[i]);
+
+ // Index clipping
+ clipping(&idx, 0, indexRange);
+
+ // Copying data to destination from Dictionary
+ for (j = 0; j < numDictionaries; j++) {
+ memcpy(&dst_data[dataLength * (i + j * src_size)],
+ &src_dataDict[dataLength * (idx + j * indexRange)], sizeof(float)*dataLength);
+ }
+ }
+}
+
+class MKLDNNCPUExtGatherTests: public TestsCommon, public WithParamInterface<gather_test_params> {
+ std::string model_t = R"V0G0N(
+<net Name="Gather_net" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="InputText" type="Input" precision="_IIDXP_" id="1">
+ <output>
+ <port id="1">
+ _IIDX_
+ </port>
+ </output>
+ </layer>
+ <layer name="InputDictionary" type="Input" precision="FP32" id="2">
+ <output>
+ <port id="2">
+ _IDICT_
+ </port>
+ </output>
+ </layer>
+ <layer name="gather" id="3" type="Gather" precision="FP32">
+ <data axis="_AX_"/>
+ <input>
+ <port id="1">
+ _IDICT_
+ </port>
+ <port id="2">
+ _IIDX_
+ </port>
+ </input>
+ <output>
+ <port id="3">
+ _OUT_
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="1" from-port="1" to-layer="3" to-port="2"/>
+ <edge from-layer="2" from-port="2" to-layer="3" to-port="1"/>
+ </edges>
+</net>
+)V0G0N";
+
+ std::string getModel(gather_test_params p) {
+ std::string model = model_t;
+ std::string inIdx;
+ std::string inDict;
+ std::string out;
+
+ for (auto& idx : p.inIdx) {
+ inIdx += "<dim>";
+ inIdx += std::to_string(idx) + "</dim>\n";
+ }
+
+ for (auto& dct : p.inDict) {
+ inDict += "<dim>";
+ inDict += std::to_string(dct) + "</dim>\n";
+ }
+
+ for (auto& dst : p.out) {
+ out += "<dim>";
+ out += std::to_string(dst) + "</dim>\n";
+ }
+
+ REPLACE_WITH_STR(model, "_IIDXP_", p.inIdxPrecision);
+ REPLACE_WITH_STR(model, "_IIDX_", inIdx);
+ REPLACE_WITH_STR(model, "_IDICT_", inDict);
+ REPLACE_WITH_NUM(model, "_AX_", p.axis);
+ REPLACE_WITH_STR(model, "_OUT_", out);
+
+ return model;
+ }
+
+ template <typename data_t>
+ static void fill_data_dbgval(data_t *data, size_t size) {
+ for (size_t i = 0; i < size; i++) {
+ data[i] = static_cast<data_t>(i & (sizeof(data_t) * 8 - 1));
+ }
+ }
+protected:
+ virtual void TearDown() {
+ }
+
+ virtual void SetUp() {
+ try {
+ TestsCommon::SetUp();
+ gather_test_params p = ::testing::WithParamInterface<gather_test_params>::GetParam();
+ std::string model = getModel(p);
+
+ InferenceEngine::CNNNetReader net_reader;
+ ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+
+ InferenceEngine::Extension cpuExt(make_so_name("cpu_extension"));
+ MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
+ extMgr->AddExtension(InferenceEngine::IExtensionPtr(&cpuExt, [](InferenceEngine::IExtension*){}));
+
+ MKLDNNGraphTestClass graph;
+ graph.CreateGraph(net_reader.getNetwork(), extMgr);
+
+ auto& nodes = graph.getNodes();
+ nodes = graph.getNodes();
+
+ for (auto &node : nodes) {
+ if (node->getName() == "gather") {
+ ASSERT_EQ(p.num_prim_desc, node->getSupportedPrimitiveDescriptors().size());
+ for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) {
+ p.comp.at(j)(node->getSupportedPrimitiveDescriptors().at(j));
+ }
+ ASSERT_NE(nullptr, node->getSelectedPrimitiveDescriptor());
+ ASSERT_EQ(p.selectedType,
+ node->getSelectedPrimitiveDescriptor()->getImplementationType() & p.selectedType);
+ }
+ }
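+            // Expected graph: the two Input nodes, the Gather node itself and one Output node.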
+ ASSERT_EQ(4, nodes.size());
+
+ // Input Dictionary
+ InferenceEngine::Blob::Ptr srcDict = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.inDict, InferenceEngine::TensorDesc::getLayoutByDims(p.inDict) });
+ srcDict->allocate();
+ fill_data(srcDict->buffer(), srcDict->size());
+ auto * srcDictPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(srcDict.get());
+ if (srcDictPtr == nullptr)
+ FAIL() << "Cannot cast blob to TBlob<float>.";
+
+ // Output Data
+ InferenceEngine::OutputsDataMap out;
+ out = net_reader.getNetwork().getOutputsInfo();
+ InferenceEngine::BlobMap outputBlobs;
+
+ std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
+
+ InferenceEngine::TBlob<float>::Ptr output;
+ output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
+ output->allocate();
+ outputBlobs[item.first] = output;
+
+ // Output Reference
+ InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
+ dst_ref.allocate();
+
+ // Input Indexes
+ InferenceEngine::Blob::Ptr srcIdx;
+ if (p.inIdxPrecision == "I32") {
+ srcIdx = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, p.inIdx, InferenceEngine::TensorDesc::getLayoutByDims(p.inIdx) });
+ srcIdx->allocate();
+ fill_data_dbgval(static_cast<int32_t*>(srcIdx->buffer()), srcIdx->size());
+ auto * srcIdxPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(srcIdx.get());
+ if (srcIdxPtr == nullptr)
+ FAIL() << "Cannot cast blob to TBlob<int32_t>.";
+
+ // Check results
+ ref_gather(*srcIdxPtr, *srcDictPtr, dst_ref, p.axis);
+ }
+ else if (p.inIdxPrecision == "FP32") {
+ srcIdx = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.inIdx, InferenceEngine::TensorDesc::getLayoutByDims(p.inIdx) });
+ srcIdx->allocate();
+ fill_data(srcIdx->buffer(), srcIdx->size());
+ auto * srcIdxPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(srcIdx.get());
+ if (srcIdxPtr == nullptr)
+ FAIL() << "Cannot cast blob to TBlob<float>.";
+
+ // Check results
+ ref_gather(*srcIdxPtr, *srcDictPtr, dst_ref, p.axis);
+ }
+ else if (p.inIdxPrecision == "U16") {
+ srcIdx = InferenceEngine::make_shared_blob<uint16_t>({ InferenceEngine::Precision::U16, p.inIdx, InferenceEngine::TensorDesc::getLayoutByDims(p.inIdx) });
+ srcIdx->allocate();
+ fill_data_dbgval(static_cast<uint16_t*>(srcIdx->buffer()), srcIdx->size());
+ auto * srcIdxPtr = dynamic_cast<InferenceEngine::TBlob<uint16_t>*>(srcIdx.get());
+ if (srcIdxPtr == nullptr)
+ FAIL() << "Cannot cast blob to TBlob<uint16_t>.";
+
+ // Check results
+ ref_gather(*srcIdxPtr, *srcDictPtr, dst_ref, p.axis);
+ }
+ else if (p.inIdxPrecision == "I16") {
+ srcIdx = InferenceEngine::make_shared_blob<int16_t>({ InferenceEngine::Precision::I16, p.inIdx, InferenceEngine::TensorDesc::getLayoutByDims(p.inIdx) });
+ srcIdx->allocate();
+ fill_data_dbgval(static_cast<int16_t*>(srcIdx->buffer()), srcIdx->size());
+ auto * srcIdxPtr = dynamic_cast<InferenceEngine::TBlob<int16_t>*>(srcIdx.get());
+ if (srcIdxPtr == nullptr)
+ FAIL() << "Cannot cast blob to TBlob<int16_t>.";
+
+ // Check results
+ ref_gather(*srcIdxPtr, *srcDictPtr, dst_ref, p.axis);
+ }
+ else if (p.inIdxPrecision == "U8") {
+ srcIdx = InferenceEngine::make_shared_blob<uint8_t>({ InferenceEngine::Precision::U8, p.inIdx, InferenceEngine::TensorDesc::getLayoutByDims(p.inIdx) });
+ srcIdx->allocate();
+ fill_data_dbgval(static_cast<uint8_t*>(srcIdx->buffer()), srcIdx->size());
+ auto * srcIdxPtr = dynamic_cast<InferenceEngine::TBlob<uint8_t>*>(srcIdx.get());
+ if (srcIdxPtr == nullptr)
+ FAIL() << "Cannot cast blob to TBlob<uint8_t>.";
+
+ // Check results
+ ref_gather(*srcIdxPtr, *srcDictPtr, dst_ref, p.axis);
+ }
+ else if (p.inIdxPrecision == "I8") {
+ srcIdx = InferenceEngine::make_shared_blob<int8_t>({ InferenceEngine::Precision::I8, p.inIdx, InferenceEngine::TensorDesc::getLayoutByDims(p.inIdx) });
+ srcIdx->allocate();
+ fill_data_dbgval(static_cast<int8_t*>(srcIdx->buffer()), srcIdx->size());
+ auto * srcIdxPtr = dynamic_cast<InferenceEngine::TBlob<int8_t>*>(srcIdx.get());
+ if (srcIdxPtr == nullptr)
+ FAIL() << "Cannot cast blob to TBlob<int8_t>.";
+
+ // Check results
+ ref_gather(*srcIdxPtr, *srcDictPtr, dst_ref, p.axis);
+ }
+            else {
+                // Unsupported index precision: nothing to compare against.
+                return;
+            }
+
+ InferenceEngine::BlobMap srcs;
+ srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("InputDictionary", srcDict));
+ srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("InputText", srcIdx));
+
+ // Infer
+ graph.Infer(srcs, outputBlobs);
+ compare(*output, dst_ref);
+ } catch (const InferenceEngine::details::InferenceEngineException &e) {
+ FAIL() << e.what();
+ }
+ }
+};
+
+TEST_P(MKLDNNCPUExtGatherTests, TestsGather) {}
+
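+// Param order: index precision, index dims, dictionary dims, axis, output dims,
+// expected primitive-descriptor count, and the selected implementation type.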
+INSTANTIATE_TEST_CASE_P(
+ TestsGather, MKLDNNCPUExtGatherTests,
+ ::testing::Values(
+ gather_test_params{ "FP32", {1, 1, 12, 256}, {1, 1, 71, 16}, 0, {1, 12, 256, 16}, 1, MKLDNNPlugin::impl_desc_type::unknown },
+ gather_test_params{ "I32", {1, 1, 12, 256}, {1, 1, 71, 16}, 0, {1, 12, 256, 16}, 1, MKLDNNPlugin::impl_desc_type::unknown },
+ gather_test_params{ "I16", {1, 1, 12, 256}, {1, 1, 71, 16}, 0, {1, 12, 256, 16}, 1, MKLDNNPlugin::impl_desc_type::unknown },
+ gather_test_params{ "U8", {1, 1, 12, 256}, {1, 1, 71, 16}, 0, {1, 12, 256, 16}, 1, MKLDNNPlugin::impl_desc_type::unknown },
+ gather_test_params{ "I8", {1, 1, 12, 256}, {1, 1, 71, 16}, 0, {1, 12, 256, 16}, 1, MKLDNNPlugin::impl_desc_type::unknown },
+ gather_test_params{ "I32", {12, 256}, {71, 16}, 0, {12, 256, 16}, 1, MKLDNNPlugin::impl_desc_type::unknown },
+ gather_test_params{ "I32", {3, 4}, {2, 5, 6}, 0, {3, 4, 5, 6}, 1, MKLDNNPlugin::impl_desc_type::unknown },
+ gather_test_params{ "I32", {3, 4}, {5, 1}, 0, {3, 4, 1}, 1, MKLDNNPlugin::impl_desc_type::unknown },
+ gather_test_params{ "FP32", {1, 1, 12, 256}, {1, 1, 71, 16}, 1, {1, 71, 12, 256}, 1, MKLDNNPlugin::impl_desc_type::unknown },
+ gather_test_params{ "I32", {1, 1, 3, 4}, {1, 2, 5, 6}, 1, {2, 3, 4, 6}, 1, MKLDNNPlugin::impl_desc_type::unknown },
+ gather_test_params{ "I32", {1, 1, 3, 4}, {1, 2, 5, 6}, 2, {2, 5, 3, 4}, 1, MKLDNNPlugin::impl_desc_type::unknown },
+ gather_test_params{ "I32", {12, 4, 9, 8}, {6, 13, 10, 3}, 1, {6, 12, 4, 9, 8, 10, 3}, 1, MKLDNNPlugin::impl_desc_type::unknown }
+ ));
+
+struct gatherTF_test_params {
+ InferenceEngine::SizeVector in_dim;
+ std::vector<int32_t> in;
+
+ InferenceEngine::SizeVector dct_dim;
+ std::vector<float> dct;
+
+ int axis;
+
+ InferenceEngine::SizeVector ref_dim;
+ std::vector<float> ref;
+
+ std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
+};
+
+class MKLDNNCPUExtGatherTFTests : public TestsCommon, public WithParamInterface<gatherTF_test_params> {
+ std::string model_t = R"V0G0N(
+<net Name="Gather_net" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="InputText" type="Input" precision="I32" id="1">
+ <output>
+ <port id="1">
+ _IIDX_
+ </port>
+ </output>
+ </layer>
+ <layer name="InputDictionary" type="Input" precision="FP32" id="2">
+ <output>
+ <port id="2">
+ _IDICT_
+ </port>
+ </output>
+ </layer>
+ <layer name="gather" id="3" type="Gather" precision="FP32">
+ <data axis="_AX_"/>
+ <input>
+ <port id="1">
+ _IDICT_
+ </port>
+ <port id="2">
+ _IIDX_
+ </port>
+ </input>
+ <output>
+ <port id="3">
+ _OUT_
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="1" from-port="1" to-layer="3" to-port="2"/>
+ <edge from-layer="2" from-port="2" to-layer="3" to-port="1"/>
+ </edges>
+</net>
+)V0G0N";
+
+ std::string getModel(gatherTF_test_params p) {
+ std::string model = model_t;
+ std::string inIdx;
+ std::string inDict;
+ std::string out;
+
+ for (auto& idx : p.in_dim) {
+ inIdx += "<dim>";
+ inIdx += std::to_string(idx) + "</dim>\n";
+ }
+
+ for (auto& dct : p.dct_dim) {
+ inDict += "<dim>";
+ inDict += std::to_string(dct) + "</dim>\n";
+ }
+
+ for (auto& dst : p.ref_dim) {
+ out += "<dim>";
+ out += std::to_string(dst) + "</dim>\n";
+ }
+
+ REPLACE_WITH_STR(model, "_IIDX_", inIdx);
+ REPLACE_WITH_STR(model, "_IDICT_", inDict);
+ REPLACE_WITH_NUM(model, "_AX_", p.axis);
+ REPLACE_WITH_STR(model, "_OUT_", out);
+
+ return model;
+ }
+
+protected:
+ virtual void TearDown() {
+ }
+
+ virtual void SetUp() {
+ try {
+ TestsCommon::SetUp();
+ gatherTF_test_params p = ::testing::WithParamInterface<gatherTF_test_params>::GetParam();
+ std::string model = getModel(p);
+
+ InferenceEngine::CNNNetReader net_reader;
+ ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+
+ InferenceEngine::Extension cpuExt(make_so_name("cpu_extension"));
+ MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
+ extMgr->AddExtension(InferenceEngine::IExtensionPtr(&cpuExt, [](InferenceEngine::IExtension*){}));
+
+ MKLDNNGraphTestClass graph;
+ graph.CreateGraph(net_reader.getNetwork(), extMgr);
+
+ // Input Indexes
+ InferenceEngine::Blob::Ptr srcIdx;
+ srcIdx = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, p.in_dim, InferenceEngine::TensorDesc::getLayoutByDims(p.in_dim) });
+ srcIdx->allocate();
+ memcpy(static_cast<int32_t*>(srcIdx->buffer()), &p.in[0], sizeof(int32_t)*p.in.size());
+ auto * srcIdxPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(srcIdx.get());
+ if (srcIdxPtr == nullptr)
+ FAIL() << "Cannot cast blob to TBlob<int32_t>.";
+
+ // Input Dictionary
+ InferenceEngine::Blob::Ptr srcDict = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.dct_dim, InferenceEngine::TensorDesc::getLayoutByDims(p.dct_dim) });
+ srcDict->allocate();
+ memcpy(srcDict->buffer(), &p.dct[0], sizeof(float)*p.dct.size());
+ auto * srcDictPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(srcDict.get());
+ if (srcDictPtr == nullptr)
+ FAIL() << "Cannot cast blob to TBlob<float>.";
+
+ // Output Data
+ InferenceEngine::OutputsDataMap out;
+ out = net_reader.getNetwork().getOutputsInfo();
+ InferenceEngine::BlobMap outputBlobs;
+ std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
+ InferenceEngine::TBlob<float>::Ptr output;
+ output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
+ output->allocate();
+ outputBlobs[item.first] = output;
+
+ // Infer
+ InferenceEngine::BlobMap srcs;
+ srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("InputDictionary", srcDict));
+ srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("InputText", srcIdx));
+ graph.Infer(srcs, outputBlobs);
+
+ // Check results
+            // Bitwise comparison is intentional: the kernel must copy dictionary data verbatim.
+            if (memcmp((*output).data(), &p.ref[0], p.ref.size() * sizeof(float)) != 0)
+                FAIL() << "Wrong result compared to the TF reference!";
+ } catch (const InferenceEngine::details::InferenceEngineException &e) {
+ FAIL() << e.what();
+ }
+ }
+};
+
+TEST_P(MKLDNNCPUExtGatherTFTests, TestsGather) {}
+
+// Test data vectors
+std::vector<int32_t> in0 = { 0, 1, 1, 0 };
+std::vector<int32_t> in1 = { 0, 1, 2, 1 };
+std::vector<float> dict = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f };
+std::vector<float> ref_in0_a0_d223 = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f }; // 2x2x2x3
+std::vector<float> ref_in0_a2_d232 = { 1.f, 2.f, 2.f, 1.f, 3.f, 4.f, 4.f, 3.f, 5.f, 6.f, 6.f, 5.f, 7.f, 8.f, 8.f, 7.f, 9.f, 10.f, 10.f, 9.f, 11.f, 12.f, 12.f, 11.f }; // 2x3x2x2
+std::vector<float> ref_in1_a0_d322 = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 5.f, 6.f, 7.f, 8.f }; // 2x2x2x2
+std::vector<float> ref_in1_a1_d232 = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 3.f, 4.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 9.f, 10.f }; // 2x2x2x2
+std::vector<float> ref_in1_a2_d223 = { 1.f, 2.f, 3.f, 2.f, 4.f, 5.f, 6.f, 5.f, 7.f, 8.f, 9.f, 8.f, 10.f, 11.f, 12.f, 11.f }; // 2x2x2x2
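+// e.g. ref_in0_a0_d223: along axis 0 a 2x2x3 dictionary splits into two 6-element slices
+// (1..6 and 7..12), so indices {0, 1, 1, 0} produce 1..6, 7..12, 7..12, 1..6.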
+
+INSTANTIATE_TEST_CASE_P(
+ TestsGather, MKLDNNCPUExtGatherTFTests,
+ ::testing::Values(
+        gatherTF_test_params{ { 2, 2 }, in0, { 2, 2, 3 }, dict,  0, { 2, 2, 2, 3 }, ref_in0_a0_d223 },
+        gatherTF_test_params{ { 2, 2 }, in0, { 2, 2, 3 }, dict, -3, { 2, 2, 2, 3 }, ref_in0_a0_d223 },
+        gatherTF_test_params{ { 2, 2 }, in0, { 2, 3, 2 }, dict,  2, { 2, 3, 2, 2 }, ref_in0_a2_d232 },
+        gatherTF_test_params{ { 2, 2 }, in0, { 2, 3, 2 }, dict, -1, { 2, 3, 2, 2 }, ref_in0_a2_d232 },
+        gatherTF_test_params{ { 2, 2 }, in1, { 3, 2, 2 }, dict,  0, { 2, 2, 2, 2 }, ref_in1_a0_d322 },
+        gatherTF_test_params{ { 2, 2 }, in1, { 3, 2, 2 }, dict, -3, { 2, 2, 2, 2 }, ref_in1_a0_d322 },
+        gatherTF_test_params{ { 2, 2 }, in1, { 2, 3, 2 }, dict,  1, { 2, 2, 2, 2 }, ref_in1_a1_d232 },
+        gatherTF_test_params{ { 2, 2 }, in1, { 2, 3, 2 }, dict, -2, { 2, 2, 2, 2 }, ref_in1_a1_d232 },
+        gatherTF_test_params{ { 2, 2 }, in1, { 2, 2, 3 }, dict,  2, { 2, 2, 2, 2 }, ref_in1_a2_d223 },
+        gatherTF_test_params{ { 2, 2 }, in1, { 2, 2, 3 }, dict, -1, { 2, 2, 2, 2 }, ref_in1_a2_d223 }));
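+
+// Negative axes count from the last dimension: on a rank-3 dictionary -3 resolves to 0, -2 to 1
+// and -1 to 2, which is why each negative-axis case above mirrors its positive twin.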
+
+
+class MKLDNNCPUExtGatherHolesTests : public TestsCommon, public WithParamInterface<gatherTF_test_params> {
+ std::string model_t = R"V0G0N(
+<net Name="Gather_net" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="InputText" type="Input" precision="I32" id="1">
+ <output>
+ <port id="1">
+ <dim>2</dim>
+ <dim>2</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="InputDictionary" type="Input" precision="FP32" id="2">
+ <output>
+ <port id="2">
+ <dim>3</dim>
+ <dim>2</dim>
+ <dim>2</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="Input3" type="Input" precision="FP32" id="3">
+ <output>
+ <port id="3">
+ <dim>2</dim>
+ <dim>5</dim>
+ <dim>2</dim>
+ <dim>2</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="gather" id="4" type="Gather" precision="FP32">
+ <data axis="0"/>
+ <input>
+ <port id="1">
+ <dim>3</dim>
+ <dim>2</dim>
+ <dim>2</dim>
+ </port>
+ <port id="2">
+ <dim>2</dim>
+ <dim>2</dim>
+ </port>
+ </input>
+ <output>
+ <port id="3">
+ <dim>2</dim>
+ <dim>2</dim>
+ <dim>2</dim>
+ <dim>2</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="con" id="5" type="Concat" precision="FP32">
+ <concat_data axis="1"/>
+ <input>
+ <port id="1">
+ <dim>2</dim>
+ <dim>2</dim>
+ <dim>2</dim>
+ <dim>2</dim>
+ </port>
+ <port id="2">
+ <dim>2</dim>
+ <dim>5</dim>
+ <dim>2</dim>
+ <dim>2</dim>
+ </port>
+ </input>
+ <output>
+ <port id="3">
+ <dim>2</dim>
+ <dim>7</dim>
+ <dim>2</dim>
+ <dim>2</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="1" from-port="1" to-layer="4" to-port="2"/>
+ <edge from-layer="2" from-port="2" to-layer="4" to-port="1"/>
+ <edge from-layer="4" from-port="3" to-layer="5" to-port="1"/>
+ <edge from-layer="3" from-port="3" to-layer="5" to-port="2"/>
+ </edges>
+</net>
+)V0G0N";
+
+ std::string getModel(gatherTF_test_params p) {
+ std::string model = model_t;
+ std::string inIdx;
+ std::string inDict;
+ std::string out;
+
+ for (auto& idx : p.in_dim) {
+ inIdx += "<dim>";
+ inIdx += std::to_string(idx) + "</dim>\n";
+ }
+
+ for (auto& dct : p.dct_dim) {
+ inDict += "<dim>";
+ inDict += std::to_string(dct) + "</dim>\n";
+ }
+
+ for (auto& dst : p.ref_dim) {
+ out += "<dim>";
+ out += std::to_string(dst) + "</dim>\n";
+ }
+
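+        // NOTE: the template above hard-codes every dim, so the replacements below are
+        // effectively no-ops (_OUTC_, _IDICT_, _AX_ and _OUT_ do not occur in model_t).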
+ REPLACE_WITH_STR(model, "_OUTC_", inIdx);
+ REPLACE_WITH_STR(model, "_IDICT_", inDict);
+ REPLACE_WITH_NUM(model, "_AX_", p.axis);
+ REPLACE_WITH_STR(model, "_OUT_", out);
+
+ return model;
+ }
+
+protected:
+ virtual void TearDown() {
+ }
+
+ virtual void SetUp() {
+ try {
+ TestsCommon::SetUp();
+ gatherTF_test_params p = ::testing::WithParamInterface<gatherTF_test_params>::GetParam();
+ std::string model = getModel(p);
+
+ InferenceEngine::CNNNetReader net_reader;
+ ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+
+ InferenceEngine::Extension cpuExt(make_so_name("cpu_extension"));
+ MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
+ extMgr->AddExtension(InferenceEngine::IExtensionPtr(&cpuExt, [](InferenceEngine::IExtension*){}));
+
+ MKLDNNGraphTestClass graph;
+ graph.CreateGraph(net_reader.getNetwork(), extMgr);
+
+ // Input Indexes
+ InferenceEngine::Blob::Ptr srcIdx;
+ int32_t in_size = 4;
+ InferenceEngine::SizeVector in_dim = {2, 2};
+ srcIdx = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, in_dim, InferenceEngine::TensorDesc::getLayoutByDims(in_dim) });
+ srcIdx->allocate();
+ memcpy(static_cast<int32_t*>(srcIdx->buffer()), &p.in[0], sizeof(int32_t)*in_size);
+ auto * srcIdxPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(srcIdx.get());
+ if (srcIdxPtr == nullptr)
+ FAIL() << "Cannot cast blob to TBlob<int32_t>.";
+
+ // Input Dictionary
+ InferenceEngine::Blob::Ptr srcDict = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.dct_dim, InferenceEngine::TensorDesc::getLayoutByDims(p.dct_dim) });
+ srcDict->allocate();
+ memcpy(srcDict->buffer(), &p.dct[0], sizeof(float)*p.dct.size());
+ auto * srcDictPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(srcDict.get());
+ if (srcDictPtr == nullptr)
+ FAIL() << "Cannot cast blob to TBlob<float>.";
+
+ // Output Data
+ InferenceEngine::OutputsDataMap out;
+ out = net_reader.getNetwork().getOutputsInfo();
+ InferenceEngine::BlobMap outputBlobs;
+ std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
+ InferenceEngine::TBlob<float>::Ptr output;
+ output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
+ output->allocate();
+ outputBlobs[item.first] = output;
+
+ // Infer
+ InferenceEngine::BlobMap srcs;
+ srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("InputDictionary", srcDict));
+ srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("InputText", srcIdx));
+ srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("Input3", srcIdx));
+ graph.Infer(srcs, outputBlobs);
+
+ // Check results
+            if (memcmp((*output).data(), &p.ref[0], p.ref.size() * sizeof(float)) != 0)
+                FAIL() << "Wrong result compared to the TF reference!";
+ }
+ catch (const InferenceEngine::details::InferenceEngineException &e) {
+ FAIL() << e.what();
+ }
+ }
+};
+
+TEST_P(MKLDNNCPUExtGatherHolesTests, TestsGather) {}
+
+INSTANTIATE_TEST_CASE_P(
+ TestsGather, MKLDNNCPUExtGatherHolesTests,
+ ::testing::Values(
+        gatherTF_test_params{ { 1, 5, 2, 2 }, in1, { 1, 3, 2, 2 }, dict, 1, { 2, 2, 2, 2 }, ref_in1_a0_d322 }));
+
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/graph_generic_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/graph_generic_test.cpp
index d4f1bf7e2..49e62bc6b 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/graph_generic_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/graph_generic_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -1361,9 +1360,10 @@ TEST_F(MKLDNNGraphGenericTests, ExecuteNotInLineGRN) {
<edge from-layer="2" from-port="4" to-layer="3" to-port="6"/>
</edges>
</net>)V0G0N";
- std::shared_ptr<InferenceEngine::IExtension> cpuExt(new InferenceEngine::Extensions::Cpu::CpuExtensions());
+ InferenceEngine::Extension cpuExt(make_so_name("cpu_extension"));
MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
- extMgr->AddExtension(cpuExt);
+
+ extMgr->AddExtension(InferenceEngine::IExtensionPtr(&cpuExt, [](InferenceEngine::IExtension*){}));
InferenceEngine::CNNNetReader net_reader;
ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
@@ -1503,9 +1503,10 @@ TEST_F(MKLDNNGraphGenericTests, ExecuteInLineGRN) {
<edge from-layer="3" from-port="4" to-layer="4" to-port="6"/>
</edges>
</net>)V0G0N";
- std::shared_ptr<InferenceEngine::IExtension> cpuExt(new InferenceEngine::Extensions::Cpu::CpuExtensions());
+
+ InferenceEngine::Extension cpuExt(make_so_name("cpu_extension"));
MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
- extMgr->AddExtension(cpuExt);
+ extMgr->AddExtension(InferenceEngine::IExtensionPtr(&cpuExt, [](InferenceEngine::IExtension*){}));
InferenceEngine::CNNNetReader net_reader;
ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/interp_tests.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/interp_tests.cpp
index a37f135db..6bc9b757b 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/interp_tests.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/interp_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -11,7 +10,6 @@
#include "single_layer_common.hpp"
#include <mkldnn_plugin/mkldnn_extension_utils.h>
-#include <extension/ext_list.hpp>
#include "tests_common.hpp"
@@ -192,9 +190,9 @@ protected:
InferenceEngine::CNNNetReader net_reader;
ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
- std::shared_ptr<InferenceEngine::IExtension> cpuExt(new InferenceEngine::Extensions::Cpu::CpuExtensions());
+ InferenceEngine::Extension cpuExt(make_so_name("cpu_extension"));
MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
- extMgr->AddExtension(cpuExt);
+ extMgr->AddExtension(InferenceEngine::IExtensionPtr(&cpuExt, [](InferenceEngine::IExtension*){}));
MKLDNNGraphTestClass graph;
graph.CreateGraph(net_reader.getNetwork(), extMgr);
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/mvn_tests.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/mvn_tests.cpp
index ec2a232c5..84511a1f9 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/mvn_tests.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/mvn_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -13,20 +12,18 @@
#include <mkldnn_plugin/mkldnn_extension_utils.h>
#include <extension/ext_list.hpp>
#include "tests_common.hpp"
+#include "ir_gen_helper.hpp"
-
+using namespace InferenceEngine;
using namespace ::testing;
using namespace std;
using namespace mkldnn;
+using namespace single_layer_tests;
struct mvn_test_params {
- struct {
- size_t n;
- size_t c;
- size_t h;
- size_t w;
- } in;
+ // Formats: NCHW, NCDHW
+ vector<size_t> dims;
int across_channels;
int normalize_variance;
@@ -36,53 +33,84 @@ struct mvn_test_params {
bool isBlockedFormat;
int selectedType;
- std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
+ vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
};
+extern InferenceEngine::IExtensionPtr make_FakeExtensions();
+
template <typename data_t>
-void ref_mvn(const InferenceEngine::TBlob<data_t> &src, InferenceEngine::TBlob<data_t> &dst, mvn_test_params prm) {
+void ref_mvn(const TBlob<data_t> &src, TBlob<data_t> &dst, mvn_test_params prm) {
const data_t *src_data = src.readOnly();
data_t *dst_data = dst.data();
+ size_t dims_size = prm.dims.size();
- size_t N = prm.in.n;
- size_t C = prm.in.c;
- size_t H = prm.in.h;
- size_t W = prm.in.w;
+ size_t N = prm.dims[0];
+ size_t C = prm.dims[1];
+ size_t D = dims_size > 4 ? prm.dims[dims_size - 3lu] : 1lu;
+ size_t H = dims_size > 3 ? prm.dims[dims_size - 2lu] : 1lu;
+ size_t W = prm.dims[dims_size - 1lu];
float eps = prm.eps;
- for (int b = 0; b < N; b++) {
+ size_t C1 = H * W;
+ size_t C2 = C1 * D;
+ size_t C3 = C2 * C;
+
+ for (size_t b = 0lu; b < N; b++) {
+ size_t cb = b * C3;
// Calculate mean value
if (prm.across_channels) {
- double mean = 0;
- for (int c = 0; c < C; c++) {
- for (int h = 0; h < H; h++) {
- for (int w = 0; w < W; w++) {
- mean += src_data[b*C*H*W + c*H*W + h*W + w];
+ double mean = 0.0;
+ for (size_t c = 0lu; c < C; c++) {
+ size_t cc = cb + c * C2;
+ for (size_t d = 0lu; d < D; d++) {
+ size_t cd = cc + d * C1;
+ for (size_t h = 0lu; h < H; h++) {
+ size_t ch = cd + h * W;
+ for (size_t w = 0lu; w < W; w++) {
+ mean += src_data[ch + w];
+ }
}
}
}
- mean /= C*H*W;
- for (int c = 0; c < C; c++) {
- for (int h = 0; h < H; h++) {
- for (int w = 0; w < W; w++) {
- dst_data[b*C*H*W + c*H*W + h*W + w] = src_data[b*C*H*W + c*H*W + h*W + w] - mean;
+ mean /= (double)C3;
+ for (size_t c = 0lu; c < C; c++) {
+ size_t cc = cb + c * C2;
+ for (size_t d = 0lu; d < D; d++) {
+ size_t cd = cc + d * C1;
+ for (size_t h = 0lu; h < H; h++) {
+ size_t ch = cd + h * W;
+ for (size_t w = 0lu; w < W; w++) {
+ size_t index = ch + w;
+ dst_data[index] = src_data[index] - mean;
+ }
}
}
}
} else {
- for (int c = 0; c < C; c++) {
- double mean = 0;
- for (int h = 0; h < H; h++) {
- for (int w = 0; w < W; w++) {
- mean += src_data[b*C*H*W + c*H*W + h*W + w];
+ for (size_t c = 0lu; c < C; c++) {
+ size_t cc = cb + c * C2;
+ double mean = 0.0;
+ for (size_t d = 0lu; d < D; d++) {
+ size_t cd = cc + d * C1;
+ for (size_t h = 0lu; h < H; h++) {
+ size_t ch = cd + h * W;
+ for (size_t w = 0lu; w < W; w++) {
+ mean += src_data[ch + w];
+ }
}
}
- mean /= H*W;
- for (int h = 0; h < H; h++) {
- for (int w = 0; w < W; w++) {
- dst_data[b*C*H*W + c*H*W + h*W + w] = src_data[b*C*H*W + c*H*W + h*W + w] - mean;
+ mean /= (double)C2;
+
+ for (size_t d = 0lu; d < D; d++) {
+ size_t cd = cc + d * C1;
+ for (size_t h = 0lu; h < H; h++) {
+ size_t ch = cd + h * W;
+ for (size_t w = 0lu; w < W; w++) {
+ size_t index = ch + w;
+ dst_data[index] = src_data[index] - mean;
+ }
}
}
}
@@ -90,41 +118,61 @@ void ref_mvn(const InferenceEngine::TBlob<data_t> &src, InferenceEngine::TBlob<d
}
if (prm.normalize_variance) {
- for (int b = 0; b < N; b++) {
+ for (size_t b = 0; b < N; b++) {
+ size_t cb = b * C3;
// Calculate variances value
if (prm.across_channels) {
- double variance = 0;
- for (int c = 0; c < C; c++) {
- for (int h = 0; h < H; h++) {
- for (int w = 0; w < W; w++) {
- variance += std::pow(dst_data[b*C*H*W + c*H*W + h*W + w], 2);
+            double variance = 0.0;
+ for (size_t c = 0lu; c < C; c++) {
+ size_t cc = cb + c * C2;
+ for (size_t d = 0lu; d < D; d++) {
+ size_t cd = cc + d * C1;
+ for (size_t h = 0lu; h < H; h++) {
+ size_t ch = cd + h * W;
+ for (size_t w = 0lu; w < W; w++) {
+ variance += std::pow(dst_data[ch + w], 2);
+ }
}
}
}
- variance /= C*H*W;
- variance = std::pow(variance, 0.5f);
+ variance /= C3;
variance += eps;
- for (int c = 0; c < C; c++) {
- for (int h = 0; h < H; h++) {
- for (int w = 0; w < W; w++) {
- dst_data[b*C*H*W + c*H*W + h*W + w] /= variance;
+ variance = std::pow(variance, 0.5f);
+ for (size_t c = 0lu; c < C; c++) {
+ size_t cc = cb + c * C2;
+ for (size_t d = 0lu; d < D; d++) {
+ size_t cd = cc + d * C1;
+ for (size_t h = 0lu; h < H; h++) {
+ size_t ch = cd + h * W;
+ for (size_t w = 0lu; w < W; w++) {
+ dst_data[ch + w] /= variance;
+ }
}
}
}
} else {
- for (int c = 0; c < C; c++) {
- double variance = 0;
- for (int h = 0; h < H; h++) {
- for (int w = 0; w < W; w++) {
- variance += std::pow(dst_data[b*C*H*W + c*H*W + h*W + w], 2);
+ for (size_t c = 0lu; c < C; c++) {
+ size_t cc = cb + c * C2;
+ double variance = 0.0;
+ for (size_t d = 0lu; d < D; d++) {
+ size_t cd = cc + d * C1;
+ for (size_t h = 0lu; h < H; h++) {
+ size_t ch = cd + h * W;
+ for (size_t w = 0lu; w < W; w++) {
+ variance += std::pow(dst_data[ch + w], 2);
+ }
}
}
- variance /= H*W;
- variance = std::pow(variance, 0.5f);
+ variance /= C2;
variance += eps;
- for (int h = 0; h < H; h++) {
- for (int w = 0; w < W; w++) {
- dst_data[b*C*H*W + c*H*W + h*W + w] /= variance;
+ variance = std::pow(variance, 0.5f);
+ for (size_t d = 0lu; d < D; d++) {
+ size_t cd = cc + d * C1;
+ for (size_t h = 0lu; h < H; h++) {
+ size_t ch = cd + h * W;
+ for (size_t w = 0lu; w < W; w++) {
+ dst_data[ch + w] /= variance;
+ }
}
}
}
@@ -134,34 +182,16 @@ void ref_mvn(const InferenceEngine::TBlob<data_t> &src, InferenceEngine::TBlob<d
}
class MKLDNNCPUExtMVNTests: public TestsCommon, public WithParamInterface<mvn_test_params> {
- std::string model_t = R"V0G0N(
-<Net Name="MVN_net" version="2" precision="FP32" batch="1">
- <layers>
- <layer name="in1" type="Input" precision="FP32" id="0">
- <output>
- <port id="0">
- <dim>_IN_</dim>
- <dim>_IC_</dim>
- <dim>_IH_</dim>
- <dim>_IW_</dim>
- </port>
- </output>
- </layer>
+ std::string layers_t = R"V0G0N(
<layer name="fakeLayer" id="1" type="_FL_" precision="FP32">
<input>
<port id="1">
- <dim>_IN_</dim>
- <dim>_IC_</dim>
- <dim>_IH_</dim>
- <dim>_IW_</dim>
+ __SRC_DIMS__
</port>
</input>
<output>
<port id="2">
- <dim>_IN_</dim>
- <dim>_IC_</dim>
- <dim>_IH_</dim>
- <dim>_IW_</dim>
+ __SRC_DIMS__
</port>
</output>
</layer>
@@ -169,45 +199,42 @@ class MKLDNNCPUExtMVNTests: public TestsCommon, public WithParamInterface<mvn_te
<data across_channels="_AC_" normalize_variance="_NV_" eps="_EPS_"/>
<input>
<port id="3">
- <dim>_IN_</dim>
- <dim>_IC_</dim>
- <dim>_IH_</dim>
- <dim>_IW_</dim>
+ __SRC_DIMS__
</port>
</input>
<output>
<port id="4">
- <dim>_IN_</dim>
- <dim>_IC_</dim>
- <dim>_IH_</dim>
- <dim>_IW_</dim>
+ __SRC_DIMS__
</port>
</output>
</layer>
- </layers>
- <edges>
+)V0G0N";
+
+ std::string edges_t = R"V0G0N(
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
<edge from-layer="1" from-port="2" to-layer="2" to-port="3"/>
- </edges>
-</Net>
)V0G0N";
std::string getModel(mvn_test_params p) {
- std::string model = model_t;
+ std::string model = layers_t;
if (p.isBlockedFormat)
REPLACE_WITH_STR(model, "_FL_", "FakeLayerBLK");
else
REPLACE_WITH_STR(model, "_FL_", "FakeLayerPLN");
- REPLACE_WITH_NUM(model, "_IW_", p.in.w);
- REPLACE_WITH_NUM(model, "_IH_", p.in.h);
- REPLACE_WITH_NUM(model, "_IC_", p.in.c);
- REPLACE_WITH_NUM(model, "_IN_", p.in.n);
+ std::string s_dims;
+ for (auto& dim : p.dims) {
+ s_dims += "\n <dim>";
+ s_dims += std::to_string(dim) + "</dim>";
+ }
+ REPLACE_WITH_STR(model, "__SRC_DIMS__", s_dims);
REPLACE_WITH_NUM(model, "_AC_", p.across_channels);
REPLACE_WITH_NUM(model, "_NV_", p.normalize_variance);
REPLACE_WITH_NUM(model, "_EPS_", p.eps);
+ model = IRTemplateGenerator::getIRTemplate("MVN_Only", p.dims, "FP32", model, edges_t);
+
return model;
}
@@ -221,12 +248,14 @@ protected:
mvn_test_params p = ::testing::WithParamInterface<mvn_test_params>::GetParam();
std::string model = getModel(p);
- InferenceEngine::CNNNetReader net_reader;
+ CNNNetReader net_reader;
ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
- std::shared_ptr<InferenceEngine::IExtension> cpuExt(new InferenceEngine::Extensions::Cpu::CpuExtensions());
+ InferenceEngine::Extension cpuExt(make_so_name("cpu_extension"));
MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
- extMgr->AddExtension(cpuExt);
+ extMgr->AddExtension(InferenceEngine::IExtensionPtr(&cpuExt, [](InferenceEngine::IExtension*){}));
+ extMgr->AddExtension(make_FakeExtensions());
+
MKLDNNGraphTestClass graph;
graph.CreateGraph(net_reader.getNetwork(), extMgr);
@@ -250,38 +279,48 @@ protected:
else
            ASSERT_EQ(5, nodes.size()); // TODO: should be 4 (redundant reorder when both layers are in-place)
- InferenceEngine::SizeVector dims_src = {p.in.w, p.in.h, p.in.c, p.in.n};
+ SizeVector dims_src = p.dims;
+
+ Layout layout = ANY;
+ switch (p.dims.size()) {
+ case 4:
+ layout = NCHW;
+ break;
+ case 5:
+ layout = NCDHW;
+ break;
+ }
- InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NHWC, dims_src);
+ Blob::Ptr src = make_shared_blob<float, const SizeVector>(Precision::FP32, layout, dims_src);
src->allocate();
fill_data(src->buffer(), src->size());
- auto * srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
+ auto * srcPtr = dynamic_cast<TBlob<float>*>(src.get());
if (srcPtr == nullptr)
FAIL() << "Cannot cast blob to TBlob<float>.";
- InferenceEngine::BlobMap srcs;
- srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
+ BlobMap srcs;
+ srcs.insert(std::pair<std::string, Blob::Ptr>("in1", src));
- InferenceEngine::OutputsDataMap out;
+ OutputsDataMap out;
out = net_reader.getNetwork().getOutputsInfo();
- InferenceEngine::BlobMap outputBlobs;
+ BlobMap outputBlobs;
- std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
+ std::pair<std::string, DataPtr> item = *out.begin();
- InferenceEngine::TBlob<float>::Ptr output;
- output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
+ TBlob<float>::Ptr output;
+ output = make_shared_blob<float>(item.second->getTensorDesc());
output->allocate();
outputBlobs[item.first] = output;
graph.Infer(srcs, outputBlobs);
- InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
+ TBlob<float> dst_ref(item.second->getTensorDesc());
dst_ref.allocate();
ref_mvn(*srcPtr, dst_ref, p);
- compare(*output, dst_ref);
- } catch (const InferenceEngine::details::InferenceEngineException &e) {
+ compare(*output, dst_ref, 0.0001f);
+ } catch (const details::InferenceEngineException &e) {
FAIL() << e.what();
}
}
@@ -292,7 +331,7 @@ TEST_P(MKLDNNCPUExtMVNTests, TestsMVN) {}
INSTANTIATE_TEST_CASE_P(
TestsMVN, MKLDNNCPUExtMVNTests,
::testing::Values(
- mvn_test_params{{2, 64, 15, 15}, 0, 0, 0.00001, 2, false, MKLDNNPlugin::impl_desc_type::unknown },
+ /*0*/ mvn_test_params{{2, 64, 15, 15}, 0, 0, 0.00001, 2, false, MKLDNNPlugin::impl_desc_type::unknown },
mvn_test_params{{2, 2, 33, 65}, 0, 0, 0.00001, 2, false, MKLDNNPlugin::impl_desc_type::unknown },
mvn_test_params{{2, 64, 15, 15}, 0, 1, 0.00001, 2, false, MKLDNNPlugin::impl_desc_type::unknown },
mvn_test_params{{2, 2, 33, 65}, 0, 1, 0.00001, 2, false, MKLDNNPlugin::impl_desc_type::unknown },
@@ -301,10 +340,22 @@ INSTANTIATE_TEST_CASE_P(
mvn_test_params{{2, 64, 15, 15}, 1, 1, 0.00001, 2, false, MKLDNNPlugin::impl_desc_type::unknown },
mvn_test_params{{2, 2, 33, 65}, 1, 1, 0.00001, 2, false, MKLDNNPlugin::impl_desc_type::unknown },
mvn_test_params{{2, 64, 15, 15}, 0, 0, 0.00001, 2, true, MKLDNNPlugin::impl_desc_type::unknown },
- mvn_test_params{{2, 2, 33, 65}, 0, 0, 0.00001, 2, true, MKLDNNPlugin::impl_desc_type::unknown },
+ /*9*/ mvn_test_params{{2, 2, 33, 65}, 0, 0, 0.00001, 2, true, MKLDNNPlugin::impl_desc_type::unknown },
mvn_test_params{{2, 64, 15, 15}, 0, 1, 0.00001, 2, true, MKLDNNPlugin::impl_desc_type::unknown },
mvn_test_params{{2, 2, 33, 65}, 0, 1, 0.00001, 2, true, MKLDNNPlugin::impl_desc_type::unknown },
mvn_test_params{{2, 64, 15, 15}, 1, 0, 0.00001, 2, true, MKLDNNPlugin::impl_desc_type::unknown },
mvn_test_params{{2, 2, 33, 65}, 1, 0, 0.00001, 2, true, MKLDNNPlugin::impl_desc_type::unknown },
- mvn_test_params{{2,640, 15, 15}, 1, 1, 0.00001, 2, true, MKLDNNPlugin::impl_desc_type::unknown },
- mvn_test_params{{2, 2, 33, 65}, 1, 1, 0.00001, 2, true, MKLDNNPlugin::impl_desc_type::unknown }));
+ /*14*/ mvn_test_params{{2,640, 15, 15}, 1, 1, 0.00001, 2, true, MKLDNNPlugin::impl_desc_type::unknown },
+ mvn_test_params{{2, 2, 33, 65}, 1, 1, 0.00001, 2, true, MKLDNNPlugin::impl_desc_type::unknown },
+
+ // 5D
+ /*16*/ mvn_test_params{{2, 64, 24, 32, 40}, 0, 0, 0.00001f, 2, false, MKLDNNPlugin::impl_desc_type::unknown },
+ mvn_test_params{{2, 64, 24, 32, 40}, 0, 1, 0.00001f, 2, false, MKLDNNPlugin::impl_desc_type::unknown },
+ mvn_test_params{{2, 64, 24, 32, 40}, 1, 0, 0.00001f, 2, false, MKLDNNPlugin::impl_desc_type::unknown },
+ mvn_test_params{{2, 64, 24, 32, 40}, 1, 1, 0.00001f, 2, false, MKLDNNPlugin::impl_desc_type::unknown },
+ mvn_test_params{{2, 64, 24, 32, 40}, 0, 0, 0.00001f, 2, true, MKLDNNPlugin::impl_desc_type::unknown },
+ mvn_test_params{{2, 64, 24, 32, 40}, 0, 1, 0.00001f, 2, true, MKLDNNPlugin::impl_desc_type::unknown },
+ mvn_test_params{{2, 64, 24, 32, 40}, 1, 0, 0.00001f, 2, true, MKLDNNPlugin::impl_desc_type::unknown },
+ /*23*/ mvn_test_params{{2, 64, 24, 32, 40}, 1, 1, 0.00001f, 2, true, MKLDNNPlugin::impl_desc_type::unknown },
+ mvn_test_params{{1, 64, 32, 32, 32}, 0, 1, 0.001f, 2, true, MKLDNNPlugin::impl_desc_type::unknown }
+ ));
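+
+// Reference semantics exercised above: dst = src - mean, the mean taken over C*D*H*W when
+// across_channels is set or over D*H*W per channel; with normalize_variance, dst /= sqrt(mean(dst^2) + eps).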
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/resample_tests.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/resample_tests.cpp
index 71b86cc53..f3e4bad1e 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/resample_tests.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/resample_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -11,7 +10,6 @@
#include "single_layer_common.hpp"
#include <mkldnn_plugin/mkldnn_extension_utils.h>
-#include <extension/ext_list.hpp>
#include "tests_common.hpp"
using namespace ::testing;
@@ -42,6 +40,8 @@ static inline float triangleCoeff(float x) {
return std::max(0.0f, 1 - std::abs(x));
}
+extern InferenceEngine::IExtensionPtr make_FakeExtensions();
+
template <typename data_t>
void ref_resample(const InferenceEngine::TBlob<data_t> &src, InferenceEngine::TBlob<data_t> &dst, resample_test_params prm) {
const data_t *src_data = src.readOnly();
@@ -222,9 +222,10 @@ protected:
InferenceEngine::CNNNetReader net_reader;
ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
- std::shared_ptr<InferenceEngine::IExtension> cpuExt(new InferenceEngine::Extensions::Cpu::CpuExtensions());
+ InferenceEngine::Extension cpuExt(make_so_name("cpu_extension"));
MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
- extMgr->AddExtension(cpuExt);
+ extMgr->AddExtension(InferenceEngine::IExtensionPtr(&cpuExt, [](InferenceEngine::IExtension*){}));
+ extMgr->AddExtension(make_FakeExtensions());
MKLDNNGraphTestClass graph;
graph.CreateGraph(net_reader.getNetwork(), extMgr);
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_activation_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_activation_test.cpp
index 380ccb380..a0898b599 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_activation_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_activation_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -21,12 +20,8 @@ struct activation_test_params {
float alpha;
float beta;
- struct {
- size_t n;
- size_t c;
- size_t h;
- size_t w;
- } in;
+ // Formats: NCHW, NCDHW
+ vector<size_t> dims;
size_t num_prim_desc;
@@ -56,10 +51,17 @@ T bounded_relu_fwd(T s, A alpha) {
return s > alpha ? (T)(alpha) : s;
}
+template <typename T> T tanh_fwd(T s) {
+ return static_cast<T>(::tanhf((float)s));
+}
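+// Reference tanh for the new eltwise_tanh cases; getModel() below maps them to an
+// "Activation" layer with type="tanh".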
+
template <typename data_t>
void ref_activation(const InferenceEngine::TBlob<data_t> &src, InferenceEngine::TBlob<data_t> &dst, activation_test_params prm) {
- size_t IW = src.dims()[3];
- size_t IH = src.dims()[2];
+ auto dims_size = src.dims().size();
+
+ size_t IW = src.dims()[dims_size - 1];
+ size_t IH = src.dims()[dims_size - 2];
+ size_t ID = dims_size == 5 ? src.dims()[dims_size - 3] : 1u;
size_t IC = src.dims()[1];
size_t MB = src.dims()[0];
@@ -68,18 +70,23 @@ void ref_activation(const InferenceEngine::TBlob<data_t> &src, InferenceEngine::
for(int mb = 0; mb < MB; mb++) {
for(int c = 0; c < IC; c++) {
- for(int h = 0; h < IH; h++) {
- for(int w = 0; w < IW; w++) {
- int idx = mb * IC * IH * IW
- + c * IH * IW
- + h * IW + w;
-
- switch (prm.alg) {
- case eltwise_relu: dst_data[idx] = relu_fwd(src_data[idx], prm.alpha); break;
- case eltwise_elu: dst_data[idx] = elu_fwd(src_data[idx], prm.alpha); break;
- case eltwise_logistic: dst_data[idx] = logistic_fwd(src_data[idx]); break;
- case eltwise_bounded_relu: dst_data[idx] = bounded_relu_fwd(src_data[idx], prm.alpha); break;
- default: assert(!"unknown alg_kind");
+ for(int d = 0; d < ID; d++) {
+ for(int h = 0; h < IH; h++) {
+ for(int w = 0; w < IW; w++) {
+ int idx = mb * IC * ID * IH * IW
+ + c * ID * IH * IW
+ + d * IH * IW
+ + h * IW
+ + w;
+
+ switch (prm.alg) {
+ case eltwise_relu: dst_data[idx] = relu_fwd(src_data[idx], prm.alpha); break;
+ case eltwise_elu: dst_data[idx] = elu_fwd(src_data[idx], prm.alpha); break;
+ case eltwise_logistic: dst_data[idx] = logistic_fwd(src_data[idx]); break;
+ case eltwise_bounded_relu: dst_data[idx] = bounded_relu_fwd(src_data[idx], prm.alpha); break;
+ case eltwise_tanh: dst_data[idx] = tanh_fwd(src_data[idx]); break;
+ default: assert(!"unknown alg_kind");
+ }
}
}
}
@@ -90,24 +97,26 @@ void ref_activation(const InferenceEngine::TBlob<data_t> &src, InferenceEngine::
class MKLDNNGraphActivationTests: public TestsCommon,
public WithParamInterface<activation_test_params> {
std::string model_t = R"V0G0N(
-<Net Name="Activation" version="2" precision="FP32" batch="1">
+<Net Name="Activation" version="3" precision="FP32" batch="1">
<layers>
<layer name="in1" type="Input" precision="FP32" id="0">
<output>
<port id="0">
<dim>_IN_</dim>
<dim>_IC_</dim>
+ <dim>_ID_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
</output>
</layer>
<layer name="activation" id="1" type="_LT_" precision="FP32">
- <data _P1_NAME_="_P1_VAL_" _P2_NAME_="_P2_VAL_" PrimitivesPriority="_IMPLS_"/>
+ <data _P1_ _P2_ PrimitivesPriority="_IMPLS_"/>
<input>
<port id="1">
<dim>_IN_</dim>
<dim>_IC_</dim>
+ <dim>_ID_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
@@ -116,6 +125,7 @@ class MKLDNNGraphActivationTests: public TestsCommon,
<port id="2">
<dim>_IN_</dim>
<dim>_IC_</dim>
+ <dim>_ID_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
@@ -134,30 +144,49 @@ protected:
std::string getModel(activation_test_params p) {
std::string model = model_t;
+ auto dims_size = p.dims.size();
+
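+        // Intentional fall-through: 3D inputs drop both _IH_ and _ID_ dims, 4D inputs drop only _ID_.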
+ switch (dims_size) {
+ case 3:
+ REMOVE_LINE(model, "<dim>_IH_</dim>");
+ case 4:
+ REMOVE_LINE(model, "<dim>_ID_</dim>");
+ }
switch (p.alg) {
case eltwise_relu: REPLACE_WITH_STR(model, "_LT_", "ReLU"); break;
case eltwise_elu: REPLACE_WITH_STR(model, "_LT_", "ELU"); break;
case eltwise_logistic: REPLACE_WITH_STR(model, "_LT_", "Sigmoid"); break;
case eltwise_bounded_relu: REPLACE_WITH_STR(model, "_LT_", "ReLU6"); break;
+ case eltwise_tanh: REPLACE_WITH_STR(model, "_LT_", "Activation"); break;
default: assert(!"unknown alg_kind");
}
- if (p.alg == eltwise_relu)
- REPLACE_WITH_STR(model, "_P1_NAME_", "negative_slope");
- else if (p.alg == eltwise_bounded_relu)
- REPLACE_WITH_STR(model, "_P1_NAME_", "n");
- else
- REPLACE_WITH_STR(model, "_P1_NAME_", "alpha");
- REPLACE_WITH_NUM(model, "_P1_VAL_", p.alpha);
-
- REPLACE_WITH_STR(model, "_P2_NAME_", "beta");
- REPLACE_WITH_NUM(model, "_P2_VAL_", p.beta);
-
- REPLACE_WITH_NUM(model, "_IW_", p.in.w);
- REPLACE_WITH_NUM(model, "_IH_", p.in.h);
- REPLACE_WITH_NUM(model, "_IC_", p.in.c);
- REPLACE_WITH_NUM(model, "_IN_", p.in.n);
+ string P1, P2;
+ if (p.alg == eltwise_relu) {
+ P1 = string("negative_slope=\"") + to_string(p.alpha) + string("\"");
+ P2 = string("beta=\"") + to_string(p.beta) + string("\"");
+ } else if (p.alg == eltwise_bounded_relu) {
+ P1 = string("n=\"") + to_string(p.alpha) + string("\"");
+ P2 = string("beta=\"") + to_string(p.beta) + string("\"");
+ } else if (p.alg == eltwise_tanh) {
+ P1 = string("type=\"tanh\"");
+ } else {
+ P1 = string("alpha=\"") + to_string(p.alpha) + string("\"");
+ P2 = string("beta=\"") + to_string(p.beta) + string("\"");
+ }
+ REPLACE_WITH_STR(model, "_P1_", P1);
+ REPLACE_WITH_STR(model, "_P2_", P2);
+
+ REPLACE_WITH_NUM(model, "_IW_", p.dims[dims_size - 1]);
+ REPLACE_WITH_NUM(model, "_IC_", p.dims[1]);
+ REPLACE_WITH_NUM(model, "_IN_", p.dims[0]);
+ switch (dims_size) {
+ case 5:
+ REPLACE_WITH_NUM(model, "_ID_", p.dims[dims_size - 3]);
+ case 4:
+ REPLACE_WITH_NUM(model, "_IH_", p.dims[dims_size - 2]);
+ }
std::string impls;
for (const auto& preferType : p.preferTypes) {
@@ -194,9 +223,18 @@ protected:
}
}
- InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w};
+ InferenceEngine::SizeVector dims_src = p.dims;
+ InferenceEngine::Layout layout = InferenceEngine::ANY;
+ switch (p.dims.size()) {
+ case 4:
+ layout = InferenceEngine::NCHW;
+ break;
+ case 5:
+ layout = InferenceEngine::NCDHW;
+ break;
+ }
- InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
+ InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, dims_src);
src->allocate();
fill_data(src->buffer(), src->size());
@@ -226,7 +264,7 @@ protected:
ref_activation(*srcPtr, dst_ref, p);
- compare(*output, dst_ref);
+ compare(*output, dst_ref, 0.0005f);
} catch (const InferenceEngine::details::InferenceEngineException &e) {
FAIL() << e.what();
}
@@ -265,7 +303,9 @@ INSTANTIATE_TEST_CASE_P(
activation_test_params{eltwise_bounded_relu, 6.0f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
activation_test_params{eltwise_bounded_relu, 6.0f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
activation_test_params{eltwise_bounded_relu, 0.1f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
- activation_test_params{eltwise_bounded_relu, 0.1f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}
+ activation_test_params{eltwise_bounded_relu, 0.1f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+ // 5D
+ activation_test_params{eltwise_tanh, 0.f, 0.f, {1, 1, 64, 64, 64}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}
));
class MKLDNNGraphDynBatchActivationTests: public MKLDNNGraphActivationTests {
@@ -275,7 +315,7 @@ protected:
TestsCommon::SetUp();
activation_test_params p = ::testing::WithParamInterface<activation_test_params>::GetParam();
std::string model = getModel(p);
- size_t MB = p.in.n;
+ size_t MB = p.dims[0];
if (MB < 2)
MB = 2;
@@ -292,9 +332,18 @@ protected:
graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
graph.CreateGraph(net_reader.getNetwork());
- InferenceEngine::SizeVector dims_src = {MB, p.in.c, p.in.h, p.in.w};
+ InferenceEngine::SizeVector dims_src = p.dims;
+ InferenceEngine::Layout layout = InferenceEngine::ANY;
+ switch (p.dims.size()) {
+ case 4:
+ layout = InferenceEngine::NCHW;
+ break;
+ case 5:
+ layout = InferenceEngine::NCDHW;
+ break;
+ }
- InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
+ InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, dims_src);
src->allocate();
fill_data(src->buffer(), src->size());
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_batchnorm_scaleshift_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_batchnorm_scaleshift_test.cpp
index 67ec86099..544f51a73 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_batchnorm_scaleshift_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_batchnorm_scaleshift_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_batchnorm_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_batchnorm_test.cpp
index 4dbfd3fd7..6920b55c4 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_batchnorm_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_batchnorm_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_concat_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_concat_test.cpp
index 2ec54e429..7396700b8 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_concat_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_concat_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -19,17 +18,11 @@ using namespace ::testing;
using namespace std;
using namespace mkldnn;
-struct dim4 {
- size_t n;
- size_t c;
- size_t h;
- size_t w;
-};
struct concat_test_params {
- dim4 in1;
-
- dim4 in2;
+ // Formats: NCHW, NCDHW
+ vector<size_t> in1;
+ vector<size_t> in2;
size_t axis;
@@ -43,50 +36,30 @@ struct concat_test_params {
class MKLDNNGraphConcatTests: public TestsCommon,
public WithParamInterface<concat_test_params> {
std::string model_t = R"V0G0N(
-<net name="ConcatOnly" version="2" precision="FP32" batch="1">
+<net name="ConcatOnly" version="3" precision="FP32" batch="1">
<layers>
<layer name="in1" type="Input" precision="FP32" id="1">
<output>
- <port id="1">
- <dim>_IN1_</dim>
- <dim>_IC1_</dim>
- <dim>_IH1_</dim>
- <dim>_IW1_</dim>
+ <port id="1">__SRC_DIMS_1__
</port>
</output>
</layer>
<layer name="in2" type="Input" precision="FP32" id="2">
<output>
- <port id="2">
- <dim>_IN2_</dim>
- <dim>_IC2_</dim>
- <dim>_IH2_</dim>
- <dim>_IW2_</dim>
+ <port id="2">__SRC_DIMS_2__
</port>
</output>
</layer>
<layer name="con" id="3" type="Concat" precision="FP32">
<concat_data axis="_AXIS_"/>
<input>
- <port id="1">
- <dim>_IN1_</dim>
- <dim>_IC1_</dim>
- <dim>_IH1_</dim>
- <dim>_IW1_</dim>
+ <port id="1">__SRC_DIMS_1__
</port>
- <port id="2">
- <dim>_IN2_</dim>
- <dim>_IC2_</dim>
- <dim>_IH2_</dim>
- <dim>_IW2_</dim>
+ <port id="2">__SRC_DIMS_2__
</port>
</input>
<output>
- <port id="3">
- <dim>_ON_</dim>
- <dim>_OC_</dim>
- <dim>_OH_</dim>
- <dim>_OW_</dim>
+ <port id="3">__DST_DIMS__
</port>
</output>
</layer>
@@ -100,20 +73,27 @@ class MKLDNNGraphConcatTests: public TestsCommon,
std::string getModel(concat_test_params p) {
std::string model = model_t;
- REPLACE_WITH_NUM(model, "_IN1_", p.in1.n);
- REPLACE_WITH_NUM(model, "_IC1_", p.in1.c);
- REPLACE_WITH_NUM(model, "_IW1_", p.in1.w);
- REPLACE_WITH_NUM(model, "_IH1_", p.in1.h);
+ std::string s_dims;
+ for (auto& dim : p.in1) {
+ s_dims += "\n <dim>";
+ s_dims += std::to_string(dim) + "</dim>";
+ }
+ REPLACE_WITH_STR(model, "__SRC_DIMS_1__", s_dims);
- REPLACE_WITH_NUM(model, "_IN2_", p.in2.n);
- REPLACE_WITH_NUM(model, "_IC2_", p.in2.c);
- REPLACE_WITH_NUM(model, "_IW2_", p.in2.w);
- REPLACE_WITH_NUM(model, "_IH2_", p.in2.h);
+ s_dims = "";
+ for (auto& dim : p.in2) {
+ s_dims += "\n <dim>";
+ s_dims += std::to_string(dim) + "</dim>";
+ }
+ REPLACE_WITH_STR(model, "__SRC_DIMS_2__", s_dims);
- REPLACE_WITH_NUM(model, "_ON_", p.axis == 0 ? p.in1.n + p.in2.n : p.in1.n);
- REPLACE_WITH_NUM(model, "_OC_", p.axis == 1 ? p.in1.c + p.in2.c : p.in1.c);
- REPLACE_WITH_NUM(model, "_OH_", p.axis == 2 ? p.in1.h + p.in2.h : p.in1.h);
- REPLACE_WITH_NUM(model, "_OW_", p.axis == 3 ? p.in1.w + p.in2.w : p.in1.w);
+ s_dims = "";
+ for (size_t i = 0; i < p.in1.size(); i++) {
+ size_t dim = p.axis == i ? p.in1[i] + p.in2[i] : p.in1[i];
+ s_dims += "\n <dim>";
+ s_dims += std::to_string(dim) + "</dim>";
+ }
+ REPLACE_WITH_STR(model, "__DST_DIMS__", s_dims);
REPLACE_WITH_NUM(model, "_AXIS_", p.axis);
return model;
@@ -147,14 +127,23 @@ protected:
}
ASSERT_LE(3, nodes.size());
- InferenceEngine::SizeVector dims_src1 = {p.in1.n, p.in1.c, p.in1.h, p.in1.w};
- InferenceEngine::SizeVector dims_src2 = {p.in2.n, p.in2.c, p.in2.h, p.in2.w};
+ InferenceEngine::SizeVector dims_src1 = p.in1;
+ InferenceEngine::SizeVector dims_src2 = p.in2;
+ InferenceEngine::Layout layout = InferenceEngine::ANY;
+ switch (p.in1.size()) {
+ case 4:
+ layout = InferenceEngine::NCHW;
+ break;
+ case 5:
+ layout = InferenceEngine::NCDHW;
+ break;
+ }
- InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src1);
+ InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, dims_src1);
src1->allocate();
fill_data(src1->buffer(), src1->size());
- InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src2);
+ InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, dims_src2);
src2->allocate();
fill_data(src2->buffer(), src2->size());
InferenceEngine::BlobMap srcs;
@@ -215,6 +204,35 @@ protected:
TEST_P(MKLDNNGraphConcatTests, TestsConcat) {}
+INSTANTIATE_TEST_CASE_P(
+ TestsConcat, MKLDNNGraphConcatTests,
+ ::testing::Values(
+ concat_test_params {
+ {1, 3, 3, 5},
+ {1, 3, 3, 5},
+ 1, 2
+ },
+ concat_test_params {
+ {1, 7, 1, 5},
+ {1, 7, 9, 5},
+ 2, 1, MKLDNNPlugin::impl_desc_type::ref
+ },
+ concat_test_params {
+ {1, 2, 3, 5, 3},
+ {1, 5, 3, 5, 3},
+ 1, 2
+ },
+ concat_test_params {
+ {1, 32, 3, 4, 5},
+ {1, 32, 3, 4, 5},
+ 1, 6, MKLDNNPlugin::impl_desc_type::unknown
+ },
+ concat_test_params {
+ {1, 64, 16, 16, 16, 1},
+ {1, 64, 16, 16, 16, 1},
+ 5, 1, MKLDNNPlugin::impl_desc_type::ref
+ }));
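+// Fields per case: in1 dims, in2 dims, concat axis, expected primitive-descriptor count and,
+// optionally, a preferred implementation type; the 5D/6D cases go beyond plain NCHW.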
+
class MKLDNNGraphDynBatchConcatTests: public TestsCommon, public WithParamInterface<concat_test_params> {
std::string model_t = R"V0G0N(
<net name="ConcatOnly" version="2" precision="FP32" batch="1">
@@ -222,20 +240,14 @@ class MKLDNNGraphDynBatchConcatTests: public TestsCommon, public WithParamInterf
<layer name="in1" type="Input" precision="FP32" id="1">
<output>
<port id="1">
- <dim>1</dim>
- <dim>_IC1_</dim>
- <dim>_IH1_</dim>
- <dim>_IW1_</dim>
+ <dim>1</dim>__SRC_DIMS_1__
</port>
</output>
</layer>
<layer name="in2" type="Input" precision="FP32" id="2">
<output>
<port id="2">
- <dim>1</dim>
- <dim>_IC2_</dim>
- <dim>_IH2_</dim>
- <dim>_IW2_</dim>
+ <dim>1</dim>__SRC_DIMS_2__
</port>
</output>
</layer>
@@ -243,24 +255,15 @@ class MKLDNNGraphDynBatchConcatTests: public TestsCommon, public WithParamInterf
<concat_data axis="_AXIS_"/>
<input>
<port id="1">
- <dim>1</dim>
- <dim>_IC1_</dim>
- <dim>_IH1_</dim>
- <dim>_IW1_</dim>
+ <dim>1</dim>__SRC_DIMS_1__
</port>
<port id="2">
- <dim>1</dim>
- <dim>_IC2_</dim>
- <dim>_IH2_</dim>
- <dim>_IW2_</dim>
+ <dim>1</dim>__SRC_DIMS_2__
</port>
</input>
<output>
<port id="3">
- <dim>1</dim>
- <dim>_OC_</dim>
- <dim>_OH_</dim>
- <dim>_OW_</dim>
+ <dim>1</dim>__DST_DIMS__
</port>
</output>
</layer>
@@ -274,20 +277,27 @@ class MKLDNNGraphDynBatchConcatTests: public TestsCommon, public WithParamInterf
std::string getModel(concat_test_params p) {
std::string model = model_t;
- REPLACE_WITH_NUM(model, "_IN1_", p.in1.n);
- REPLACE_WITH_NUM(model, "_IC1_", p.in1.c);
- REPLACE_WITH_NUM(model, "_IW1_", p.in1.w);
- REPLACE_WITH_NUM(model, "_IH1_", p.in1.h);
+ std::string s_dims;
+ for (size_t i = 1; i < p.in1.size(); i++) {
+ s_dims += "\n <dim>";
+ s_dims += std::to_string(p.in1[i]) + "</dim>";
+ }
+ REPLACE_WITH_STR(model, "__SRC_DIMS_1__", s_dims);
- REPLACE_WITH_NUM(model, "_IN2_", p.in2.n);
- REPLACE_WITH_NUM(model, "_IC2_", p.in2.c);
- REPLACE_WITH_NUM(model, "_IW2_", p.in2.w);
- REPLACE_WITH_NUM(model, "_IH2_", p.in2.h);
+ s_dims = "";
+ for (size_t i = 1; i < p.in2.size(); i++) {
+ s_dims += "\n <dim>";
+ s_dims += std::to_string(p.in2[i]) + "</dim>";
+ }
+ REPLACE_WITH_STR(model, "__SRC_DIMS_2__", s_dims);
- REPLACE_WITH_NUM(model, "_ON_", p.axis == 0 ? p.in1.n + p.in2.n : p.in1.n);
- REPLACE_WITH_NUM(model, "_OC_", p.axis == 1 ? p.in1.c + p.in2.c : p.in1.c);
- REPLACE_WITH_NUM(model, "_OH_", p.axis == 2 ? p.in1.h + p.in2.h : p.in1.h);
- REPLACE_WITH_NUM(model, "_OW_", p.axis == 3 ? p.in1.w + p.in2.w : p.in1.w);
+ s_dims = "";
+ for (size_t i = 1; i < p.in1.size(); i++) {
+ size_t dim = p.axis == i ? p.in1[i] + p.in2[i] : p.in1[i];
+ s_dims += "\n <dim>";
+ s_dims += std::to_string(dim) + "</dim>";
+ }
+ REPLACE_WITH_STR(model, "__DST_DIMS__", s_dims);
REPLACE_WITH_NUM(model, "_AXIS_", p.axis);
return model;
@@ -302,7 +312,7 @@ protected:
TestsCommon::SetUp();
concat_test_params p = ::testing::WithParamInterface<concat_test_params>::GetParam();
std::string model = getModel(p);
- size_t MB = p.in1.n;
+ size_t MB = p.in1[0];
if (MB < 2)
MB = 2;
@@ -319,14 +329,23 @@ protected:
graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
graph.CreateGraph(net_reader.getNetwork());
- InferenceEngine::SizeVector dims_src1 = {MB, p.in1.c, p.in1.h, p.in1.w};
- InferenceEngine::SizeVector dims_src2 = {MB, p.in2.c, p.in2.h, p.in2.w};
+ InferenceEngine::SizeVector dims_src1 = p.in1;
+ InferenceEngine::SizeVector dims_src2 = p.in2;
+ InferenceEngine::Layout layout = InferenceEngine::ANY;
+ switch (p.in1.size()) {
+ case 4:
+ layout = InferenceEngine::NCHW;
+ break;
+ case 5:
+ layout = InferenceEngine::NCDHW;
+ break;
+ }
- InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src1);
+ InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, dims_src1);
src1->allocate();
fill_data(src1->buffer(), src1->size());
- InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src2);
+ InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, dims_src2);
src2->allocate();
fill_data(src2->buffer(), src2->size());
InferenceEngine::BlobMap srcs;
@@ -396,6 +415,11 @@ INSTANTIATE_TEST_CASE_P(
{2, 2, 3, 3},
{2, 3, 3, 3},
1, 2, MKLDNNPlugin::impl_desc_type::unknown
+ },
+ concat_test_params {
+ {2, 2, 3, 3, 3},
+ {2, 3, 3, 3, 3},
+ 1, 2, MKLDNNPlugin::impl_desc_type::unknown
}));
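Every fixture in this patch repeats the same rank-to-layout switch before allocating blobs; a hypothetical helper capturing it (only 4D and 5D ranks are exercised by these tests, and the umbrella header name is assumed):

    #include <cstddef>
    #include <inference_engine.hpp>  // assumed to provide InferenceEngine::Layout

    static InferenceEngine::Layout layoutForRank(size_t rank) {
        switch (rank) {
            case 4:  return InferenceEngine::NCHW;
            case 5:  return InferenceEngine::NCDHW;
            default: return InferenceEngine::ANY;  // never reached by these tests
        }
    }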
struct concat_param {
@@ -406,9 +430,10 @@ struct concat_param {
};
struct two_concat_test_params {
- dim4 in1;
- dim4 in2;
- dim4 in3;
+ // Formats: NCHW, NCDHW
+ vector<size_t> in1;
+ vector<size_t> in2;
+ vector<size_t> in3;
concat_param concat1;
concat_param concat2;
@@ -421,31 +446,19 @@ class MKLDNNGraphTwoConcatTests: public TestsCommon,
<layers>
<layer name="in1" type="Input" precision="FP32" id="1">
<output>
- <port id="1">
- <dim>_IN1_</dim>
- <dim>_IC1_</dim>
- <dim>_IH1_</dim>
- <dim>_IW1_</dim>
+ <port id="1">__SRC_DIMS_1__
</port>
</output>
</layer>
<layer name="in2" type="Input" precision="FP32" id="2">
<output>
- <port id="1">
- <dim>_IN2_</dim>
- <dim>_IC2_</dim>
- <dim>_IH2_</dim>
- <dim>_IW2_</dim>
+ <port id="1">__SRC_DIMS_2__
</port>
</output>
</layer>
<layer name="in3" type="Input" precision="FP32" id="3">
<output>
- <port id="1">
- <dim>_IN3_</dim>
- <dim>_IC3_</dim>
- <dim>_IH3_</dim>
- <dim>_IW3_</dim>
+ <port id="1">__SRC_DIMS_3__
</port>
</output>
</layer>
@@ -455,22 +468,20 @@ class MKLDNNGraphTwoConcatTests: public TestsCommon,
<port id="1">
<dim>_CI41N_</dim>
<dim>_CI41C_</dim>
+ <dim>_CI41D_</dim>
<dim>_CI41H_</dim>
<dim>_CI41W_</dim>
</port>
<port id="2">
<dim>_CI42N_</dim>
<dim>_CI42C_</dim>
+ <dim>_CI42D_</dim>
<dim>_CI42H_</dim>
<dim>_CI42W_</dim>
</port>
</input>
<output>
- <port id="3">
- <dim>_CON1_</dim>
- <dim>_COC1_</dim>
- <dim>_COH1_</dim>
- <dim>_COW1_</dim>
+ <port id="3">__CO_DIMS_1__
</port>
</output>
</layer>
@@ -480,22 +491,20 @@ class MKLDNNGraphTwoConcatTests: public TestsCommon,
<port id="1">
<dim>_CI51N_</dim>
<dim>_CI51C_</dim>
+ <dim>_CI51D_</dim>
<dim>_CI51H_</dim>
<dim>_CI51W_</dim>
</port>
<port id="2">
<dim>_CI52N_</dim>
<dim>_CI52C_</dim>
+ <dim>_CI52D_</dim>
<dim>_CI52H_</dim>
<dim>_CI52W_</dim>
</port>
</input>
<output>
- <port id="3">
- <dim>_CON2_</dim>
- <dim>_COC2_</dim>
- <dim>_COH2_</dim>
- <dim>_COW2_</dim>
+ <port id="3">__CO_DIMS_2__
</port>
</output>
</layer>
@@ -508,7 +517,7 @@ class MKLDNNGraphTwoConcatTests: public TestsCommon,
</edges>
</net>
)V0G0N";
- void changeEdgeToLayer(std::string& model, int f_l, int f_p, int t_l, int t_p, dim4 dims) {
+ void changeEdgeToLayer(std::string& model, int f_l, int f_p, int t_l, int t_p, vector<size_t> dims) {
std::string TL = "_FL" + std::to_string(f_l) + std::to_string(f_p) + "_";
std::string TP = "_FP" + std::to_string(f_l) + std::to_string(f_p) + "_";
if (!FIND_STR(model, TL) || !FIND_STR(model, TP)) {
@@ -526,31 +535,40 @@ class MKLDNNGraphTwoConcatTests: public TestsCommon,
}
std::string CI = "_CI" + std::to_string(t_l) + std::to_string(t_p);
- REPLACE_WITH_NUM(model, CI + "N_", dims.n);
- REPLACE_WITH_NUM(model, CI + "C_", dims.c);
- REPLACE_WITH_NUM(model, CI + "H_", dims.h);
- REPLACE_WITH_NUM(model, CI + "W_", dims.w);
+ auto dims_size = dims.size();
+ REPLACE_WITH_NUM(model, CI + "N_", dims[0]);
+ REPLACE_WITH_NUM(model, CI + "C_", dims[1]);
+ REPLACE_WITH_NUM(model, CI + "H_", dims[dims_size - 2]);
+ REPLACE_WITH_NUM(model, CI + "W_", dims[dims_size - 1]);
+ if (dims_size < 5) REMOVE_LINE(model, std::string("<dim>") + CI + std::string("D_") + "</dim>");
+ else REPLACE_WITH_NUM(model, CI + "D_", dims[dims_size - 3]);
}
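changeEdgeToLayer now indexes H and W from the tail of the dims vector and either fills the new `_CI..D_` placeholder (5D) or strips its whole `<dim>` line (4D). REMOVE_LINE comes from the test helpers; a sketch of the behaviour the code relies on, under the assumption that it erases the first line containing the needle:

    #include <string>

    // Presumed REMOVE_LINE semantics: drop the whole line containing `needle`,
    // together with one adjoining newline, if any.
    static void removeLineContaining(std::string& text, const std::string& needle) {
        size_t pos = text.find(needle);
        if (pos == std::string::npos) return;
        size_t begin = text.rfind('\n', pos);
        begin = (begin == std::string::npos) ? 0 : begin;    // start of the line, or of the text
        size_t end = text.find('\n', pos);
        end = (end == std::string::npos) ? text.size() : end;
        text.erase(begin, end - begin);
    }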
std::string getModel(two_concat_test_params p) {
std::string model = model_t;
- REPLACE_WITH_NUM(model, "_IN1_", p.in1.n);
- REPLACE_WITH_NUM(model, "_IC1_", p.in1.c);
- REPLACE_WITH_NUM(model, "_IW1_", p.in1.w);
- REPLACE_WITH_NUM(model, "_IH1_", p.in1.h);
-
- REPLACE_WITH_NUM(model, "_IN2_", p.in2.n);
- REPLACE_WITH_NUM(model, "_IC2_", p.in2.c);
- REPLACE_WITH_NUM(model, "_IW2_", p.in2.w);
- REPLACE_WITH_NUM(model, "_IH2_", p.in2.h);
-
- REPLACE_WITH_NUM(model, "_IN3_", p.in3.n);
- REPLACE_WITH_NUM(model, "_IC3_", p.in3.c);
- REPLACE_WITH_NUM(model, "_IW3_", p.in3.w);
- REPLACE_WITH_NUM(model, "_IH3_", p.in3.h);
-
- dim4 concat11;
+ std::string s_dims;
+ for (size_t i = 0; i < p.in1.size(); i++) {
+ s_dims += "\n <dim>";
+ s_dims += std::to_string(p.in1[i]) + "</dim>";
+ }
+ REPLACE_WITH_STR(model, "__SRC_DIMS_1__", s_dims);
+
+ s_dims = "";
+ for (size_t i = 0; i < p.in2.size(); i++) {
+ s_dims += "\n <dim>";
+ s_dims += std::to_string(p.in2[i]) + "</dim>";
+ }
+ REPLACE_WITH_STR(model, "__SRC_DIMS_2__", s_dims);
+
+ s_dims = "";
+ for (size_t i = 0; i < p.in3.size(); i++) {
+ s_dims += "\n <dim>";
+ s_dims += std::to_string(p.in3[i]) + "</dim>";
+ }
+ REPLACE_WITH_STR(model, "__SRC_DIMS_3__", s_dims);
+
+ vector<size_t> concat11;
switch (p.concat1.input1) {
case 1:
changeEdgeToLayer(model, 2, 1, 4, 1, p.in2);
@@ -565,7 +583,7 @@ class MKLDNNGraphTwoConcatTests: public TestsCommon,
concat11 = p.in1;
}
- dim4 concat12;
+ vector<size_t> concat12;
switch (p.concat1.input2) {
case 1:
changeEdgeToLayer(model, 2, 1, 4, 2, p.in2);
@@ -580,7 +598,7 @@ class MKLDNNGraphTwoConcatTests: public TestsCommon,
concat12 = p.in1;
}
- dim4 concat21;
+ vector<size_t> concat21;
switch (p.concat2.input1) {
case 1:
changeEdgeToLayer(model, 2, 1, 5, 1, p.in2);
@@ -595,7 +613,7 @@ class MKLDNNGraphTwoConcatTests: public TestsCommon,
concat21 = p.in1;
}
- dim4 concat22;
+ vector<size_t> concat22;
switch (p.concat2.input2) {
case 1:
changeEdgeToLayer(model, 2, 1, 5, 2, p.in2);
@@ -610,17 +628,25 @@ class MKLDNNGraphTwoConcatTests: public TestsCommon,
concat22 = p.in1;
}
- REPLACE_WITH_NUM(model, "_CON1_", p.concat1.axis == 0 ? concat11.n + concat12.n : concat21.n);
- REPLACE_WITH_NUM(model, "_COC1_", p.concat1.axis == 1 ? concat11.c + concat12.c : concat21.c);
- REPLACE_WITH_NUM(model, "_COH1_", p.concat1.axis == 2 ? concat11.h + concat12.h : concat21.h);
- REPLACE_WITH_NUM(model, "_COW1_", p.concat1.axis == 3 ? concat11.w + concat12.w : concat21.w);
+ s_dims = "";
+ for (size_t i = 0; i < p.in2.size(); i++) {
+ size_t concat = p.concat1.axis == i ? concat11[i] + concat12[i] : concat21[i];
+ s_dims += "\n <dim>";
+ s_dims += std::to_string(concat) + "</dim>";
+ }
+ REPLACE_WITH_STR(model, "__CO_DIMS_1__", s_dims);
+
REPLACE_WITH_NUM(model, "_CONCAT1_AXIS_", p.concat1.axis);
REPLACE_WITH_STR(model, "_CONCAT1_NAME_", p.concat1.name);
- REPLACE_WITH_NUM(model, "_CON2_", p.concat2.axis == 0 ? concat21.n + concat22.n : concat21.n);
- REPLACE_WITH_NUM(model, "_COC2_", p.concat2.axis == 1 ? concat21.c + concat22.c : concat21.c);
- REPLACE_WITH_NUM(model, "_COH2_", p.concat2.axis == 2 ? concat21.h + concat22.h : concat21.h);
- REPLACE_WITH_NUM(model, "_COW2_", p.concat2.axis == 3 ? concat21.w + concat22.w : concat21.w);
+ s_dims = "";
+ for (size_t i = 0; i < p.in2.size(); i++) {
+ size_t concat = p.concat2.axis == i ? concat21[i] + concat22[i] : concat21[i];
+ s_dims += "\n <dim>";
+ s_dims += std::to_string(concat) + "</dim>";
+ }
+ REPLACE_WITH_STR(model, "__CO_DIMS_2__", s_dims);
+
REPLACE_WITH_NUM(model, "_CONCAT2_AXIS_", p.concat2.axis);
REPLACE_WITH_STR(model, "_CONCAT2_NAME_", p.concat2.name);
return model;
@@ -642,19 +668,28 @@ protected:
MKLDNNGraphTestClass graph;
graph.CreateGraph(net_reader.getNetwork());
- InferenceEngine::SizeVector dims_src1 = {p.in1.n, p.in1.c, p.in1.h, p.in1.w};
- InferenceEngine::SizeVector dims_src2 = {p.in2.n, p.in2.c, p.in2.h, p.in2.w};
- InferenceEngine::SizeVector dims_src3 = {p.in3.n, p.in3.c, p.in3.h, p.in3.w};
+ InferenceEngine::SizeVector dims_src1 = p.in1;
+ InferenceEngine::SizeVector dims_src2 = p.in2;
+ InferenceEngine::SizeVector dims_src3 = p.in3;
+ InferenceEngine::Layout layout = InferenceEngine::ANY;
+ switch (p.in1.size()) {
+ case 4:
+ layout = InferenceEngine::NCHW;
+ break;
+ case 5:
+ layout = InferenceEngine::NCDHW;
+ break;
+ }
- InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src1);
+ InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, dims_src1);
src1->allocate();
fill_data(src1->buffer(), src1->size());
- InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src2);
+ InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, dims_src2);
src2->allocate();
fill_data(src2->buffer(), src2->size());
- InferenceEngine::Blob::Ptr src3 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src3);
+ InferenceEngine::Blob::Ptr src3 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, dims_src3);
src3->allocate();
fill_data(src3->buffer(), src3->size());
@@ -996,46 +1031,26 @@ class MKLDNNGraphIncorrectConcatTests: public TestsCommon,
<layers>
<layer name="in1" type="Input" precision="FP32" id="1">
<output>
- <port id="1">
- <dim>_IN1_</dim>
- <dim>_IC1_</dim>
- <dim>_IH1_</dim>
- <dim>_IW1_</dim>
+ <port id="1">__SRC_DIMS_1__
</port>
</output>
</layer>
<layer name="in2" type="Input" precision="FP32" id="2">
<output>
- <port id="2">
- <dim>_IN2_</dim>
- <dim>_IC2_</dim>
- <dim>_IH2_</dim>
- <dim>_IW2_</dim>
+ <port id="2">__SRC_DIMS_2__
</port>
</output>
</layer>
<layer name="con" id="3" type="Concat" precision="FP32">
<concat_data axis="_AXIS_"/>
<input>
- <port id="1">
- <dim>_IN1_</dim>
- <dim>_IC1_</dim>
- <dim>_IH1_</dim>
- <dim>_IW1_</dim>
+ <port id="1">__SRC_DIMS_1__
</port>
- <port id="2">
- <dim>_IN2_</dim>
- <dim>_IC2_</dim>
- <dim>_IH2_</dim>
- <dim>_IW2_</dim>
+ <port id="2">__SRC_DIMS_2__
</port>
</input>
<output>
- <port id="3">
- <dim>_ON_</dim>
- <dim>_OC_</dim>
- <dim>_OH_</dim>
- <dim>_OW_</dim>
+ <port id="3">__DST_DIMS__
</port>
</output>
</layer>
@@ -1049,20 +1064,27 @@ class MKLDNNGraphIncorrectConcatTests: public TestsCommon,
std::string getModel(concat_test_params p) {
std::string model = model_t;
- REPLACE_WITH_NUM(model, "_IN1_", p.in1.n);
- REPLACE_WITH_NUM(model, "_IC1_", p.in1.c);
- REPLACE_WITH_NUM(model, "_IW1_", p.in1.w);
- REPLACE_WITH_NUM(model, "_IH1_", p.in1.h);
+ std::string s_dims;
+ for (auto& dim : p.in1) {
+ s_dims += "\n <dim>";
+ s_dims += std::to_string(dim) + "</dim>";
+ }
+ REPLACE_WITH_STR(model, "__SRC_DIMS_1__", s_dims);
- REPLACE_WITH_NUM(model, "_IN2_", p.in2.n);
- REPLACE_WITH_NUM(model, "_IC2_", p.in2.c);
- REPLACE_WITH_NUM(model, "_IW2_", p.in2.w);
- REPLACE_WITH_NUM(model, "_IH2_", p.in2.h);
+ s_dims = "";
+ for (auto& dim : p.in2) {
+ s_dims += "\n <dim>";
+ s_dims += std::to_string(dim) + "</dim>";
+ }
+ REPLACE_WITH_STR(model, "__SRC_DIMS_2__", s_dims);
- REPLACE_WITH_NUM(model, "_ON_", p.axis == 0 ? p.in1.n + p.in2.n : p.in1.n);
- REPLACE_WITH_NUM(model, "_OC_", p.axis == 1 ? p.in1.c + p.in2.c : p.in1.c);
- REPLACE_WITH_NUM(model, "_OH_", p.axis == 2 ? p.in1.h + p.in2.h : p.in1.h);
- REPLACE_WITH_NUM(model, "_OW_", p.axis == 3 ? p.in1.w + p.in2.w : p.in1.w);
+ s_dims = "";
+ for (size_t i = 0; i < p.in1.size(); i++) {
+ size_t dim = p.axis == i ? p.in1[i] + p.in2[i] : p.in1[i];
+ s_dims += "\n <dim>";
+ s_dims += std::to_string(dim) + "</dim>";
+ }
+ REPLACE_WITH_STR(model, "__DST_DIMS__", s_dims);
REPLACE_WITH_NUM(model, "_AXIS_", p.axis);
return model;
@@ -1079,10 +1101,8 @@ protected:
std::string model = getModel(p);
InferenceEngine::CNNNetReader net_reader;
- ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
- MKLDNNGraphTestClass graph;
- ASSERT_THROW(graph.CreateGraph(net_reader.getNetwork()), InferenceEngine::details::InferenceEngineException);
+ ASSERT_THROW(net_reader.ReadNetwork(model.data(), model.length()),
+ InferenceEngine::details::InferenceEngineException);
} catch (const InferenceEngine::details::InferenceEngineException &e) {
FAIL() << e.what();
}
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_conv_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_conv_test.cpp
index 7d1235230..dbfbc06a3 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_conv_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_conv_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -18,85 +17,118 @@
#define XBYAK_UNDEF_JNL
#include "../../../../../../../thirdparty/mkl-dnn/src/cpu/xbyak/xbyak_util.h"
+using namespace InferenceEngine;
using namespace ::testing;
using namespace std;
using namespace mkldnn;
-
struct conv_test_params {
- struct {
- size_t n;
- size_t c;
- size_t h;
- size_t w;
- } in;
-
- size_t krn_w;
- size_t krn_h;
- size_t str_w;
- size_t str_h;
- size_t pad_w;
- size_t pad_h;
+ // Formats: NCHW, NCDHW
+ vector<size_t> dims;
+    // Formats: {W, H} for 4D, {W, H, D} for 5D
+ vector<size_t> kernel;
+ vector<size_t> strides;
+ vector<size_t> pads_begin;
+ vector<size_t> pads_end;
size_t out_c;
size_t grp_c;
+ string auto_pad;
size_t num_prim_desc;
int selectedType;
- std::vector<MKLDNNPlugin::impl_desc_type> preferTypes;
+ vector<MKLDNNPlugin::impl_desc_type> preferTypes;
- std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
+ vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
};
template <typename data_t>
-void ref_conv(const InferenceEngine::TBlob<data_t> &src, const data_t *weights, const size_t weightsSize,
- InferenceEngine::TBlob<data_t> &dst, conv_test_params prm) {
- size_t KW = prm.krn_w;
- size_t KH = prm.krn_h;
+void ref_conv(const TBlob<data_t> &src, const data_t *weights, const size_t weightsSize,
+ TBlob<data_t> &dst, conv_test_params prm) {
+ auto dims_size = src.dims().size();
+
+ size_t KW = prm.kernel[X_AXIS];
+ size_t KH = prm.kernel[Y_AXIS];
+ size_t KD = dims_size == 5 ? prm.kernel[Z_AXIS] : 1u;
size_t GC = prm.grp_c;
size_t IC = src.dims()[1];
- size_t IH = src.dims()[2];
- size_t IW = src.dims()[3];
+ size_t ID = dims_size == 5 ? src.dims()[dims_size - 3] : 1u;
+ size_t IH = src.dims()[dims_size - 2];
+ size_t IW = src.dims()[dims_size - 1];
- size_t OW = (IW + 2 * prm.pad_w - prm.krn_w) / prm.str_w + 1;
- size_t OH = (IH + 2 * prm.pad_h - prm.krn_h) / prm.str_h + 1;
+ size_t OW = (IW + 2u * prm.pads_begin[X_AXIS] - prm.kernel[X_AXIS]) / prm.strides[X_AXIS] + 1u;
+ size_t OH = (IH + 2u * prm.pads_begin[Y_AXIS] - prm.kernel[Y_AXIS]) / prm.strides[Y_AXIS] + 1u;
+ size_t OD = dims_size == 5 ? (ID + 2u * prm.pads_begin[Z_AXIS] - prm.kernel[Z_AXIS]) / prm.strides[Z_AXIS] + 1u : 1u;
size_t OC = prm.out_c;
const data_t *src_data = src.readOnly();
const data_t *weights_data = weights;
- const data_t *bias_data = weights_data + KW * KH * OC * IC / GC;
+ const data_t *bias_data = weights_data + KW * KH * KD * OC * IC / GC;
data_t *dst_data = dst.data();
- IE_ASSERT(KW * KH * OC * IC / GC + OC == weightsSize);
+ IE_ASSERT(KW * KH * KD * OC * IC / GC + OC == weightsSize);
IE_ASSERT(OW == dst.dims()[0]);
IE_ASSERT(OH == dst.dims()[1]);
+
+ size_t SC1 = OH * OW;
+ size_t SC2 = SC1 * OD;
+ size_t SC3 = OC / GC;
+ size_t SC4 = SC2 * SC3;
+
+ size_t IC1 = IH * IW;
+ size_t IC2 = IC1 * ID;
+ size_t IC3 = IC / GC;
+ size_t IC4 = IC2 * IC3;
+
+ size_t KC1 = KH * KW;
+ size_t KC2 = KC1 * KD;
+ size_t KC3 = IC3 * KC2;
+ size_t KC4 = SC3 * KC3;
for (uint32_t g = 0; g < GC; g++) {
+ size_t gc = g * SC4;
+ size_t goc = g * SC3;
+ size_t gic = g * IC4;
+ size_t gkc = g * KC4;
for (uint32_t oc = 0; oc < OC / GC; oc++) {
- for (uint32_t oh = 0; oh < OH; oh++) {
- for (uint32_t ow = 0; ow < OW; ow++) {
- size_t oidx = g * OC / GC * OH * OW
- + oc * OH * OW + oh * OW + ow;
- dst_data[oidx] = bias_data[g * OC / GC + oc];
-
- for (size_t ic = 0; ic < IC / GC; ic++) {
- for (size_t kh = 0; kh < KH; kh++) {
- for (size_t kw = 0; kw < KW; kw++) {
- int32_t iw = ow * prm.str_w - prm.pad_w + kw;
- int32_t ih = oh * prm.str_h - prm.pad_h + kh;
- if (iw < 0 || iw >= (int32_t)IW || ih < 0
- || ih >= (int32_t)IH)
- continue;
- size_t iidx = g * IC / GC * IH * IW
- + ic * IH * IW + ih * IW + iw;
- size_t widx = g * OC / GC * IC / GC * KH * KW
- + oc * IC / GC * KH * KW
- + ic * KH * KW + kh * KW + kw;
-
- dst_data[ oidx] += src_data[iidx] * weights_data[widx];
+ size_t cc = gc + oc * SC2;
+ size_t gooc = goc + oc;
+ size_t gkoc = gkc + oc * KC3;
+ for (uint32_t od = 0; od < OD; od++) {
+ size_t dc = cc + od * SC1;
+ for (uint32_t oh = 0; oh < OH; oh++) {
+ size_t hc = dc + oh * OW;
+ for (uint32_t ow = 0; ow < OW; ow++) {
+ size_t oidx = hc + ow;
+
+ dst_data[oidx] = bias_data[gooc];
+
+ for (size_t ic = 0; ic < IC / GC; ic++) {
+ size_t icc = gkoc + ic * KC2;
+ size_t kicc = gic + ic * IC2;
+ for (size_t kd = 0; kd < KD; kd++) {
+ int32_t id = dims_size == 5 ? od * prm.strides[Z_AXIS] - prm.pads_begin[Z_AXIS] + kd : 0;
+ if (id < 0 || id >= (int32_t)ID) continue;
+ size_t kidc = kicc + id * IC1;
+ size_t kdc = icc + kd * KC1;
+ for (size_t kh = 0; kh < KH; kh++) {
+ int32_t ih = oh * prm.strides[Y_AXIS] - prm.pads_begin[Y_AXIS] + kh;
+ if (ih < 0 || ih >= (int32_t)IH) continue;
+ size_t kihc = kidc + ih * IW;
+ size_t khc = kdc + kh * KW;
+ for (size_t kw = 0; kw < KW; kw++) {
+ int32_t iw = ow * prm.strides[X_AXIS] - prm.pads_begin[X_AXIS] + kw;
+ if (iw < 0 || iw >= (int32_t)IW) continue;
+
+ size_t iidx = kihc + iw;
+ size_t widx = khc + kw;
+
+ dst_data[oidx] += src_data[iidx] * weights_data[widx];
+ }
+ }
}
}
}
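The rewritten reference loops hoist flattened strides out of the inner loops: SC1/SC2 are the per-channel output plane and volume, SC3 the channels per group, SC4 one group's worth of outputs, with IC* and KC* playing the same roles for the source and the weights. The output extents themselves follow the usual convolution size relation; a minimal sketch of that arithmetic (symmetric padding, matching the `2u * pads_begin` above):

    #include <cstddef>

    // Per-axis output extent of a non-dilated convolution:
    // floor((I + pad_begin + pad_end - K) / S) + 1.
    static size_t convOutDim(size_t in, size_t kernel, size_t stride,
                             size_t pad_begin, size_t pad_end) {
        return (in + pad_begin + pad_end - kernel) / stride + 1;
    }

    // e.g. convOutDim(40, 3, 2, 0, 0) == 19, the OH of the {1, 3, 40, 40}
    // case with kernel {3, 3} and strides {1, 2} below.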
@@ -108,42 +140,32 @@ void ref_conv(const InferenceEngine::TBlob<data_t> &src, const data_t *weights,
class MKLDNNGraphConvolutionTests: public TestsCommon,
public WithParamInterface<conv_test_params> {
- std::string model_t = R"V0G0N(
-<Net Name="Convolution_Only" version="2" precision="FP32" batch="1">
+ std::string model_t_5D = R"V0G0N(
+<net name="Convolution_Only" version="3" precision="FP32" batch="1">
<layers>
<layer name="in1" type="Input" precision="FP32" id="0">
<output>
- <port id="0">
- <dim>_IN_</dim>
- <dim>_IC_</dim>
- <dim>_IH_</dim>
- <dim>_IW_</dim>
+ <port id="0">__SRC_DIMS__
</port>
</output>
</layer>
<layer name="conv1" id="1" type="Convolution" precision="FP32">
- <convolution stride-x="_SW_" stride-y="_SH_"
- pad-x="_PW_" pad-y="_PH_"
- kernel-x="_KW_" kernel-y="_KH_"
- output="_OC_" group="_GC_" PrimitivesPriority="_IMPLS_"/>
+ <convolution _AP_ kernel="_K_"
+ pads_begin="_PB_" pads_end="_PE_"
+ strides="_KS_"
+ output="_OC_" group="_GC_" PrimitivesPriority="_IMPLS_"/>
<weights offset="0" size="_S1_" />
<biases offset="_S1_" size="_S2_" />
<input>
- <port id="1">
- <dim>_IN_</dim>
- <dim>_IC_</dim>
- <dim>_IH_</dim>
- <dim>_IW_</dim>
+ <port id="1">__SRC_DIMS__
</port>
</input>
<output>
<port id="2">
<dim>_IN_</dim>
- <dim>_OC_</dim>
- <dim>_OH_</dim>
- <dim>_OW_</dim>
+ <dim>_OC_</dim>__DST_DIMS__
</port>
</output>
</layer>
@@ -151,33 +173,53 @@ class MKLDNNGraphConvolutionTests: public TestsCommon,
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
</edges>
-</Net>
+</net>
)V0G0N";
protected:
std::string getModel(conv_test_params p) {
- std::string model = model_t;
- REPLACE_WITH_NUM(model, "_IW_", p.in.w);
- REPLACE_WITH_NUM(model, "_IH_", p.in.h);
- REPLACE_WITH_NUM(model, "_IC_", p.in.c);
- REPLACE_WITH_NUM(model, "_IN_", p.in.n);
-
- REPLACE_WITH_NUM(model, "_KW_", p.krn_w);
- REPLACE_WITH_NUM(model, "_KH_", p.krn_h);
- REPLACE_WITH_NUM(model, "_SW_", p.str_w);
- REPLACE_WITH_NUM(model, "_SH_", p.str_h);
- REPLACE_WITH_NUM(model, "_PW_", p.pad_w);
- REPLACE_WITH_NUM(model, "_PH_", p.pad_h);
+ std::string model = model_t_5D;
+ std::string s_dims;
+ for (auto& dim : p.dims) {
+ s_dims += "\n <dim>";
+ s_dims += std::to_string(dim) + "</dim>";
+ }
+ REPLACE_WITH_STR(model, "__SRC_DIMS__", s_dims);
+
+ s_dims = "";
+ int k_len = p.kernel.size();
+ for (size_t i = 2; i < p.dims.size(); i++) {
+ size_t inx = k_len - i + 1;
+ size_t dim = (p.dims[i] + 2lu * p.pads_begin[inx] - p.kernel[inx]) / p.strides[inx] + 1lu;
+ s_dims += "\n <dim>";
+ s_dims += std::to_string(dim) + "</dim>";
+ }
+ REPLACE_WITH_STR(model, "__DST_DIMS__", s_dims);
+
+ REPLACE_WITH_NUM(model, "_IN_", p.dims[0]);
+
+ REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_K_", p.kernel);
+ REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_KS_", p.strides);
+ REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PB_", p.pads_begin);
+ REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PE_", p.pads_end);
+ string auto_pad;
+ if (!p.auto_pad.empty()) auto_pad = string("auto_pad=") + string("\"") + p.auto_pad + string("\"");
+ REPLACE_WITH_STR(model, "_AP_", auto_pad);
REPLACE_WITH_NUM(model, "_GC_", p.grp_c);
REPLACE_WITH_NUM(model, "_OC_", p.out_c);
- REPLACE_WITH_NUM(model, "_OH_", (p.in.h + 2 * p.pad_h - p.krn_h) / p.str_h + 1);
- REPLACE_WITH_NUM(model, "_OW_", (p.in.w + 2 * p.pad_w - p.krn_w) / p.str_w + 1);
- size_t w_data_size = (p.krn_w * p.krn_h * p.out_c * p.in.c / p.grp_c) * sizeof(float);
+ size_t w_data_size = 1;
+ for (auto ker : p.kernel) {
+ w_data_size *= ker;
+ }
+
+ w_data_size = (w_data_size * p.out_c * p.dims[1] / p.grp_c) * sizeof(float);
size_t b_data_size = p.out_c * sizeof(float);
+
REPLACE_WITH_NUM(model, "_S1_", w_data_size);
REPLACE_WITH_NUM(model, "_S2_", b_data_size);
+
std::string impls;
for (const auto& preferType : p.preferTypes) {
if (!impls.empty())
@@ -185,6 +227,7 @@ protected:
impls += "cpu:" + MKLDNNGraphTestClass::getStrPrimitiveDescriptorType(preferType);
}
REPLACE_WITH_STR(model, "_IMPLS_", impls);
+
return model;
}
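Two details of the new getModel are worth spelling out. The kernel/stride/pad vectors are stored width-first, while IR v3 attributes list spatial values in the opposite order, which is presumably what REPLACE_WITH_NUM_VECTOR_REVERSE serializes; and the backing U8 blob allocated later in SetUp holds all filter taps plus one bias per output channel. A sketch of both, under those assumptions:

    #include <cstddef>
    #include <sstream>
    #include <string>
    #include <vector>

    // Presumed REPLACE_WITH_NUM_VECTOR_REVERSE payload: {W, H, D} -> "D,H,W".
    static std::string serializeReversed(const std::vector<size_t>& v) {
        std::ostringstream os;
        for (size_t i = v.size(); i > 0; --i) {
            if (i != v.size()) os << ",";
            os << v[i - 1];
        }
        return os.str();
    }

    // Weights-blob byte size as computed in SetUp: prod(kernel) * out_c *
    // (in_c / group) floats of filter taps, plus out_c floats of biases.
    static size_t convWeightsBytes(const std::vector<size_t>& kernel,
                                   size_t out_c, size_t in_c, size_t group) {
        size_t taps = 1;
        for (size_t k : kernel) taps *= k;
        return (taps * out_c * in_c / group + out_c) * sizeof(float);
    }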
@@ -197,19 +240,28 @@ protected:
conv_test_params p = ::testing::WithParamInterface<conv_test_params>::GetParam();
std::string model = getModel(p);
- InferenceEngine::CNNNetReader net_reader;
+ CNNNetReader net_reader;
ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
- InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, {(p.krn_w * p.krn_h * p.out_c * p.in.c / p.grp_c + p.out_c)
- * sizeof(float)});
+ size_t blob_size = p.out_c * p.dims[1] / p.grp_c;
+ for (auto k : p.kernel) {
+ blob_size *= k;
+ }
+ blob_size = (blob_size + p.out_c) * sizeof(float);
+ TBlob<uint8_t> *weights = new TBlob<uint8_t>
+ (Precision::U8, C, {blob_size});
weights->allocate();
+
fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
- InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
+ size_t w_buffer_len = weights->size() / sizeof(float);
+
+ TBlob<uint8_t>::Ptr weights_ptr = TBlob<uint8_t>::Ptr(weights);
net_reader.SetWeights(weights_ptr);
+ CNNNetwork network = net_reader.getNetwork();
MKLDNNGraphTestClass graph;
- graph.CreateGraph(net_reader.getNetwork());
+ graph.CreateGraph(network);
auto& nodes = graph.getNodes();
nodes = graph.getNodes();
@@ -241,39 +293,47 @@ protected:
}
}
- InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w};
+ Layout layout = ANY;
+ switch (p.dims.size()) {
+ case 4:
+ layout = NCHW;
+ break;
+ case 5:
+ layout = NCDHW;
+ break;
+ }
- InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
+ Blob::Ptr src = make_shared_blob<float, const SizeVector>
+ (Precision::FP32, layout, p.dims);
src->allocate();
fill_data(src->buffer(), src->size());
- auto * srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
+ auto * srcPtr = dynamic_cast<TBlob<float>*>(src.get());
if (srcPtr == nullptr)
FAIL() << "Cannot cast blob to TBlob<float>.";
- InferenceEngine::BlobMap srcs;
- srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
+ BlobMap srcs;
+ srcs.insert(std::pair<std::string, Blob::Ptr>("in1", src));
- InferenceEngine::OutputsDataMap out;
- out = net_reader.getNetwork().getOutputsInfo();
- InferenceEngine::BlobMap outputBlobs;
+ OutputsDataMap out;
+ out = network.getOutputsInfo();
+ BlobMap outputBlobs;
- std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
+ std::pair<std::string, DataPtr> item = *out.begin();
- InferenceEngine::TBlob<float>::Ptr output;
- output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
+ TBlob<float>::Ptr output;
+ output = make_shared_blob<float>(item.second->getTensorDesc());
output->allocate();
outputBlobs[item.first] = output;
graph.Infer(srcs, outputBlobs);
-
- InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
+ TBlob<float> dst_ref(item.second->getTensorDesc());
dst_ref.allocate();
ref_conv(*srcPtr, (const float *)weights->buffer(), weights->size() / sizeof(float), dst_ref, p);
- compare(*output, dst_ref);
- } catch (const InferenceEngine::details::InferenceEngineException &e) {
+ compare(*output, dst_ref, 0.0002f);
+ } catch (const details::InferenceEngineException &e) {
FAIL() << e.what();
}
}
@@ -284,31 +344,68 @@ TEST_P(MKLDNNGraphConvolutionTests, TestsConvolution) {}
INSTANTIATE_TEST_CASE_P(
TestConvolution, MKLDNNGraphConvolutionTests,
::testing::Values(
- conv_test_params{{1, 9, 16, 32},
- 1, 1, 1, 1, 0, 0, 17, 1, 6, MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_1x1},
+ /*0*/ conv_test_params{{1, 9, 16, 32},
+ {1, 1}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "", 6, MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_1x1 },
conv_test_params{{1, 9, 32, 16},
- 2, 4, 1, 1, 0, 0, 17, 1, 5, MKLDNNPlugin::impl_desc_type::jit },
+ {2, 4}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit },
conv_test_params{{1, 9, 32, 16},
- 2, 4, 2, 1, 0, 0, 17, 1, 5, MKLDNNPlugin::impl_desc_type::jit },
+ {2, 4}, {2, 1}, {0, 0}, {0, 0}, 17, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit },
conv_test_params{{1, 3, 40, 40},
- 3, 3, 1, 2, 0, 0, 20, 1, 5, MKLDNNPlugin::impl_desc_type::jit },
+ {3, 3}, {1, 2}, {0, 0}, {0, 0}, 20, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit },
conv_test_params{{1, 1, 40, 40},
- 3, 3, 1, 2, 0, 0, 20, 1, 5, MKLDNNPlugin::impl_desc_type::jit },
+ {3, 3}, {1, 2}, {0, 0}, {0, 0}, 20, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit },
conv_test_params{{1, 1, 32, 16},
- 2, 4, 2, 1, 0, 0, 17, 1, 5, MKLDNNPlugin::impl_desc_type::jit },
- /*conv_test_params{{1, 9, 16, 32},
- 1, 1, 1, 1, 0, 0, 17, 1, 6, MKLDNNPlugin::impl_desc_type::gemm,
+ {2, 4}, {2, 1}, {0, 0}, {0, 0}, 17, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit },
+#ifdef USE_MKL
+ conv_test_params{{1, 9, 16, 32},
+ {1, 1}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "", 6, MKLDNNPlugin::impl_desc_type::gemm,
{MKLDNNPlugin::impl_desc_type::gemm_any,
MKLDNNPlugin::impl_desc_type::gemm_blas,
MKLDNNPlugin::impl_desc_type::gemm_avx512,
MKLDNNPlugin::impl_desc_type::gemm_avx2,
- MKLDNNPlugin::impl_desc_type::gemm_sse42}
- },*/
+ MKLDNNPlugin::impl_desc_type::gemm_sse42} },
+#endif
conv_test_params{{1, 9, 32, 16},
- 2, 4, 1, 1, 0, 0, 17, 1, 5, MKLDNNPlugin::impl_desc_type::ref_any, {MKLDNNPlugin::impl_desc_type::ref_any} },
+ {2, 4}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "", 5, MKLDNNPlugin::impl_desc_type::ref_any,
+ {MKLDNNPlugin::impl_desc_type::ref_any} },
conv_test_params{{1, 4, 54, 96},
- 3, 3, 1, 1, 1, 1, 64, 1, 3, MKLDNNPlugin::impl_desc_type::ref_any,
- {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd, MKLDNNPlugin::impl_desc_type::ref_any}}));
+ {3, 3}, {1, 1}, {1, 1}, {0, 0}, 64, 1, "", 3, MKLDNNPlugin::impl_desc_type::ref_any,
+ {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd, MKLDNNPlugin::impl_desc_type::ref_any}},
+ // 5D
+ /*9*/ conv_test_params{{1, 3, 15, 20, 20},
+ {3, 3, 3}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::ref_any,
+ {MKLDNNPlugin::impl_desc_type::ref_any} },
+ conv_test_params{{1, 24, 15, 20, 20},
+ {3, 3, 3}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::ref_any,
+ {MKLDNNPlugin::impl_desc_type::ref_any} },
+ conv_test_params{{1, 32, 15, 20, 20},
+ {3, 3, 3}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::ref_any,
+ {MKLDNNPlugin::impl_desc_type::ref_any} },
+ conv_test_params{{1, 3, 15, 25, 20},
+ {3, 3, 3}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::jit },
+ conv_test_params{{1, 24, 15, 25, 20},
+ {3, 3, 3}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::jit },
+ /*14*/ conv_test_params{{1, 32, 15, 25, 20},
+ {3, 3, 3}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::jit },
+#ifdef USE_MKL
+ conv_test_params{{1, 5, 15, 20, 20},
+ {3, 3, 3}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::gemm_blas },
+ conv_test_params{{1, 5, 15, 20, 20},
+ {3, 3, 3}, {3, 2, 1}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::gemm_blas },
+ conv_test_params{{1, 5, 15, 20, 20},
+ {3, 3, 3}, {1, 1, 1}, {2, 2, 2}, {1, 1, 1}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::gemm_blas },
+ conv_test_params{{1, 16, 30, 30, 10},
+ {5, 5, 5}, {1, 1, 1}, {2, 2, 2}, {2, 2, 2}, 16, 1, "", 2, MKLDNNPlugin::impl_desc_type::gemm_blas,
+ {MKLDNNPlugin::impl_desc_type::gemm_blas} },
+ conv_test_params{{1, 4, 16, 16, 16},
+ {3, 3, 3}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, 8, 1, "same_upper", 2, MKLDNNPlugin::impl_desc_type::gemm_blas },
+#endif
+ /*20*/ conv_test_params{{1, 16, 30, 30, 10},
+ {5, 5, 5}, {1, 1, 1}, {2, 2, 2}, {2, 2, 2}, 16, 1, "", 2, MKLDNNPlugin::impl_desc_type::jit },
+ conv_test_params{{1, 16, 30, 30, 10},
+ {5, 5, 5}, {1, 1, 1}, {2, 2, 2}, {2, 2, 2}, 16, 1, "", 2, MKLDNNPlugin::impl_desc_type::ref_any,
+ {MKLDNNPlugin::impl_desc_type::ref_any} }));
+
class MKLDNNGraphDynBatchConvolutionTests: public MKLDNNGraphConvolutionTests {
protected:
@@ -317,52 +414,66 @@ protected:
TestsCommon::SetUp();
conv_test_params p = ::testing::WithParamInterface<conv_test_params>::GetParam();
std::string model = getModel(p);
- size_t MB = p.in.n;
- if (MB < 2)
- MB = 2;
+ std::vector<size_t> dims = p.dims;
+ if (dims[0] < 2)
+ dims[0] = 2;
- InferenceEngine::CNNNetReader net_reader;
+ CNNNetReader net_reader;
ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
- InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C,
- {(p.krn_w * p.krn_h * p.out_c * p.in.c / p.grp_c + p.out_c) * sizeof(float)});
+ size_t blob_size = p.out_c * dims[1] / p.grp_c;
+ for (auto k : p.kernel) {
+ blob_size *= k;
+ }
+ blob_size = (blob_size + p.out_c) * sizeof(float);
+ TBlob<uint8_t> *weights = new TBlob<uint8_t>(Precision::U8, C,
+ {blob_size});
weights->allocate();
fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
- InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
+ TBlob<uint8_t>::Ptr weights_ptr = TBlob<uint8_t>::Ptr(weights);
net_reader.SetWeights(weights_ptr);
- InferenceEngine::CNNNetwork network = net_reader.getNetwork();
- auto implNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(&((InferenceEngine::ICNNNetwork&)network));
+ CNNNetwork network = net_reader.getNetwork();
+ auto implNet = dynamic_cast<details::CNNNetworkImpl *>(&((ICNNNetwork&)network));
ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
- InferenceEngine::ResponseDesc resp;
- InferenceEngine::StatusCode sts = implNet->setBatchSizeReshape(MB, &resp);
- ASSERT_EQ((int)InferenceEngine::StatusCode::OK, sts) << resp.msg;
+ ResponseDesc resp;
+ StatusCode sts = implNet->setBatchSizeReshape(dims[0], &resp);
+ ASSERT_EQ((int)StatusCode::OK, sts) << resp.msg;
MKLDNNGraphTestClass graph;
- graph.CreateGraph(net_reader.getNetwork());
-
- InferenceEngine::SizeVector dims_src = {MB, p.in.c, p.in.h, p.in.w};
+ graph.CreateGraph(network);
+
+ Layout layout = ANY;
+ switch (dims.size()) {
+ case 4:
+ layout = NCHW;
+ break;
+ case 5:
+ layout = NCDHW;
+ break;
+ }
- InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
+ Blob::Ptr src = make_shared_blob<float, const SizeVector>
+ (Precision::FP32, layout, dims);
src->allocate();
fill_data(src->buffer(), src->size());
- auto * srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
+ auto * srcPtr = dynamic_cast<TBlob<float>*>(src.get());
if (srcPtr == nullptr)
FAIL() << "Cannot cast blob to TBlob<float>.";
- InferenceEngine::BlobMap srcs;
- srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
+ BlobMap srcs;
+ srcs.insert(std::pair<std::string, Blob::Ptr>("in1", src));
- InferenceEngine::OutputsDataMap out;
- out = net_reader.getNetwork().getOutputsInfo();
- InferenceEngine::BlobMap outputBlobs;
+ OutputsDataMap out;
+ out = network.getOutputsInfo();
+ BlobMap outputBlobs;
- std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
+ std::pair<std::string, DataPtr> item = *out.begin();
- InferenceEngine::TBlob<float>::Ptr output;
- output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
+ TBlob<float>::Ptr output;
+ output = make_shared_blob<float>(item.second->getTensorDesc());
output->allocate();
outputBlobs[item.first] = output;
@@ -373,9 +484,9 @@ protected:
node->getType() == MKLDNNPlugin::Convolution_Sum_Activation;
};
- graph.checkDynBatch(srcs, outputBlobs, MB, MB, checkConvolution, MKLDNNGraphTestClass::CheckDynBatchType::Child);
- graph.checkDynBatch(srcs, outputBlobs, 1, MB, checkConvolution, MKLDNNGraphTestClass::CheckDynBatchType::Child);
- } catch (const InferenceEngine::details::InferenceEngineException &e) {
+ graph.checkDynBatch(srcs, outputBlobs, dims[0], dims[0], checkConvolution, MKLDNNGraphTestClass::CheckDynBatchType::Child);
+ graph.checkDynBatch(srcs, outputBlobs, 1, dims[0], checkConvolution, MKLDNNGraphTestClass::CheckDynBatchType::Child);
+ } catch (const details::InferenceEngineException &e) {
FAIL() << e.what();
}
}
@@ -387,25 +498,25 @@ INSTANTIATE_TEST_CASE_P(
TestDynBatchConvolution, MKLDNNGraphDynBatchConvolutionTests,
::testing::Values(
conv_test_params{{1, 8, 16, 32},
- 1, 1, 1, 1, 0, 0, 17, 1, 7, MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_1x1,
+ {1, 1}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "", 7, MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_1x1,
{MKLDNNPlugin::impl_desc_type::jit_avx512_winograd}},
conv_test_params{{1, 9, 32, 16},
- 2, 4, 1, 1, 0, 0, 17, 1, 5, MKLDNNPlugin::impl_desc_type::jit,
- {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd}},
+ {2, 4}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit,
+ {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd} },
conv_test_params{{1, 9, 32, 16},
- 2, 4, 2, 1, 0, 0, 17, 1, 5, MKLDNNPlugin::impl_desc_type::jit,
- {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd}},
+ {2, 4}, {2, 1}, {0, 0}, {0, 0}, 17, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit,
+ {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd} },
conv_test_params{{1, 3, 40, 40},
- 3, 3, 1, 2, 0, 0, 20, 1, 5, MKLDNNPlugin::impl_desc_type::jit,
- {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd}},
+ {3, 3}, {1, 2}, {0, 0}, {0, 0}, 20, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit,
+ {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd} },
conv_test_params{{1, 1, 40, 40},
- 3, 3, 1, 2, 0, 0, 20, 1, 5, MKLDNNPlugin::impl_desc_type::jit,
- {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd}},
+ {3, 3}, {1, 2}, {0, 0}, {0, 0}, 20, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit,
+ {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd} },
conv_test_params{{1, 1, 32, 16},
- 2, 4, 2, 1, 0, 0, 17, 1, 5, MKLDNNPlugin::impl_desc_type::jit,
- {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd}},
+ {2, 4}, {2, 1}, {0, 0}, {0, 0}, 17, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit,
+ {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd} },
conv_test_params{{1, 9, 16, 32},
- 1, 1, 1, 1, 0, 0, 17, 1, 7, MKLDNNPlugin::impl_desc_type::gemm,
+ {1, 1}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "", 7, MKLDNNPlugin::impl_desc_type::gemm,
{MKLDNNPlugin::impl_desc_type::gemm_any,
MKLDNNPlugin::impl_desc_type::gemm_blas,
MKLDNNPlugin::impl_desc_type::gemm_avx512,
@@ -413,4 +524,4 @@ INSTANTIATE_TEST_CASE_P(
MKLDNNPlugin::impl_desc_type::gemm_sse42}
},
conv_test_params{{1, 9, 32, 16},
- 2, 4, 1, 1, 0, 0, 17, 1, 5, MKLDNNPlugin::impl_desc_type::ref_any, {MKLDNNPlugin::impl_desc_type::ref_any} }));
+ {2, 4}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "", 5, MKLDNNPlugin::impl_desc_type::ref_any, {MKLDNNPlugin::impl_desc_type::ref_any} }));
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_crop_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_crop_test.cpp
index cc0d21deb..545ac154f 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_crop_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_crop_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_deconv_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_deconv_test.cpp
index 86dbb0b8d..b26351158 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_deconv_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_deconv_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -15,30 +14,26 @@
#include "tests_common.hpp"
+using namespace InferenceEngine;
using namespace ::testing;
using namespace std;
using namespace mkldnn;
struct deconv_test_params {
- struct {
- size_t n;
- size_t c;
- size_t h;
- size_t w;
- } in;
-
- size_t krn_w;
- size_t krn_h;
- size_t str_w;
- size_t str_h;
- size_t pad_w;
- size_t pad_h;
+ // Formats: NCHW, NCDHW
+ vector<size_t> dims;
+    // Formats: {W, H} for 4D, {W, H, D} for 5D
+ vector<size_t> kernel;
+ vector<size_t> strides;
+ vector<size_t> pads_begin;
+ vector<size_t> pads_end;
size_t out_c;
size_t grp_c;
bool with_bias;
+ string auto_pad;
size_t num_prim_desc;
@@ -51,19 +46,24 @@ struct deconv_test_params {
template <typename data_t>
void ref_deconv(const InferenceEngine::TBlob<data_t> &src, const InferenceEngine::Blob::Ptr &weights, const InferenceEngine::Blob::Ptr &bias,
InferenceEngine::TBlob<data_t> &dst, deconv_test_params prm) {
+ auto dims_size = src.dims().size();
size_t G = prm.grp_c;
- size_t KW = prm.krn_w;
- size_t KH = prm.krn_h;
+ size_t KW = prm.kernel[X_AXIS];
+ size_t KH = prm.kernel[Y_AXIS];
+ size_t KD = prm.kernel.size() > Z_AXIS ? prm.kernel[Z_AXIS] : 1u;
- size_t PW = prm.pad_w;
- size_t PH = prm.pad_h;
+ size_t PW = prm.pads_begin[X_AXIS];
+ size_t PH = prm.pads_begin[Y_AXIS];
+ size_t PD = prm.pads_begin.size() > Z_AXIS ? prm.pads_begin[Z_AXIS] : 0u;
- size_t SW = prm.str_w;
- size_t SH = prm.str_h;
+ size_t SW = prm.strides[X_AXIS];
+ size_t SH = prm.strides[Y_AXIS];
+ size_t SD = prm.strides.size() > Z_AXIS ? prm.strides[Z_AXIS] : 1u;
- size_t IW = src.dims()[3];
- size_t IH = src.dims()[2];
+ size_t IW = src.dims()[dims_size - 1];
+ size_t IH = src.dims()[dims_size - 2];
+ size_t ID = dims_size == 5 ? src.dims()[dims_size - 3] : 1u;
size_t IC = src.dims()[1];
size_t MB = src.dims()[0];
@@ -71,6 +71,7 @@ void ref_deconv(const InferenceEngine::TBlob<data_t> &src, const InferenceEngine
size_t OW = SW * (IW - 1) + KW - 2 * PW;
size_t OH = SH * (IH - 1) + KH - 2 * PH;
+ size_t OD = dims_size == 5 ? (SD * (ID - 1) + KD - 2 * PD) : 1u;
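These extents follow the transposed-convolution size relation, the inverse of the formula used in ref_conv; a minimal sketch (symmetric padding, as above):

    #include <cstddef>

    // Per-axis output extent of a deconvolution: S * (I - 1) + K - 2 * P.
    static size_t deconvOutDim(size_t in, size_t kernel, size_t stride, size_t pad) {
        return stride * (in - 1) + kernel - 2 * pad;
    }

    // e.g. deconvOutDim(5, 4, 2, 1) == 10, the OH/OW of the {2, 8, 5, 5}
    // cases with kernel {4, 4}, strides {2, 2}, pads_begin {1, 1} below.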
const data_t *src_data = src.readOnly();
const data_t *weights_data = weights->buffer().as<data_t*>();
@@ -78,43 +79,69 @@ void ref_deconv(const InferenceEngine::TBlob<data_t> &src, const InferenceEngine
data_t *dst_data = dst.data();
- for (int g = 0; g < G; ++g) {
- for (int mb = 0; mb < MB; ++mb) {
- for (int oc = 0; oc < OC / G; ++oc) {
- for (int oh = 0; oh < OH; ++oh) {
- for (int ow = 0; ow < OW; ++ow) {
- size_t didx = mb * OC * OH * OW
- + (g * OC / G + oc) * OH * OW + oh * OW + ow;
-
- dst_data[didx] = data_t(0);
- if (prm.with_bias) dst_data[didx] += bias_data[oc];
-
- for (int ic = 0; ic < IC / G; ic++) {
- for (int kh = 0; kh < KH; kh++) {
- for (int kw = 0; kw < KW; kw++) {
- if (ow + PW < kw || oh + PH < kh)
- continue;
-
- size_t iw = ow - kw + PW;
- size_t ih = oh - kh + PH;
+ size_t CS1 = OH * OW;
+ size_t CS2 = CS1 * OD;
+ size_t CS3 = CS2 * OC;
- if (iw % SW != 0 || ih % SH != 0)
- continue;
+ size_t CI1 = IH * IW;
+ size_t CI2 = CI1 * ID;
+ size_t CI3 = CI2 * IC;
- iw /= SW;
- ih /= SH;
+ size_t CK1 = KH * KW;
+ size_t CK2 = CK1 * KD;
+ size_t CK3 = CK2 * (OC / G);
+ size_t CK4 = CK3 * (IC / G);
- if (ih < IH && iw < IW) {
- size_t sidx = mb * IC * IH * IW
- + (g * IC / G + ic) * IH * IW + ih * IW
- + iw;
-
- size_t widx = g * (IC / G) * (OC / G) * KH * KW +
- ic * (OC / G) * KH * KW +
- + oc * KH * KW + kh * KW
- + kw;
-
- dst_data[didx] += src_data[sidx] * weights_data[widx];
+ for (int g = 0; g < G; ++g) {
+ for (int mb = 0; mb < MB; ++mb) {
+ for (int oc = 0; oc < OC / G; ++oc) {
+ for (int od = 0; od < OD; ++od) {
+ for (int oh = 0; oh < OH; ++oh) {
+ for (int ow = 0; ow < OW; ++ow) {
+ size_t didx = mb * CS3
+ + (g * OC / G + oc) * CS2
+ + od * CS1
+ + oh * OW
+ + ow;
+
+ dst_data[didx] = data_t(0);
+ if (prm.with_bias) dst_data[didx] += bias_data[g * OC / G + oc];
+
+ for (int ic = 0; ic < IC / G; ic++) {
+ for (int kd = 0; kd < KD; kd++) {
+ for (int kh = 0; kh < KH; kh++) {
+ for (int kw = 0; kw < KW; kw++) {
+ if (ow + PW < kw || oh + PH < kh || od + PD < kd)
+ continue;
+
+ size_t iw = ow - kw + PW;
+ size_t ih = oh - kh + PH;
+ size_t id = od - kd + PD;
+
+ if (iw % SW != 0 || ih % SH != 0 || id % SD != 0)
+ continue;
+
+ iw /= SW;
+ ih /= SH;
+ id /= SD;
+
+ if (ih < IH && iw < IW && id < ID) {
+ size_t sidx = mb * CI3
+ + (g * IC / G + ic) * CI2
+ + id * CI1
+ + ih * IW
+ + iw;
+
+ size_t widx = g * CK4
+ + ic * CK3
+ + oc * CK2
+ + kd * CK1
+ + kh * KW
+ + kw;
+
+ dst_data[didx] += src_data[sidx] * weights_data[widx];
+ }
+ }
}
}
}
@@ -128,42 +155,32 @@ void ref_deconv(const InferenceEngine::TBlob<data_t> &src, const InferenceEngine
class MKLDNNGraphDeconvolutionalTests: public TestsCommon,
public WithParamInterface<deconv_test_params> {
- std::string model_t = R"V0G0N(
-<Net Name="Deconvolution_Only" version="2" precision="FP32" batch="1">
+ std::string model_t_5D = R"V0G0N(
+<net name="Deconvolution_Only" version="3" precision="FP32" batch="1">
<layers>
<layer name="in1" type="Input" precision="FP32" id="0">
<output>
- <port id="0">
- <dim>_IN_</dim>
- <dim>_IC_</dim>
- <dim>_IH_</dim>
- <dim>_IW_</dim>
+ <port id="0">__SRC_DIMS__
</port>
</output>
</layer>
<layer name="deconv1" id="1" type="Deconvolution" precision="FP32">
- <deconvolution stride-x="_SW_" stride-y="_SH_"
- pad-x="_PW_" pad-y="_PH_"
- kernel-x="_KW_" kernel-y="_KH_"
- output="_OC_" group="_GC_"/>
+ <deconvolution _AP_ kernel="_K_"
+ pads_begin="_PB_" pads_end="_PE_"
+ strides="_KS_"
+ output="_OC_" group="_GC_" PrimitivesPriority="_IMPLS_"/>
<weights offset="0" size="_S1_" />
- <biases offset="_OFF2_" size="_S2_" />
+ <biases offset="_S1_" size="_S2_" />
<input>
- <port id="1">
- <dim>_IN_</dim>
- <dim>_IC_</dim>
- <dim>_IH_</dim>
- <dim>_IW_</dim>
+ <port id="1">__SRC_DIMS__
</port>
</input>
<output>
<port id="2">
<dim>_IN_</dim>
- <dim>_OC_</dim>
- <dim>_OH_</dim>
- <dim>_OW_</dim>
+ <dim>_OC_</dim>__DST_DIMS__
</port>
</output>
</layer>
@@ -171,38 +188,61 @@ class MKLDNNGraphDeconvolutionalTests: public TestsCommon,
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
</edges>
-</Net>
+</net>
)V0G0N";
protected:
std::string getModel(deconv_test_params p) {
- std::string model = model_t;
-
- REPLACE_WITH_NUM(model, "_IW_", p.in.w);
- REPLACE_WITH_NUM(model, "_IH_", p.in.h);
- REPLACE_WITH_NUM(model, "_IC_", p.in.c);
- REPLACE_WITH_NUM(model, "_IN_", p.in.n);
+ std::string model = model_t_5D;
+ auto dims_size = p.dims.size();
+ std::string s_dims;
+ for (auto& dim : p.dims) {
+ s_dims += "\n <dim>";
+ s_dims += std::to_string(dim) + "</dim>";
+ }
+ REPLACE_WITH_STR(model, "__SRC_DIMS__", s_dims);
+
+ s_dims = "";
+ int k_len = p.kernel.size();
+ for (size_t i = 2; i < p.dims.size(); i++) {
+ size_t inx = k_len - i + 1;
+ size_t dim = p.strides[inx] * (p.dims[i] - 1) + p.kernel[inx] - 2 * p.pads_begin[inx];
+ s_dims += "\n <dim>";
+ s_dims += std::to_string(dim) + "</dim>";
+ }
+ REPLACE_WITH_STR(model, "__DST_DIMS__", s_dims);
+ REPLACE_WITH_NUM(model, "_IN_", p.dims[0]);
- REPLACE_WITH_NUM(model, "_KW_", p.krn_w);
- REPLACE_WITH_NUM(model, "_KH_", p.krn_h);
- REPLACE_WITH_NUM(model, "_SW_", p.str_w);
- REPLACE_WITH_NUM(model, "_SH_", p.str_h);
- REPLACE_WITH_NUM(model, "_PW_", p.pad_w);
- REPLACE_WITH_NUM(model, "_PH_", p.pad_h);
+ if (!p.with_bias) REMOVE_LINE(model, "<biases offset=\"_S1_\" size=\"_S2_\" />");
+ REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_K_", p.kernel);
+ REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_KS_", p.strides);
+ REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PB_", p.pads_begin);
+ REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PE_", p.pads_end);
REPLACE_WITH_NUM(model, "_GC_", p.grp_c);
REPLACE_WITH_NUM(model, "_OC_", p.out_c);
- REPLACE_WITH_NUM(model, "_OH_", p.str_h * (p.in.h - 1) + p.krn_h - 2 * p.pad_h);
- REPLACE_WITH_NUM(model, "_OW_", p.str_w * (p.in.w - 1) + p.krn_w - 2 * p.pad_w);
+ string auto_pad;
+ if (!p.auto_pad.empty()) auto_pad = string("auto_pad=") + string("\"") + p.auto_pad + string("\"");
+ REPLACE_WITH_STR(model, "_AP_", auto_pad);
- size_t w_data_size = (p.krn_w * p.krn_h * p.out_c * (p.in.c / p.grp_c)) * sizeof(float);
+ size_t blob_size = p.out_c * (p.dims[1] / p.grp_c);
+ for (auto k : p.kernel) {
+ blob_size *= k;
+ }
+ size_t w_data_size = blob_size * sizeof(float);
REPLACE_WITH_NUM(model, "_S1_", w_data_size);
- if (!p.with_bias) REMOVE_LINE(model, "<biases offset=\"_OFF2_\" size=\"_S2_\" />");
size_t b_data_size = p.out_c * sizeof(float);
- REPLACE_WITH_NUM(model, "_OFF2_", w_data_size);
REPLACE_WITH_NUM(model, "_S2_", b_data_size);
+ std::string impls;
+ for (const auto& preferType : p.preferTypes) {
+ if (!impls.empty())
+ impls += ",";
+ impls += "cpu:" + MKLDNNGraphTestClass::getStrPrimitiveDescriptorType(preferType);
+ }
+ REPLACE_WITH_STR(model, "_IMPLS_", impls);
+
return model;
}
@@ -218,7 +258,11 @@ protected:
InferenceEngine::CNNNetReader net_reader;
ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
- InferenceEngine::SizeVector dims_weights = {p.krn_w * p.krn_h * p.out_c * (p.in.c / p.grp_c)};
+ size_t blob_size = p.out_c * (p.dims[1] / p.grp_c);
+ for (auto k : p.kernel) {
+ blob_size *= k;
+ }
+ InferenceEngine::SizeVector dims_weights = { blob_size };
std::vector<InferenceEngine::Blob::Ptr> blob_to_model;
InferenceEngine::Blob::Ptr weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32, InferenceEngine::C, dims_weights);
@@ -262,9 +306,18 @@ protected:
}
}
- InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w};
+ InferenceEngine::SizeVector dims_src = p.dims;
- InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
+ InferenceEngine::Layout layout = ANY;
+ switch (p.dims.size()) {
+ case 4:
+ layout = InferenceEngine::NCHW;
+ break;
+ case 5:
+ layout = InferenceEngine::NCDHW;
+ break;
+ }
+ InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, dims_src);
src->allocate();
fill_data(src->buffer(), src->size());
@@ -294,7 +347,7 @@ protected:
ref_deconv(*srcPtr, weights, bias, dst_ref, p);
- compare(*output, dst_ref);
+ compare(*output, dst_ref, 0.0002f);
} catch (const InferenceEngine::details::InferenceEngineException &e) {
FAIL() << e.what();
}
@@ -307,24 +360,59 @@ TEST_P(MKLDNNGraphDeconvolutionalTests, TestsDeconvolution) {}
INSTANTIATE_TEST_CASE_P(
TestDeconvolution, MKLDNNGraphDeconvolutionalTests,
::testing::Values(
- deconv_test_params{{1, 3, 3, 3}, 3, 3, 1, 1, 0, 0, 2, 1, false, 2, {MKLDNNPlugin::impl_desc_type::jit} },
- deconv_test_params{{3, 3, 3, 3}, 4, 3, 1, 1, 0, 0, 2, 1, false, 2, {MKLDNNPlugin::impl_desc_type::jit} },
- deconv_test_params{{1, 3, 3, 3}, 4, 3, 1, 2, 0, 0, 2, 1, false, 2, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
- deconv_test_params{{1, 3, 3, 3}, 4, 3, 2, 2, 0, 0, 2, 1, false, 2, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
- deconv_test_params{{4, 17, 3, 3}, 4, 3, 2, 2, 0, 0, 2, 1, false, 2, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
- /*deconv_test_params{{2, 8, 5, 5}, 4, 4, 2, 2, 1, 1, 8, 2, false, 3, {MKLDNNPlugin::impl_desc_type::gemm}},*/
- deconv_test_params{{2, 8, 5, 5}, 4, 4, 2, 2, 1, 1, 8, 8, false, 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
- deconv_test_params{{2, 8, 5, 5}, 8, 8, 4, 4, 1, 1, 8, 8, false, 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
- deconv_test_params{{2, 8, 5, 5}, 4, 8, 2, 4, 1, 1, 8, 8, false, 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
- deconv_test_params{{1, 3, 3, 3}, 3, 3, 1, 1, 0, 0, 2, 1, true, 2, {MKLDNNPlugin::impl_desc_type::jit} },
- deconv_test_params{{3, 3, 3, 3}, 4, 3, 1, 1, 0, 0, 2, 1, true, 2, {MKLDNNPlugin::impl_desc_type::jit} },
- deconv_test_params{{1, 3, 3, 3}, 4, 3, 1, 2, 0, 0, 2, 1, true, 2, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
- deconv_test_params{{1, 3, 3, 3}, 4, 3, 2, 2, 0, 0, 2, 1, true, 2, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
- deconv_test_params{{4, 17, 3, 3}, 4, 3, 2, 2, 0, 0, 2, 1, true, 2, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
- /*deconv_test_params{{2, 8, 5, 5}, 4, 4, 2, 2, 1, 1, 8, 2, true, 3, {MKLDNNPlugin::impl_desc_type::gemm}},*/
- deconv_test_params{{2, 8, 5, 5}, 4, 4, 2, 2, 1, 1, 8, 8, true, 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
- deconv_test_params{{2, 8, 5, 5}, 8, 8, 4, 4, 1, 1, 8, 8, true, 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
- deconv_test_params{{2, 8, 5, 5}, 4, 8, 2, 4, 1, 1, 8, 8, true, 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}}
+ /*0*/ deconv_test_params{{1, 3, 3, 3}, {3, 3}, {1, 1}, {0, 0}, {0, 0}, 2, 1, false, "", 2, {MKLDNNPlugin::impl_desc_type::jit} },
+ deconv_test_params{{3, 3, 3, 3}, {4, 3}, {1, 1}, {0, 0}, {0, 0}, 2, 1, false, "", 2, {MKLDNNPlugin::impl_desc_type::jit} },
+ deconv_test_params{{1, 3, 3, 3}, {4, 3}, {1, 2}, {0, 0}, {0, 0}, 2, 1, false, "", 2, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
+ deconv_test_params{{1, 3, 3, 3}, {4, 3}, {2, 2}, {0, 0}, {0, 0}, 2, 1, false, "", 2, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
+ deconv_test_params{{4, 17, 3, 3}, {4, 3}, {2, 2}, {0, 0}, {0, 0}, 2, 1, false, "", 2, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
+ deconv_test_params{{2, 8, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 8, 8, false, "", 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
+ deconv_test_params{{2, 8, 5, 5}, {8, 8}, {4, 4}, {1, 1}, {0, 0}, 8, 8, false, "", 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
+ deconv_test_params{{2, 8, 5, 5}, {4, 8}, {2, 4}, {1, 1}, {0, 0}, 8, 8, false, "", 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
+ /*8*/ deconv_test_params{{1, 3, 3, 3}, {3, 3}, {1, 1}, {0, 0}, {0, 0}, 2, 1, true, "", 2, {MKLDNNPlugin::impl_desc_type::jit} },
+ deconv_test_params{{3, 3, 3, 3}, {4, 3}, {1, 1}, {0, 0}, {0, 0}, 2, 1, true, "", 2, {MKLDNNPlugin::impl_desc_type::jit} },
+ deconv_test_params{{1, 3, 3, 3}, {4, 3}, {1, 2}, {0, 0}, {0, 0}, 2, 1, true, "", 2, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
+ deconv_test_params{{1, 3, 3, 3}, {4, 3}, {2, 2}, {0, 0}, {0, 0}, 2, 1, true, "", 2, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
+ deconv_test_params{{4, 17, 3, 3}, {4, 3}, {2, 2}, {0, 0}, {0, 0}, 2, 1, true, "", 2, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
+ deconv_test_params{{2, 8, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 8, 8, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
+ deconv_test_params{{2, 8, 5, 5}, {8, 8}, {4, 4}, {1, 1}, {0, 0}, 8, 8, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
+ deconv_test_params{{2, 8, 5, 5}, {4, 8}, {2, 4}, {1, 1}, {0, 0}, 8, 8, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
+ deconv_test_params{{1, 3, 3, 3}, {3, 3}, {1, 1}, {0, 0}, {0, 0}, 2, 1, false, "", 2, {MKLDNNPlugin::impl_desc_type::ref_any},
+ {MKLDNNPlugin::impl_desc_type::ref_any}},
+ /*17*/ deconv_test_params{{2, 8, 5, 5}, {1, 3}, {1, 1}, {0, 1}, {0, 1}, 8, 8, true, "", 2,
+ {MKLDNNPlugin::impl_desc_type::ref_any}, {MKLDNNPlugin::impl_desc_type::ref_any}},
+ deconv_test_params{{1, 6, 6, 5}, {3, 1}, {1, 1}, {1, 0}, {1, 0}, 9, 3, true, "", 2,
+ {MKLDNNPlugin::impl_desc_type::ref_any}, {MKLDNNPlugin::impl_desc_type::ref_any}},
+ deconv_test_params{{2, 24, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 24, 3, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit}},
+ deconv_test_params{{2, 24, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 24, 1, true, "", 3, {MKLDNNPlugin::impl_desc_type::jit}},
+ deconv_test_params{{2, 72, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 72, 3, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit}},
+ deconv_test_params{{1, 12, 2, 2}, {4, 4}, {2, 2}, {1, 1}, {1, 1}, 12, 12, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit}},
+#ifdef USE_MKL
+ deconv_test_params{{2, 8, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 8, 2, false, "", 3, {MKLDNNPlugin::impl_desc_type::gemm}},
+ deconv_test_params{{2, 8, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 8, 2, true, "", 3, {MKLDNNPlugin::impl_desc_type::gemm}},
+ deconv_test_params{{1, 6, 6, 5}, {3, 1}, {1, 1}, {1, 0}, {1, 0}, 9, 3, true, "", 2,
+ {MKLDNNPlugin::impl_desc_type::gemm_blas}},
+ deconv_test_params{{1, 64, 12, 12, 2}, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, {1, 0, 0}, 32, 1, true, "", 4,
+ {MKLDNNPlugin::impl_desc_type::gemm_blas}},
+ deconv_test_params{{1, 32, 12, 12, 2}, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, {1, 0, 0}, 16, 1, true, "", 4,
+ {MKLDNNPlugin::impl_desc_type::gemm_blas} },
+ deconv_test_params{{1, 25, 1, 1, 1}, {4, 4, 4}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, 64, 1, true, "valid", 3,
+ {MKLDNNPlugin::impl_desc_type::gemm_blas} },
+ deconv_test_params{{1, 32, 16, 16, 16}, {4, 4, 4}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, 1, 1, true, "same_upper", 3,
+ {MKLDNNPlugin::impl_desc_type::gemm_blas} },
+ deconv_test_params{{1, 64, 12, 12, 2}, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, {1, 0, 0}, 32, 1, true, "same_upper", 3,
+ {MKLDNNPlugin::impl_desc_type::gemm_blas} },
+ deconv_test_params{{1, 50, 1, 1, 1}, {4, 4, 4}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, 128, 1, true, "", 3,
+ {MKLDNNPlugin::impl_desc_type::gemm_blas}, {MKLDNNPlugin::impl_desc_type::gemm_blas}},
+#endif
+ // 5D
+ deconv_test_params{{1, 2, 8, 5, 5}, {3, 3, 3}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, 4, 1, true, "", 4,
+ {MKLDNNPlugin::impl_desc_type::ref_any}, {MKLDNNPlugin::impl_desc_type::ref_any} }
+
+        // Blocked, with biases
+        // TODO: enable these once the jit implementation supports them
+// deconv_test_params{{2, 24, 5, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 24, 3, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit}},
+// deconv_test_params{{2, 24, 5, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 24, 1, true, "", 3, {MKLDNNPlugin::impl_desc_type::jit}},
+// deconv_test_params{{2, 72, 5, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 72, 3, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit}}
));
class MKLDNNGraphDynBatchDeconvolutionalTests: public MKLDNNGraphDeconvolutionalTests {
@@ -334,14 +422,18 @@ protected:
TestsCommon::SetUp();
deconv_test_params p = ::testing::WithParamInterface<deconv_test_params>::GetParam();
std::string model = getModel(p);
- size_t MB = p.in.n;
+ size_t MB = p.dims[0];
if (MB < 2)
MB = 2;
InferenceEngine::CNNNetReader net_reader;
ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
- InferenceEngine::SizeVector dims_weights = {p.krn_w * p.krn_h * p.out_c * (p.in.c / p.grp_c)};
+
+ size_t blob_size = 1;
+ for (auto k : p.kernel) {
+ blob_size *= k;
+ }
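+ // Illustrative sizing (assumed example, not tied to a specific case below):
+ // kernel {4, 4}, out_c = 8, in_c = 8, grp_c = 2 gives 4*4 * 8 * (8/2) = 512 weight values.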
+ InferenceEngine::SizeVector dims_weights = {blob_size * p.out_c * (p.dims[1] / p.grp_c)};
std::vector<InferenceEngine::Blob::Ptr> blob_to_model;
InferenceEngine::Blob::Ptr weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32, InferenceEngine::C, dims_weights);
@@ -379,8 +471,18 @@ protected:
graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
graph.CreateGraph(net_reader.getNetwork());
- InferenceEngine::SizeVector dims_src = {MB, p.in.c, p.in.h, p.in.w};
- InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
+ InferenceEngine::SizeVector dims_src = p.dims;
+
+ InferenceEngine::Layout layout = InferenceEngine::ANY;
+ switch (p.dims.size()) {
+ case 4:
+ layout = InferenceEngine::NCHW;
+ break;
+ case 5:
+ layout = InferenceEngine::NCDHW;
+ break;
+ }
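+ // all params here are 4D or 5D; any other rank would keep Layout::ANY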
+ InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, dims_src);
InferenceEngine::TBlob<float>* srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
if (srcPtr == nullptr)
FAIL() << "Cannot cast blob to TBlob<float>.";
@@ -419,13 +521,13 @@ TEST_P(MKLDNNGraphDynBatchDeconvolutionalTests, TestsDynBatchDeconvolutional) {}
INSTANTIATE_TEST_CASE_P(
TestsDynBatchDeconvolutional, MKLDNNGraphDynBatchDeconvolutionalTests,
::testing::Values(
- deconv_test_params{{1, 3, 3, 3}, 3, 3, 1, 1, 0, 0, 2, 1, false, 5, {MKLDNNPlugin::impl_desc_type::jit} },
- deconv_test_params{{3, 3, 3, 3}, 4, 3, 1, 1, 0, 0, 2, 1, false, 5, {MKLDNNPlugin::impl_desc_type::jit} },
- deconv_test_params{{1, 3, 3, 3}, 4, 3, 1, 2, 0, 0, 2, 1, false, 4, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
- deconv_test_params{{1, 3, 3, 3}, 4, 3, 2, 2, 0, 0, 2, 1, false, 3, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
- deconv_test_params{{4, 17, 3, 3}, 4, 3, 2, 2, 0, 0, 2, 1, false, 3, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
- deconv_test_params{{2, 8, 5, 5}, 4, 4, 2, 2, 1, 1, 8, 2, false, 3, {MKLDNNPlugin::impl_desc_type::gemm}},
- deconv_test_params{{2, 8, 5, 5}, 4, 4, 2, 2, 1, 1, 8, 8, false, 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
- deconv_test_params{{2, 8, 5, 5}, 8, 8, 4, 4, 1, 1, 8, 8, false, 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
- deconv_test_params{{2, 8, 5, 5}, 4, 8, 2, 4, 1, 1, 8, 8, false, 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}}
+ deconv_test_params{{1, 3, 3, 3}, {3, 3}, {1, 1}, {0, 0}, {0, 0}, 2, 1, false, "", 5, {MKLDNNPlugin::impl_desc_type::jit} },
+ deconv_test_params{{3, 3, 3, 3}, {4, 3}, {1, 1}, {0, 0}, {0, 0}, 2, 1, false, "", 5, {MKLDNNPlugin::impl_desc_type::jit} },
+ deconv_test_params{{1, 3, 3, 3}, {4, 3}, {1, 2}, {0, 0}, {0, 0}, 2, 1, false, "", 4, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
+ deconv_test_params{{1, 3, 3, 3}, {4, 3}, {2, 2}, {0, 0}, {0, 0}, 2, 1, false, "", 3, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
+ deconv_test_params{{4, 17, 3, 3}, {4, 3}, {2, 2}, {0, 0}, {0, 0}, 2, 1, false, "", 3, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
+ deconv_test_params{{2, 8, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 8, 2, false, "", 3, {MKLDNNPlugin::impl_desc_type::gemm}},
+ deconv_test_params{{2, 8, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 8, 8, false, "", 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
+ deconv_test_params{{2, 8, 5, 5}, {8, 8}, {4, 4}, {1, 1}, {0, 0}, 8, 8, false, "", 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
+ deconv_test_params{{2, 8, 5, 5}, {4, 8}, {2, 4}, {1, 1}, {0, 0}, 8, 8, false, "", 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}}
));
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_depthwise_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_depthwise_test.cpp
index 4f07b8b1f..27bd24195 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_depthwise_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_depthwise_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -19,12 +18,8 @@ using namespace mkldnn;
struct depthwise_test_params {
mkldnn::algorithm alg;
- struct {
- size_t n;
- size_t c;
- size_t h;
- size_t w;
- } in;
+ // Formats: NCHW, NCDHW
+ vector<size_t> dims;
bool isBroadcast;
@@ -39,8 +34,11 @@ struct depthwise_test_params {
template <typename data_t>
void ref_depthwise(const InferenceEngine::TBlob<data_t> &src, const data_t *weights, const size_t weightsSize,
InferenceEngine::TBlob<data_t> &dst, depthwise_test_params prm) {
- size_t IW = src.dims()[3];
- size_t IH = src.dims()[2];
+ auto dims_size = src.dims().size();
+
+ size_t IW = src.dims()[dims_size - 1];
+ size_t IH = src.dims()[dims_size - 2];
+ size_t ID = dims_size == 5 ? src.dims()[dims_size - 3] : 1u;
size_t IC = src.dims()[1];
size_t MB = src.dims()[0];
@@ -50,21 +48,28 @@ void ref_depthwise(const InferenceEngine::TBlob<data_t> &src, const data_t *weig
const data_t *bias_data = weights_data + bias_offset;
data_t *dst_data = dst.data();
- for(int mb = 0; mb < MB; mb++) {
- for(int c = 0; c < IC; c++) {
- for(int h = 0; h < IH; h++) {
- for(int w = 0; w < IW; w++) {
- int idx = mb * IC * IH * IW
- + c * IH * IW
- + h * IW + w;
-
- int widx = prm.isBroadcast ? 0 : c;
- int bidx = prm.isBroadcast ? 0 : c;
-
- if (prm.alg == depthwise_scale_shift)
- dst_data[idx] = src_data[idx] * weights_data[widx] + bias_data[bidx];
- else if (prm.alg == depthwise_prelu)
- dst_data[idx] = src_data[idx] > 0 ? src_data[idx] : src_data[idx]*weights_data[widx];
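+ // NCDHW flat offset: idx = (((mb*IC + c)*ID + d)*IH + h)*IW + w;
+ // e.g. for dims {2, 8, 4, 5, 5}, element (1, 2, 3, 0, 0) -> ((1*8 + 2)*4 + 3)*25 = 1075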
+ size_t c1 = IH * IW; // stride of one depth slice
+ size_t c2 = ID * c1; // stride of one channel
+ size_t c3 = IC * c2; // stride of one batch
+ for (int mb = 0; mb < MB; mb++) {
+ size_t m1 = mb * c3;
+ for (int c = 0; c < IC; c++) {
+ size_t m2 = m1 + c * c2;
+ for (int d = 0; d < ID; d++) {
+ size_t m3 = m2 + d * c1;
+ for (int h = 0; h < IH; h++) {
+ size_t m4 = m3 + h * IW;
+ for (int w = 0; w < IW; w++) {
+ size_t idx = m4 + w;
+
+ int widx = prm.isBroadcast ? 0 : c;
+ int bidx = prm.isBroadcast ? 0 : c;
+
+ if (prm.alg == depthwise_scale_shift)
+ dst_data[idx] = src_data[idx] * weights_data[widx] + bias_data[bidx];
+ else if (prm.alg == depthwise_prelu)
+ dst_data[idx] = src_data[idx] > 0 ? src_data[idx] : src_data[idx]*weights_data[widx];
+ }
}
}
}
@@ -73,7 +78,7 @@ void ref_depthwise(const InferenceEngine::TBlob<data_t> &src, const data_t *weig
class MKLDNNGraphDepthwiseTests: public TestsCommon,
public WithParamInterface<depthwise_test_params> {
- std::string model_t = R"V0G0N(
+ std::string model_t_4D = R"V0G0N(
<Net Name="Lrn_Only" version="2" precision="FP32" batch="1">
<layers>
<layer name="in1" type="Input" precision="FP32" id="0">
@@ -115,14 +120,72 @@ class MKLDNNGraphDepthwiseTests: public TestsCommon,
</Net>
)V0G0N";
+
+ std::string model_t_5D = R"V0G0N(
+<Net Name="Lrn_Only" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="in1" type="Input" precision="FP32" id="0">
+ <output>
+ <port id="0">
+ <dim>_IN_</dim>
+ <dim>_IC_</dim>
+ <dim>_ID_</dim>
+ <dim>_IH_</dim>
+ <dim>_IW_</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="depthwise" id="1" type="_LT_" precision="FP32">
+ <data _P_NAME_="_P_VAL_" PrimitivesPriority="_IMPLS_"/>
+ <weights offset="0" size="_S1_" />
+ <biases offset="_S1_" size="_S2_" />
+
+ <input>
+ <port id="1">
+ <dim>_IN_</dim>
+ <dim>_IC_</dim>
+ <dim>_ID_</dim>
+ <dim>_IH_</dim>
+ <dim>_IW_</dim>
+ </port>
+ </input>
+ <output>
+ <port id="2">
+ <dim>_IN_</dim>
+ <dim>_IC_</dim>
+ <dim>_ID_</dim>
+ <dim>_IH_</dim>
+ <dim>_IW_</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
+ </edges>
+</Net>
+)V0G0N";
+
protected:
std::string getModel(depthwise_test_params p) {
- std::string model = model_t;
+ std::string model;
+ auto dims_size = p.dims.size();
+ if (dims_size == 4) {
+ model = model_t_4D;
+ } else if (dims_size == 5) {
+ model = model_t_5D;
+ }
+
+ REPLACE_WITH_NUM(model, "_IW_", p.dims[dims_size - 1]);
+ REPLACE_WITH_NUM(model, "_IC_", p.dims[1]);
+ REPLACE_WITH_NUM(model, "_IN_", p.dims[0]);
- REPLACE_WITH_NUM(model, "_IW_", p.in.w);
- REPLACE_WITH_NUM(model, "_IH_", p.in.h);
- REPLACE_WITH_NUM(model, "_IC_", p.in.c);
- REPLACE_WITH_NUM(model, "_IN_", p.in.n);
+ switch (dims_size) {
+ case 5:
+ REPLACE_WITH_NUM(model, "_ID_", p.dims[dims_size - 3]);
+ case 4:
+ REPLACE_WITH_NUM(model, "_IH_", p.dims[dims_size - 2]);
+ }
if (p.alg == depthwise_scale_shift) {
REPLACE_WITH_STR(model, "_LT_", "ScaleShift");
@@ -135,7 +198,7 @@ protected:
REPLACE_WITH_NUM(model, "_P_VAL_", p.isBroadcast ? 1 : 0);
}
- size_t array_size = p.isBroadcast ? 1 : p.in.c;
+ size_t array_size = p.isBroadcast ? 1 : p.dims[1];
size_t w_data_size = array_size * sizeof(float);
size_t b_data_size = array_size * sizeof(float);
REPLACE_WITH_NUM(model, "_S1_", w_data_size);
@@ -161,7 +224,7 @@ protected:
InferenceEngine::CNNNetReader net_reader;
ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
- size_t weightSize = 2*p.in.c*sizeof(float);
+ size_t weightSize = 2 * p.dims[1] * sizeof(float);
InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, {weightSize});
weights->allocate();
fill_data( weights->data().as<float*>(), weights->size() / sizeof(float));
@@ -185,9 +248,18 @@ protected:
}
}
- InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w};
+ InferenceEngine::SizeVector dims_src = p.dims;
+ InferenceEngine::Layout layout = InferenceEngine::ANY;
+ switch (p.dims.size()) {
+ case 4:
+ layout = InferenceEngine::NCHW;
+ break;
+ case 5:
+ layout = InferenceEngine::NCDHW;
+ break;
+ }
- InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
+ InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, dims_src);
src->allocate();
fill_data(src->buffer(), src->size());
@@ -252,7 +324,17 @@ INSTANTIATE_TEST_CASE_P(
depthwise_test_params{depthwise_prelu, {1, 1, 1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
depthwise_test_params{depthwise_prelu, {1, 4, 5, 5}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
depthwise_test_params{depthwise_prelu, {4, 4, 10, 10}, true, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
- depthwise_test_params{depthwise_prelu, {1, 32, 128, 256}, true, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}
+ depthwise_test_params{depthwise_prelu, {1, 32, 128, 256}, true, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+ // 5D
+ // mkl-dnn does not support 5D depthwise on jit yet
+// depthwise_test_params{depthwise_scale_shift, {1, 32, 16, 128, 256}, false, 3, MKLDNNPlugin::impl_desc_type::jit},
+// depthwise_test_params{depthwise_scale_shift, {4, 3, 16, 228, 228}, false, 3, MKLDNNPlugin::impl_desc_type::jit},
+// depthwise_test_params{depthwise_scale_shift, {1, 1, 1, 1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::jit},
+// depthwise_test_params{depthwise_scale_shift, {4, 4, 4, 10, 10}, true, 3, MKLDNNPlugin::impl_desc_type::jit},
+// depthwise_test_params{depthwise_scale_shift, {1, 32, 16, 128, 256}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+// depthwise_test_params{depthwise_scale_shift, {4, 3, 16, 228, 228}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+ depthwise_test_params{depthwise_scale_shift, {1, 1, 1, 1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+ depthwise_test_params{depthwise_scale_shift, {4, 4, 4, 10, 10}, true, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}
));
class MKLDNNGraphDynBatchDepthwiseTests: public MKLDNNGraphDepthwiseTests {
@@ -263,14 +345,14 @@ protected:
TestsCommon::SetUp();
depthwise_test_params p = ::testing::WithParamInterface<depthwise_test_params>::GetParam();
std::string model = getModel(p);
- size_t MB = p.in.n;
+ size_t MB = p.dims[0];
if (MB < 2)
MB = 2;
InferenceEngine::CNNNetReader net_reader;
ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
- InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, {p.in.c * 4 * sizeof(float)});
+ InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, {p.dims[1] * 4 * sizeof(float)});
weights->allocate();
fill_data( weights->data().as<float*>(), weights->size() / sizeof(float));
float * data = weights->buffer();
@@ -293,8 +375,17 @@ protected:
graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
graph.CreateGraph(net_reader.getNetwork());
- InferenceEngine::SizeVector dims_src = {MB, p.in.c, p.in.h, p.in.w};
- InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
+ InferenceEngine::SizeVector dims_src = p.dims;
+ InferenceEngine::Layout layout = InferenceEngine::ANY;
+ switch (p.dims.size()) {
+ case 4:
+ layout = InferenceEngine::NCHW;
+ break;
+ case 5:
+ layout = InferenceEngine::NCDHW;
+ break;
+ }
+ InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, dims_src);
InferenceEngine::TBlob<float>* srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
if (srcPtr == nullptr)
FAIL() << "Cannot cast blob to TBlob<float>.";
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_eltwise_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_eltwise_test.cpp
index ebb2df41c..e1d288db2 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_eltwise_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_eltwise_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -19,12 +18,8 @@ using namespace std;
using namespace mkldnn;
struct eltwise_test_params {
- struct {
- size_t n;
- size_t c;
- size_t h;
- size_t w;
- } in;
+ // Formats: NCHW, NCDHW
+ vector<size_t> dims;
enum opType {
Sum = 0, Prod = 1, Max = 2
@@ -100,66 +95,38 @@ void ref_eltwise(const std::vector<InferenceEngine::TBlob<data_t>> &src, Inferen
class MKLDNNGraphEltwiseTests: public TestsCommon,
public WithParamInterface<eltwise_test_params> {
std::string model_t = R"V0G0N(
-<net name="EltwiseOnly" version="2" precision="FP32" batch="1">
+<net name="EltwiseOnly" version="3" precision="FP32" batch="1">
<layers>
<layer name="in1" type="Input" precision="FP32" id="1">
<output>
- <port id="1">
- <dim>_IN_</dim>
- <dim>_IC_</dim>
- <dim>_IH_</dim>
- <dim>_IW_</dim>
+ <port id="1">__SRC_DIMS__
</port>
</output>
</layer>
<layer name="in2" type="Input" precision="FP32" id="2">
<output>
- <port id="2">
- <dim>_IN_</dim>
- <dim>_IC_</dim>
- <dim>_IH_</dim>
- <dim>_IW_</dim>
+ <port id="2">__SRC_DIMS__
</port>
</output>
</layer>
<layer name="in3" type="Input" precision="FP32" id="3">
<output>
- <port id="3">
- <dim>_IN_</dim>
- <dim>_IC_</dim>
- <dim>_IH_</dim>
- <dim>_IW_</dim>
+ <port id="3">__SRC_DIMS__
</port>
</output>
</layer>
<layer name="con" id="4" type="Eltwise" precision="FP32">
- <elementwise_data operation="_OP_" coeff="_COEFF_"/>
+ <data operation="_OP_" _COEFF_/>
<input>
- <port id="1">
- <dim>_IN_</dim>
- <dim>_IC_</dim>
- <dim>_IH_</dim>
- <dim>_IW_</dim>
+ <port id="1">__SRC_DIMS__
</port>
- <port id="2">
- <dim>_IN_</dim>
- <dim>_IC_</dim>
- <dim>_IH_</dim>
- <dim>_IW_</dim>
+ <port id="2">__SRC_DIMS__
</port>
- <port id="3">
- <dim>_IN_</dim>
- <dim>_IC_</dim>
- <dim>_IH_</dim>
- <dim>_IW_</dim>
+ <port id="3">__SRC_DIMS__
</port>
</input>
<output>
- <port id="4">
- <dim>_IN_</dim>
- <dim>_IC_</dim>
- <dim>_IH_</dim>
- <dim>_IW_</dim>
+ <port id="4">__SRC_DIMS__
</port>
</output>
</layer>
@@ -185,12 +152,19 @@ protected:
op = "max";
}
- REPLACE_WITH_NUM(model, "_IW_", p.in.w);
- REPLACE_WITH_NUM(model, "_IH_", p.in.h);
- REPLACE_WITH_NUM(model, "_IC_", p.in.c);
- REPLACE_WITH_NUM(model, "_IN_", p.in.n);
+ std::string src_dims;
+ for (auto& dim : p.dims) {
+ src_dims += "\n <dim>";
+ src_dims += std::to_string(dim) + "</dim>";
+ }
+ REPLACE_WITH_STR(model, "__SRC_DIMS__", src_dims);
+
+ std::string scale;
+ if (!p.scales.empty()) {
+ scale = std::string("coeff=\"") + p.scales + std::string("\"");
+ }
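+ // with empty p.scales the _COEFF_ placeholder is erased, so no coeff attribute is emitted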
REPLACE_WITH_STR(model, "_OP_", op);
- REPLACE_WITH_STR(model, "_COEFF_", p.scales);
+ REPLACE_WITH_STR(model, "_COEFF_", scale);
return model;
}
@@ -221,9 +195,18 @@ protected:
}
}
- InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w};
+ InferenceEngine::SizeVector dims_src = p.dims;
+ InferenceEngine::Layout layout = InferenceEngine::ANY;
+ switch (p.dims.size()) {
+ case 4:
+ layout = InferenceEngine::NCHW;
+ break;
+ case 5:
+ layout = InferenceEngine::NCDHW;
+ break;
+ }
- InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
+ InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, dims_src);
src1->allocate();
InferenceEngine::TBlob<float>* srcPtr1 = dynamic_cast<InferenceEngine::TBlob<float>*>(src1.get());
@@ -232,7 +215,7 @@ protected:
FAIL() << "Cannot cast blob to TBlob<float>.";
fill_data(src1->buffer(), src1->size());
- InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
+ InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, dims_src);
src2->allocate();
InferenceEngine::TBlob<float>* srcPtr2 = dynamic_cast<InferenceEngine::TBlob<float>*>(src2.get());
@@ -240,7 +223,7 @@ protected:
if (srcPtr2 == nullptr)
FAIL() << "Cannot cast blob to TBlob<float>.";
fill_data(src2->buffer(), src2->size());
- InferenceEngine::Blob::Ptr src3 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
+ InferenceEngine::Blob::Ptr src3 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, dims_src);
src3->allocate();
InferenceEngine::TBlob<float>* srcPtr3 = dynamic_cast<InferenceEngine::TBlob<float>*>(src3.get());
@@ -273,7 +256,7 @@ protected:
ref_eltwise(src_vec, dst_ref, p);
- compare(*output, dst_ref);
+ compare(*output, dst_ref, 0.0005f);
} catch (const InferenceEngine::details::InferenceEngineException &e) {
FAIL() << e.what();
}
@@ -338,6 +321,17 @@ INSTANTIATE_TEST_CASE_P(
ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(2).desc.getLayout());
ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout());
}
+ } },
+ eltwise_test_params{{1, 32, 16, 16, 16}, eltwise_test_params::opType::Sum, "", 3, MKLDNNPlugin::impl_desc_type::ref, {
+ [](MKLDNNPlugin::PrimitiveDescInfo impl) {
+ ASSERT_EQ(MKLDNNPlugin::impl_desc_type::ref, impl.getImplementationType());
+ ASSERT_EQ(3, impl.getConfig().inConfs.size());
+ ASSERT_EQ(1, impl.getConfig().outConfs.size());
+ ASSERT_EQ(InferenceEngine::Layout::NCDHW, impl.getConfig().inConfs.at(0).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCDHW, impl.getConfig().inConfs.at(1).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCDHW, impl.getConfig().inConfs.at(2).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCDHW, impl.getConfig().outConfs.at(0).desc.getLayout());
+ }
} }
));
@@ -348,7 +342,7 @@ protected:
TestsCommon::SetUp();
eltwise_test_params p = ::testing::WithParamInterface<eltwise_test_params>::GetParam();
std::string model = getModel(p);
- size_t MB = p.in.n;
+ size_t MB = p.dims[0];
if (MB < 2)
MB = 2;
@@ -365,9 +359,18 @@ protected:
graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
graph.CreateGraph(net_reader.getNetwork());
- InferenceEngine::SizeVector dims_src = {MB, p.in.c, p.in.h, p.in.w};
+ InferenceEngine::SizeVector dims_src = p.dims;
+ InferenceEngine::Layout layout = InferenceEngine::ANY;
+ switch (p.dims.size()) {
+ case 4:
+ layout = InferenceEngine::NCHW;
+ break;
+ case 5:
+ layout = InferenceEngine::NCDHW;
+ break;
+ }
- InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
+ InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, dims_src);
src1->allocate();
InferenceEngine::TBlob<float>* srcPtr1 = dynamic_cast<InferenceEngine::TBlob<float>*>(src1.get());
@@ -376,7 +379,7 @@ protected:
FAIL() << "Cannot cast blob to TBlob<float>.";
fill_data(src1->buffer(), src1->size());
- InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
+ InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, dims_src);
src2->allocate();
InferenceEngine::TBlob<float>* srcPtr2 = dynamic_cast<InferenceEngine::TBlob<float>*>(src2.get());
@@ -384,7 +387,7 @@ protected:
if (srcPtr2 == nullptr)
FAIL() << "Cannot cast blob to TBlob<float>.";
fill_data(src2->buffer(), src2->size());
- InferenceEngine::Blob::Ptr src3 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
+ InferenceEngine::Blob::Ptr src3 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, dims_src);
src3->allocate();
InferenceEngine::TBlob<float>* srcPtr3 = dynamic_cast<InferenceEngine::TBlob<float>*>(src3.get());
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_fullyconnected_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_fullyconnected_test.cpp
index a4ece624b..dcf001f33 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_fullyconnected_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_fullyconnected_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -20,12 +19,8 @@ using namespace std;
using namespace mkldnn;
struct fc_test_params {
- struct {
- size_t n;
- size_t c;
- size_t h;
- size_t w;
- } in;
+ // Formats: NCHW, NCDHW
+ vector<size_t> in_dims;
size_t out_c;
@@ -41,32 +36,44 @@ struct fc_test_params {
template <typename data_t>
void ref_innerproduct(const InferenceEngine::TBlob<data_t> &src, const data_t *weights, const size_t weightsSize,
InferenceEngine::TBlob<data_t> &dst, fc_test_params prm) {
- size_t IW = src.dims()[3];
- size_t IH = src.dims()[2];
- size_t IC = src.dims()[1];
+ auto dims_size = src.dims().size();
+
size_t IB = src.dims()[0];
+ size_t IC = src.dims()[1];
+ size_t ID = dims_size == 5 ? src.dims()[dims_size - 3] : 1u;
+ size_t IH = src.dims()[dims_size - 2];
+ size_t IW = src.dims()[dims_size - 1];
size_t OC = prm.out_c;
const data_t *src_data = src.readOnly();
const data_t *weights_data = weights;
- const data_t *bias_data = weights_data + IW*IH*IC*OC;
+ const data_t *bias_data = weights_data + IW*IH*ID*IC*OC;
data_t *dst_data = dst.data();
- IE_ASSERT( IW*IH*IC*OC + OC == weightsSize);
- IE_ASSERT( OC == dst.dims()[0]);
+ IE_ASSERT( IW*IH*ID*IC*OC + OC == weightsSize );
+ IE_ASSERT( OC == dst.dims()[0] );
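+ // weights hold OC blocks of IC*ID*IH*IW values followed by OC biases, matching bias_data above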
for (size_t n = 0; n < IB; n++) {
for (size_t oc = 0; oc < OC; oc++) {
dst_data[n*OC + oc] = bias_data[oc];
for (size_t ic = 0; ic < IC; ic++) {
- for (size_t kh = 0; kh < IH; kh++) {
- for (size_t kw = 0; kw < IW; kw++) {
- size_t iidx = n * IC * IH * IW + ic * IH * IW + kh * IW + kw;
- size_t widx = oc * IC * IH * IW
- + ic * IH * IW + kh * IW + kw;
-
- dst_data[n*OC + oc] += src_data[iidx] * weights_data[widx];
+ for (size_t kd = 0; kd < ID; kd++) {
+ for (size_t kh = 0; kh < IH; kh++) {
+ for (size_t kw = 0; kw < IW; kw++) {
+ size_t iidx = n * IC * ID * IH * IW
+ + ic * ID * IH * IW
+ + kd * IH * IW
+ + kh * IW
+ + kw;
+ size_t widx = oc * IC * ID * IH * IW
+ + ic * ID * IH * IW
+ + kd * IH * IW
+ + kh * IW
+ + kw;
+
+ dst_data[n*OC + oc] += src_data[iidx] * weights_data[widx];
+ }
}
}
}
@@ -77,15 +84,11 @@ void ref_innerproduct(const InferenceEngine::TBlob<data_t> &src, const data_t *w
class MKLDNNGraphFullyConnectedTests: public TestsCommon,
public WithParamInterface<fc_test_params> {
std::string model_t = R"V0G0N(
-<Net Name="FullyConnected_Only" version="2" precision="FP32" batch="1">
+<Net Name="FullyConnected_Only" version="3" precision="FP32" batch="1">
<layers>
<layer name="in1" type="Input" precision="FP32" id="0">
<output>
- <port id="0">
- <dim>_IN_</dim>
- <dim>_IC_</dim>
- <dim>_IH_</dim>
- <dim>_IW_</dim>
+ <port id="0">__SRC_DIMS__
</port>
</output>
</layer>
@@ -96,11 +99,7 @@ class MKLDNNGraphFullyConnectedTests: public TestsCommon,
<biases offset="_S1_" size="_S2_" />
<input>
- <port id="1">
- <dim>_IN_</dim>
- <dim>_IC_</dim>
- <dim>_IH_</dim>
- <dim>_IW_</dim>
+ <port id="1">__SRC_DIMS__
</port>
</input>
<output>
@@ -120,14 +119,19 @@ class MKLDNNGraphFullyConnectedTests: public TestsCommon,
protected:
std::string getModel(fc_test_params p) {
std::string model = model_t;
- REPLACE_WITH_NUM(model, "_IW_", p.in.w);
- REPLACE_WITH_NUM(model, "_IH_", p.in.h);
- REPLACE_WITH_NUM(model, "_IC_", p.in.c);
- REPLACE_WITH_NUM(model, "_IN_", p.in.n);
+ std::string s_dims;
+ for (auto& dim : p.in_dims) {
+ s_dims += "\n <dim>";
+ s_dims += std::to_string(dim) + "</dim>";
+ }
+ REPLACE_WITH_STR(model, "__SRC_DIMS__", s_dims);
+ REPLACE_WITH_NUM(model, "_IN_", p.in_dims[0]);
REPLACE_WITH_NUM(model, "_OC_", p.out_c);
- size_t w_data_size = (p.in.w * p.in.h * p.in.c * p.out_c )* sizeof(float);
+ size_t w_data_size = p.out_c * sizeof(float);
+ for (int i = 1; i < p.in_dims.size(); i++)
+ w_data_size *= p.in_dims[i];
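+ // e.g. in_dims {1, 4, 32, 32, 32} with out_c = 10: 10 * 4*32*32*32 = 1,310,720 weight floats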
size_t b_data_size = p.out_c * sizeof(float);
REPLACE_WITH_NUM(model, "_S1_", w_data_size);
REPLACE_WITH_NUM(model, "_S2_", b_data_size);
@@ -153,7 +157,12 @@ protected:
InferenceEngine::CNNNetReader net_reader;
ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
- InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, {(p.in.w * p.in.h * p.in.c * p.out_c + p.out_c) * sizeof(float)});
+ size_t weights_size = p.out_c;
+ for (int i = 1; i < p.in_dims.size(); i++) {
+ weights_size *= p.in_dims[i];
+ }
+ weights_size = (weights_size + p.out_c) * sizeof(float);
+ InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, {weights_size});
weights->allocate();
fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
@@ -174,9 +183,18 @@ protected:
}
}
- InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w};
+ InferenceEngine::SizeVector dims_src = p.in_dims;
+ InferenceEngine::Layout layout = InferenceEngine::ANY;
+ switch (p.in_dims.size()) {
+ case 4:
+ layout = InferenceEngine::NCHW;
+ break;
+ case 5:
+ layout = InferenceEngine::NCDHW;
+ break;
+ }
- InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
+ InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, dims_src);
src->allocate();
fill_data(src->buffer(), src->size());
@@ -224,7 +242,10 @@ INSTANTIATE_TEST_CASE_P(
fc_test_params{{1, 4, 227, 227}, 10, 6, MKLDNNPlugin::impl_desc_type::gemm },
fc_test_params{{1, 3, 227, 227}, 96, 6, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
fc_test_params{{1, 4, 227, 227}, 8, 6, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
- fc_test_params{{1, 4, 227, 227}, 10, 6, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}));
+ fc_test_params{{1, 4, 227, 227}, 10, 6, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+ // 5D
+ fc_test_params{{1, 4, 32, 32, 32}, 10, 6, MKLDNNPlugin::impl_desc_type::gemm },
+ fc_test_params{{1, 3, 32, 32, 32}, 96, 6, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}));
class MKLDNNGraphDynBatchFullyConnectedTests: public MKLDNNGraphFullyConnectedTests {
virtual void SetUp() {
@@ -232,14 +253,19 @@ class MKLDNNGraphDynBatchFullyConnectedTests: public MKLDNNGraphFullyConnectedTe
TestsCommon::SetUp();
fc_test_params p = ::testing::WithParamInterface<fc_test_params>::GetParam();
std::string model = getModel(p);
- size_t MB = p.in.n;
+ size_t MB = p.in_dims[0];
if (MB < 2)
MB = 2;
InferenceEngine::CNNNetReader net_reader;
ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
- InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, {(p.in.w * p.in.h * p.in.c * p.out_c + p.out_c) * sizeof(float)});
+ size_t weights_size = p.out_c;
+ for (int i = 1; i < p.in_dims.size(); i++) {
+ weights_size *= p.in_dims[i];
+ }
+ weights_size = (weights_size + p.out_c) * sizeof(float);
+ InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, {weights_size});
weights->allocate();
fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
@@ -255,9 +281,18 @@ class MKLDNNGraphDynBatchFullyConnectedTests: public MKLDNNGraphFullyConnectedTe
graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
graph.CreateGraph(net_reader.getNetwork());
- InferenceEngine::SizeVector dims_src = {MB, p.in.c, p.in.h, p.in.w};
+ InferenceEngine::SizeVector dims_src = p.in_dims;
+ InferenceEngine::Layout layout = InferenceEngine::ANY;
+ switch (p.in_dims.size()) {
+ case 4:
+ layout = InferenceEngine::NCHW;
+ break;
+ case 5:
+ layout = InferenceEngine::NCDHW;
+ break;
+ }
- InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
+ InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, dims_src);
src->allocate();
fill_data(src->buffer(), src->size());
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_gemm_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_gemm_test.cpp
new file mode 100644
index 000000000..8a2acf042
--- /dev/null
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_gemm_test.cpp
@@ -0,0 +1,627 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <gmock/gmock-spec-builders.h>
+#include "mkldnn_plugin/mkldnn_graph.h"
+
+#include "test_graph.hpp"
+
+#include "single_layer_common.hpp"
+#include <mkldnn_plugin/mkldnn_extension_utils.h>
+#include <inference_engine/cnn_network_impl.hpp>
+#include "tests_common.hpp"
+
+using namespace ::testing;
+using namespace std;
+using namespace mkldnn;
+
+struct gemm_test_params {
+ struct {
+ size_t MB1_A;
+ size_t MB2_A;
+ size_t MB1_B;
+ size_t MB2_B;
+ size_t MB1_C;
+ size_t MB2_C;
+ size_t MB1_D;
+ size_t MB2_D;
+ } batches;
+
+ size_t M;
+ size_t N;
+ size_t K;
+
+ float alpha;
+ float beta;
+
+ bool transposeA;
+ bool transposeB;
+
+ size_t num_prim_desc;
+
+ MKLDNNPlugin::impl_desc_type selectedType;
+
+ std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
+};
+
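+// Reference batched GEMM: computes D = alpha * op(A) * op(B) + beta * C over two
+// nested batch dims (MB1 x MB2, taken from D). An input whose batch extent is 1
+// is broadcast by simply not advancing its pointer, and with only two inputs the
+// beta * C term is dropped. E.g. A {5, 3, M, K} with B {5, 1, K, N} -> D {5, 3, M, N}.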
+template<typename data_t>
+void ref_gemm(const std::vector<InferenceEngine::TBlob<data_t>> &src, InferenceEngine::TBlob<data_t> &dst,
+ gemm_test_params prm) {
+ const data_t *src0_data = src[0].readOnly();
+ const data_t *src1_data = src[1].readOnly();
+ const data_t *src2_data = src.size() == 3 ? src[2].readOnly() : dst.readOnly();
+ data_t *dst_data = dst.data();
+
+ size_t MB1 = prm.batches.MB1_D;
+ size_t MB2 = prm.batches.MB2_D;
+ size_t M = prm.M;
+ size_t N = prm.N;
+ size_t K = prm.K;
+
+ for (int mb1 = 0; mb1 < MB1; mb1++) {
+ const data_t *a_data = src0_data;
+ const data_t *b_data = src1_data;
+ const data_t *c_data = src2_data;
+ data_t *d_data = dst_data;
+
+ for (int mb2 = 0; mb2 < MB2; mb2++) {
+ for (int i = 0; i < M; i++) {
+ for (int j = 0; j < N; j++) {
+ d_data[i * N + j] = src.size() == 3 ? prm.beta * c_data[i * N + j] : 0;
+
+ for (int k = 0; k < K; k++) {
+ size_t src0_off = prm.transposeA ? k * M + i : i * K + k;
+ size_t src1_off = prm.transposeB ? j * K + k : k * N + j;
+ d_data[i * N + j] += prm.alpha * a_data[src0_off] * b_data[src1_off];
+ }
+ }
+ }
+ a_data += prm.batches.MB2_A == MB2 ? M*K : 0;
+ b_data += prm.batches.MB2_B == MB2 ? K*N : 0;
+ c_data += prm.batches.MB2_C == MB2 ? M*N : 0;
+ d_data += M*N;
+ }
+
+ src0_data += prm.batches.MB1_A == MB1 ? prm.batches.MB2_A*M*K : 0;
+ src1_data += prm.batches.MB1_B == MB1 ? prm.batches.MB2_B*K*N : 0;
+ src2_data += prm.batches.MB1_C == MB1 ? prm.batches.MB2_C*M*N : 0;
+ dst_data += prm.batches.MB2_D*M*N;
+ }
+}
+
+class MKLDNNGraphGemmTests: public TestsCommon,
+ public WithParamInterface<gemm_test_params> {
+ std::string model_t = R"V0G0N(
+<net name="gemmOnly" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="in1" type="Input" precision="FP32" id="1">
+ <output>
+ <port id="1">
+ <dim>_MB1_A_</dim>
+ <dim>_MB2_A_</dim>
+ <dim>_M_</dim>
+ <dim>_K_</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="in2" type="Input" precision="FP32" id="2">
+ <output>
+ <port id="1">
+ <dim>_MB1_B_</dim>
+ <dim>_MB2_B_</dim>
+ <dim>_K_</dim>
+ <dim>_N_</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="in3" type="Input" precision="FP32" id="3">
+ <output>
+ <port id="1">
+ <dim>_MB1_C_</dim>
+ <dim>_MB2_C_</dim>
+ <dim>_M_</dim>
+ <dim>_N_</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="gemm" id="4" type="GEMM" precision="FP32">
+ <data alpha="_A_" beta="_B_" transpose_a="_TA_" transpose_b="_TB_"/>
+ <input>
+ <port id="1">
+ <dim>_MB1_A_</dim>
+ <dim>_MB2_A_</dim>
+ <dim>_M_</dim>
+ <dim>_K_</dim>
+ </port>
+ <port id="2">
+ <dim>_MB1_B_</dim>
+ <dim>_MB2_B_</dim>
+ <dim>_K_</dim>
+ <dim>_N_</dim>
+ </port>
+ <port id="3">
+ <dim>_MB1_C_</dim>
+ <dim>_MB2_C_</dim>
+ <dim>_M_</dim>
+ <dim>_N_</dim>
+ </port>
+ </input>
+ <output>
+ <port id="4">
+ <dim>_MB1_D_</dim>
+ <dim>_MB2_D_</dim>
+ <dim>_M_</dim>
+ <dim>_N_</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="1" from-port="1" to-layer="4" to-port="1"/>
+ <edge from-layer="2" from-port="1" to-layer="4" to-port="2"/>
+ <edge from-layer="3" from-port="1" to-layer="4" to-port="3"/>
+ </edges>
+</net>
+)V0G0N";
+
+protected:
+ std::string getModel(gemm_test_params p) {
+ std::string model = model_t;
+
+ REPLACE_WITH_NUM(model, "_MB1_A_", p.batches.MB1_A);
+ REPLACE_WITH_NUM(model, "_MB2_A_", p.batches.MB2_A);
+ REPLACE_WITH_NUM(model, "_MB1_B_", p.batches.MB1_B);
+ REPLACE_WITH_NUM(model, "_MB2_B_", p.batches.MB2_B);
+ REPLACE_WITH_NUM(model, "_MB1_C_", p.batches.MB1_C);
+ REPLACE_WITH_NUM(model, "_MB2_C_", p.batches.MB2_C);
+ REPLACE_WITH_NUM(model, "_MB1_D_", p.batches.MB1_D);
+ REPLACE_WITH_NUM(model, "_MB2_D_", p.batches.MB2_D);
+
+ REPLACE_WITH_NUM(model, "_M_", p.M);
+ REPLACE_WITH_NUM(model, "_N_", p.N);
+ REPLACE_WITH_NUM(model, "_K_", p.K);
+
+ REPLACE_WITH_NUM(model, "_A_", p.alpha);
+ REPLACE_WITH_NUM(model, "_B_", p.beta);
+ REPLACE_WITH_NUM(model, "_TA_", p.transposeA);
+ REPLACE_WITH_NUM(model, "_TB_", p.transposeB);
+
+ return model;
+ }
+
+ virtual void TearDown() {
+ }
+
+ virtual void SetUp() {
+ try {
+ TestsCommon::SetUp();
+ gemm_test_params p = ::testing::WithParamInterface<gemm_test_params>::GetParam();
+ std::string model = getModel(p);
+
+ InferenceEngine::CNNNetReader net_reader;
+ ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+
+ MKLDNNGraphTestClass graph;
+ graph.CreateGraph(net_reader.getNetwork());
+
+ auto& nodes = graph.getNodes();
+ for (int i = 0; i < nodes.size(); i++) {
+ if (nodes[i]->getType() == MKLDNNPlugin::Gemm) {
+ ASSERT_EQ(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size());
+ for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) {
+ p.comp.at(j)(nodes[i]->getSupportedPrimitiveDescriptors().at(j));
+ }
+ ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor());
+ ASSERT_EQ(p.selectedType, nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType());
+ }
+ }
+
+ InferenceEngine::SizeVector dims_src1 = {p.batches.MB1_A, p.batches.MB2_A, p.M, p.K};
+ InferenceEngine::SizeVector dims_src2 = {p.batches.MB1_B, p.batches.MB2_B, p.K, p.N};
+ InferenceEngine::SizeVector dims_src3 = {p.batches.MB1_C, p.batches.MB2_C, p.M, p.N};
+ InferenceEngine::SizeVector dims_dst = {p.batches.MB1_D, p.batches.MB2_D, p.M, p.N};
+
+ InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src1);
+ src1->allocate();
+ InferenceEngine::TBlob<float>* srcPtr1 = dynamic_cast<InferenceEngine::TBlob<float>*>(src1.get());
+ if (srcPtr1 == nullptr)
+ FAIL() << "Cannot cast blob to TBlob<float>.";
+ fill_data(src1->buffer(), src1->size());
+
+ InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src2);
+ src2->allocate();
+ InferenceEngine::TBlob<float>* srcPtr2 = dynamic_cast<InferenceEngine::TBlob<float>*>(src2.get());
+ if (srcPtr2 == nullptr)
+ FAIL() << "Cannot cast blob to TBlob<float>.";
+ fill_data(src2->buffer(), src2->size());
+
+ InferenceEngine::Blob::Ptr src3 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src3);
+ src3->allocate();
+ InferenceEngine::TBlob<float>* srcPtr3 = dynamic_cast<InferenceEngine::TBlob<float>*>(src3.get());
+ if (srcPtr3 == nullptr)
+ FAIL() << "Cannot cast blob to TBlob<float>.";
+ fill_data(src3->buffer(), src3->size());
+
+ InferenceEngine::BlobMap srcs;
+ srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src1));
+ srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in2", src2));
+ srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in3", src3));
+
+ InferenceEngine::OutputsDataMap out;
+ out = net_reader.getNetwork().getOutputsInfo();
+ InferenceEngine::BlobMap outputBlobs;
+
+ std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
+
+ InferenceEngine::TBlob<float>::Ptr output;
+ output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
+ output->allocate();
+ outputBlobs[item.first] = output;
+
+ graph.Infer(srcs, outputBlobs);
+
+ InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
+ dst_ref.allocate();
+
+ std::vector<InferenceEngine::TBlob<float>> src_vec = {*srcPtr1, *srcPtr2, *srcPtr3};
+
+ ref_gemm(src_vec, dst_ref, p);
+
+ compare(*output, dst_ref);
+ } catch (const InferenceEngine::details::InferenceEngineException &e) {
+ FAIL() << e.what();
+ }
+ }
+};
+
+TEST_P(MKLDNNGraphGemmTests, TestsGemm) {}
+
+INSTANTIATE_TEST_CASE_P(
+ TestsGemm, MKLDNNGraphGemmTests,
+ ::testing::Values(
+ gemm_test_params{{2, 1, 2, 1, 2, 1, 2, 1}, 3, 3, 2, 1, 1, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any, {
+ [](MKLDNNPlugin::PrimitiveDescInfo impl) {
+ ASSERT_EQ(MKLDNNPlugin::impl_desc_type::gemm_any, impl.getImplementationType());
+ ASSERT_EQ(3, impl.getConfig().inConfs.size());
+ ASSERT_EQ(1, impl.getConfig().outConfs.size());
+ ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(1).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(2).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout());
+ }
+ } },
+ gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 8, 5, 4, 1, 1, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any, {
+ [](MKLDNNPlugin::PrimitiveDescInfo impl) {
+ ASSERT_EQ(MKLDNNPlugin::impl_desc_type::gemm_any, impl.getImplementationType());
+ ASSERT_EQ(3, impl.getConfig().inConfs.size());
+ ASSERT_EQ(1, impl.getConfig().outConfs.size());
+ ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(1).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(2).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout());
+ }
+ } },
+ gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 16, 10, 12, 1, 1, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any, {
+ [](MKLDNNPlugin::PrimitiveDescInfo impl) {
+ ASSERT_EQ(MKLDNNPlugin::impl_desc_type::gemm_any, impl.getImplementationType());
+ ASSERT_EQ(3, impl.getConfig().inConfs.size());
+ ASSERT_EQ(1, impl.getConfig().outConfs.size());
+ ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(1).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(2).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout());
+ }
+ } },
+ gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 11, 10, 20, 1, 1, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any, {
+ [](MKLDNNPlugin::PrimitiveDescInfo impl) {
+ ASSERT_EQ(MKLDNNPlugin::impl_desc_type::gemm_any, impl.getImplementationType());
+ ASSERT_EQ(3, impl.getConfig().inConfs.size());
+ ASSERT_EQ(1, impl.getConfig().outConfs.size());
+ ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(1).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(2).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout());
+ }
+ } },
+ gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 5, 13, 2, 1, 1, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any, {
+ [](MKLDNNPlugin::PrimitiveDescInfo impl) {
+ ASSERT_EQ(MKLDNNPlugin::impl_desc_type::gemm_any, impl.getImplementationType());
+ ASSERT_EQ(3, impl.getConfig().inConfs.size());
+ ASSERT_EQ(1, impl.getConfig().outConfs.size());
+ ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(1).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(2).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout());
+ }
+ } },
+ gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 5, 15, 10, 1, 1, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any, {
+ [](MKLDNNPlugin::PrimitiveDescInfo impl) {
+ ASSERT_EQ(MKLDNNPlugin::impl_desc_type::gemm_any, impl.getImplementationType());
+ ASSERT_EQ(3, impl.getConfig().inConfs.size());
+ ASSERT_EQ(1, impl.getConfig().outConfs.size());
+ ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(1).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(2).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout());
+ }
+ } },
+ gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 5, 6, 7, 2, 0, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 5, 6, 7, 0, 2, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 3, 7, 4, 2, 3, true, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 7, 3, 4, 2, 3, true, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 7, 4, 3, 2, 3, true, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 3, 7, 4, 2, 3, false, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 7, 3, 4, 2, 3, false, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 7, 4, 3, 2, 3, false, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 3, 7, 4, 2, 3, true, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 7, 3, 4, 2, 3, true, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{3, 2, 3, 2, 3, 2, 3, 2}, 7, 4, 3, 2, 3, true, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{1, 3, 2, 3, 2, 3, 2, 3}, 7, 4, 3, 2, 3, true, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{1, 3, 2, 3, 1, 3, 2, 3}, 7, 4, 3, 2, 3, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{2, 3, 1, 3, 1, 3, 2, 3}, 7, 4, 3, 2, 3, false, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{5, 3, 5, 1, 5, 3, 5, 3}, 7, 4, 3, 2, 3, true, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{5, 3, 5, 1, 5, 1, 5, 3}, 7, 4, 3, 2, 3, false, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{5, 1, 5, 1, 5, 3, 5, 3}, 7, 4, 3, 2, 3, true, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{1, 1, 5, 3, 5, 3, 5, 3}, 7, 4, 3, 2, 3, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{1, 1, 1, 1, 5, 3, 5, 3}, 7, 4, 3, 2, 3, true, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{5, 4, 1, 1, 1, 1, 5, 4}, 7, 4, 3, 2, 3, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any}
+ ));
+
+class MKLDNNGraphDynBatchGemmTests: public MKLDNNGraphGemmTests {
+protected:
+ virtual void SetUp() {
+ try {
+ TestsCommon::SetUp();
+ gemm_test_params p = ::testing::WithParamInterface<gemm_test_params>::GetParam();
+ std::string model = getModel(p);
+ size_t MB = p.batches.MB1_D;
+ if (MB < 2)
+ MB = 2;
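+ // dyn-batch reshapes the outermost batch (MB1), so run with at least 2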
+
+ InferenceEngine::CNNNetReader net_reader;
+ ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+ InferenceEngine::CNNNetwork network = net_reader.getNetwork();
+ auto implNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(&((InferenceEngine::ICNNNetwork&)network));
+ ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
+ InferenceEngine::ResponseDesc resp;
+ InferenceEngine::StatusCode sts = implNet->setBatchSizeReshape(MB, &resp);
+ ASSERT_EQ((int)InferenceEngine::StatusCode::OK, sts) << resp.msg;
+
+ MKLDNNGraphTestClass graph;
+ graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
+ graph.CreateGraph(net_reader.getNetwork());
+
+ InferenceEngine::SizeVector dims_src1 = {MB, p.batches.MB2_A, p.M, p.K};
+ InferenceEngine::SizeVector dims_src2 = {MB, p.batches.MB2_B, p.K, p.N};
+ InferenceEngine::SizeVector dims_src3 = {MB, p.batches.MB2_C, p.M, p.N};
+ InferenceEngine::SizeVector dims_dst = {MB, p.batches.MB2_D, p.M, p.N};
+
+ InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src1);
+ src1->allocate();
+ InferenceEngine::TBlob<float>* srcPtr1 = dynamic_cast<InferenceEngine::TBlob<float>*>(src1.get());
+ if (srcPtr1 == nullptr)
+ FAIL() << "Cannot cast blob to TBlob<float>.";
+ fill_data(src1->buffer(), src1->size());
+
+ InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src2);
+ src2->allocate();
+ InferenceEngine::TBlob<float>* srcPtr2 = dynamic_cast<InferenceEngine::TBlob<float>*>(src2.get());
+ if (srcPtr2 == nullptr)
+ FAIL() << "Cannot cast blob to TBlob<float>.";
+ fill_data(src2->buffer(), src2->size());
+
+ InferenceEngine::Blob::Ptr src3 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src3);
+ src3->allocate();
+ InferenceEngine::TBlob<float>* srcPtr3 = dynamic_cast<InferenceEngine::TBlob<float>*>(src3.get());
+ if (srcPtr3 == nullptr)
+ FAIL() << "Cannot cast blob to TBlob<float>.";
+ fill_data(src3->buffer(), src3->size());
+
+ InferenceEngine::BlobMap srcs;
+ srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src1));
+ srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in2", src2));
+ srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in3", src3));
+
+ InferenceEngine::OutputsDataMap out;
+ out = net_reader.getNetwork().getOutputsInfo();
+ InferenceEngine::BlobMap outputBlobs;
+
+ std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
+
+ InferenceEngine::TBlob<float>::Ptr output;
+ output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
+ output->allocate();
+ outputBlobs[item.first] = output;
+
+ auto check = [](const MKLDNNPlugin::MKLDNNNodePtr& node) {
+ return node->getType() == MKLDNNPlugin::Gemm;
+ };
+
+ graph.checkDynBatch(srcs, outputBlobs, MB, MB, check);
+ graph.checkDynBatch(srcs, outputBlobs, 1, MB, check);
+ } catch (const InferenceEngine::details::InferenceEngineException &e) {
+ FAIL() << e.what();
+ }
+ }
+};
+
+TEST_P(MKLDNNGraphDynBatchGemmTests, TestsDynBatchGemm) {}
+
+INSTANTIATE_TEST_CASE_P(
+ TestsDynBatchGemm, MKLDNNGraphDynBatchGemmTests,
+ ::testing::Values(
+ gemm_test_params{{1, 3, 1, 3, 1, 3, 1, 3}, 3, 3, 3, 1, 1, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{1, 3, 1, 1, 1, 3, 1, 3}, 16, 15, 12, 1, 1, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any}
+));
+
+class MKLDNNGraphSingleBatchDimGemmTests: public TestsCommon,
+ public WithParamInterface<gemm_test_params> {
+ std::string model_t = R"V0G0N(
+<net name="gemmOnly" version="2" precision="FP32" batch="1">
+ <layers>
+ <layer name="in1" type="Input" precision="FP32" id="1">
+ <output>
+ <port id="1">
+ <dim>_MB_A_</dim>
+ <dim>_M_</dim>
+ <dim>_K_</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="in2" type="Input" precision="FP32" id="2">
+ <output>
+ <port id="1">
+ <dim>_MB_B_</dim>
+ <dim>_K_</dim>
+ <dim>_N_</dim>
+ </port>
+ </output>
+ </layer>
+ <layer name="gemm" id="3" type="GEMM" precision="FP32">
+ <data alpha="_A_" beta="_B_" transpose_a="_TA_" transpose_b="_TB_"/>
+ <input>
+ <port id="1">
+ <dim>_MB_A_</dim>
+ <dim>_M_</dim>
+ <dim>_K_</dim>
+ </port>
+ <port id="2">
+ <dim>_MB_B_</dim>
+ <dim>_K_</dim>
+ <dim>_N_</dim>
+ </port>
+ </input>
+ <output>
+ <port id="3">
+ <dim>_MB_D_</dim>
+ <dim>_M_</dim>
+ <dim>_N_</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="1" from-port="1" to-layer="3" to-port="1"/>
+ <edge from-layer="2" from-port="1" to-layer="3" to-port="2"/>
+ </edges>
+</net>
+)V0G0N";
+
+protected:
+ std::string getModel(gemm_test_params p) {
+ std::string model = model_t;
+
+ REPLACE_WITH_NUM(model, "_MB_A_", p.batches.MB2_A);
+ REPLACE_WITH_NUM(model, "_MB_B_", p.batches.MB2_B);
+ REPLACE_WITH_NUM(model, "_MB_D_", p.batches.MB2_D);
+
+ REPLACE_WITH_NUM(model, "_M_", p.M);
+ REPLACE_WITH_NUM(model, "_N_", p.N);
+ REPLACE_WITH_NUM(model, "_K_", p.K);
+
+ REPLACE_WITH_NUM(model, "_A_", p.alpha);
+ REPLACE_WITH_NUM(model, "_B_", p.beta);
+ REPLACE_WITH_NUM(model, "_TA_", p.transposeA);
+ REPLACE_WITH_NUM(model, "_TB_", p.transposeB);
+
+ return model;
+ }
+
+ virtual void TearDown() {
+ }
+
+ virtual void SetUp() {
+ try {
+ TestsCommon::SetUp();
+ gemm_test_params p = ::testing::WithParamInterface<gemm_test_params>::GetParam();
+ std::string model = getModel(p);
+
+ InferenceEngine::CNNNetReader net_reader;
+ ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+
+ MKLDNNGraphTestClass graph;
+ graph.CreateGraph(net_reader.getNetwork());
+
+ auto& nodes = graph.getNodes();
+ for (int i = 0; i < nodes.size(); i++) {
+ if (nodes[i]->getType() == MKLDNNPlugin::Gemm) {
+ ASSERT_EQ(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size());
+ for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) {
+ p.comp.at(j)(nodes[i]->getSupportedPrimitiveDescriptors().at(j));
+ }
+ ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor());
+ ASSERT_EQ(p.selectedType, nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType());
+ }
+ }
+
+ InferenceEngine::SizeVector dims_src1 = {p.batches.MB2_A, p.M, p.K};
+ InferenceEngine::SizeVector dims_src2 = {p.batches.MB2_B, p.K, p.N};
+ InferenceEngine::SizeVector dims_dst = {p.batches.MB2_D, p.M, p.N};
+
+ InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::CHW, dims_src1);
+ src1->allocate();
+ InferenceEngine::TBlob<float>* srcPtr1 = dynamic_cast<InferenceEngine::TBlob<float>*>(src1.get());
+ if (srcPtr1 == nullptr)
+ FAIL() << "Cannot cast blob to TBlob<float>.";
+ fill_data(src1->buffer(), src1->size());
+
+ InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::CHW, dims_src2);
+ src2->allocate();
+ InferenceEngine::TBlob<float>* srcPtr2 = dynamic_cast<InferenceEngine::TBlob<float>*>(src2.get());
+ if (srcPtr2 == nullptr)
+ FAIL() << "Cannot cast blob to TBlob<float>.";
+ fill_data(src2->buffer(), src2->size());
+
+ InferenceEngine::BlobMap srcs;
+ srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src1));
+ srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in2", src2));
+
+ InferenceEngine::OutputsDataMap out;
+ out = net_reader.getNetwork().getOutputsInfo();
+ InferenceEngine::BlobMap outputBlobs;
+
+ std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
+
+ InferenceEngine::TBlob<float>::Ptr output;
+ output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
+ output->allocate();
+ outputBlobs[item.first] = output;
+
+ graph.Infer(srcs, outputBlobs);
+
+ InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
+ dst_ref.allocate();
+
+ std::vector<InferenceEngine::TBlob<float>> src_vec = {*srcPtr1, *srcPtr2};
+
+ ref_gemm(src_vec, dst_ref, p);
+
+ compare(*output, dst_ref);
+ } catch (const InferenceEngine::details::InferenceEngineException &e) {
+ FAIL() << e.what();
+ }
+ }
+};
+
+TEST_P(MKLDNNGraphSingleBatchDimGemmTests, TestsGemm) {}
+
+INSTANTIATE_TEST_CASE_P(
+ TestsGemm, MKLDNNGraphSingleBatchDimGemmTests,
+ ::testing::Values(
+ gemm_test_params{{1, 1, 1, 1, 1, 1, 1, 1}, 7, 4, 3, 2, 3, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{1, 3, 1, 3, 1, 1, 1, 3}, 7, 4, 3, 2, 3, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{1, 3, 1, 1, 1, 1, 1, 3}, 7, 4, 3, 2, 3, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{1, 1, 1, 1, 1, 1, 1, 1}, 7, 4, 3, 2, 3, true, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{1, 3, 1, 3, 1, 1, 1, 3}, 7, 4, 3, 2, 3, true, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{1, 3, 1, 1, 1, 1, 1, 3}, 7, 4, 3, 2, 3, true, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{1, 1, 1, 1, 1, 1, 1, 1}, 7, 4, 3, 2, 3, false, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{1, 3, 1, 3, 1, 1, 1, 3}, 7, 4, 3, 2, 3, false, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{1, 3, 1, 1, 1, 1, 1, 3}, 7, 4, 3, 2, 3, false, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{1, 1, 1, 1, 1, 1, 1, 1}, 7, 4, 3, 2, 3, true, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{1, 3, 1, 3, 1, 1, 1, 3}, 7, 4, 3, 2, 3, true, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
+ gemm_test_params{{1, 3, 1, 1, 1, 1, 1, 3}, 7, 4, 3, 2, 3, true, true, 1, MKLDNNPlugin::impl_desc_type::gemm_any}
+ ));
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_input_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_input_test.cpp
index 81823c695..1c1d76dab 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_input_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_input_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_leaks_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_leaks_test.cpp
index b68588238..793e3d4c9 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_leaks_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_leaks_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -21,7 +20,7 @@ public:
MKLDNNTestExecNetwork(InferenceEngine::ICNNNetwork &network, const MKLDNNPlugin::Config &cfg)
: MKLDNNExecNetwork(network, cfg, {}) {}
MKLDNNPlugin::MKLDNNGraph& getGraph() {
- return *graph;
+ return *graphs[0];
}
};
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_lrn_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_lrn_test.cpp
index 5a314a63c..873bae5f3 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_lrn_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_lrn_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_permute_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_permute_test.cpp
index f92056631..a40add1a7 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_permute_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_permute_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -193,7 +192,9 @@ INSTANTIATE_TEST_CASE_P(
permute_test_params{{2, 3, 4, 5, 6}, {0, 3, 2, 4, 1}, 1, MKLDNNPlugin::impl_desc_type::unknown},
permute_test_params{{2, 8, 2, 2, 4, 5}, {0, 1, 4, 2, 5, 3}, 1, MKLDNNPlugin::impl_desc_type::unknown},
permute_test_params{{2, 8, 3, 3, 4, 5}, {0, 1, 4, 2, 5, 3}, 1, MKLDNNPlugin::impl_desc_type::unknown},
- permute_test_params{{2, 8, 3, 4}, {3, 0, 1, 2}, 2, MKLDNNPlugin::impl_desc_type::unknown}
+ permute_test_params{{2, 8, 3, 4}, {3, 0, 1, 2}, 2, MKLDNNPlugin::impl_desc_type::unknown},
+ permute_test_params{{2, 12, 9}, {0, 2, 1}, 1, MKLDNNPlugin::impl_desc_type::unknown},
+ permute_test_params{{2, 8, 3, 3, 4, 5}, {0, 3, 4, 1, 5, 2}, 1, MKLDNNPlugin::impl_desc_type::unknown}
));
class MKLDNNGraphDynBatchPermuteTests: public MKLDNNGraphPermuteTests {
@@ -271,5 +272,7 @@ INSTANTIATE_TEST_CASE_P(
permute_test_params{{2, 3, 4, 5, 6}, {0, 2, 4, 3, 1}, 1, MKLDNNPlugin::impl_desc_type::unknown},
permute_test_params{{2, 3, 4, 5, 6}, {0, 3, 2, 4, 1}, 1, MKLDNNPlugin::impl_desc_type::unknown},
permute_test_params{{2, 8, 2, 2, 4, 5}, {0, 1, 4, 2, 5, 3}, 1, MKLDNNPlugin::impl_desc_type::unknown},
- permute_test_params{{2, 8, 3, 3, 4, 5}, {0, 1, 4, 2, 5, 3}, 1, MKLDNNPlugin::impl_desc_type::unknown}
+ permute_test_params{{2, 8, 3, 3, 4, 5}, {0, 1, 4, 2, 5, 3}, 1, MKLDNNPlugin::impl_desc_type::unknown},
+ permute_test_params{{2, 12, 9}, {0, 2, 1}, 1, MKLDNNPlugin::impl_desc_type::unknown},
+ permute_test_params{{2, 8, 3, 3, 4, 5}, {0, 3, 4, 1, 5, 2}, 1, MKLDNNPlugin::impl_desc_type::unknown}
));
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_pooling_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_pooling_test.cpp
index 80725d9f6..a1ee6bd25 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_pooling_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_pooling_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -10,152 +9,259 @@
#include "test_graph.hpp"
#include "single_layer_common.hpp"
+#include <ie_layers.h>
#include <mkldnn_plugin/mkldnn_extension_utils.h>
#include <inference_engine/cnn_network_impl.hpp>
#include "tests_common.hpp"
+#include "ir_gen_helper.hpp"
+#include <math.h>
-
+using namespace InferenceEngine;
using namespace ::testing;
using namespace std;
using namespace mkldnn;
+using namespace single_layer_tests;
struct pooling_test_params {
- struct {
- size_t n;
- size_t c;
- size_t h;
- size_t w;
- } in;
-
- size_t krn_w;
- size_t krn_h;
- size_t str_w;
- size_t str_h;
- size_t pad_w;
- size_t pad_h;
+ // Formats: NCHW, NCDHW
+ vector<size_t> dims;
+ // Formats: WH (2-D), WHD (3-D); innermost (width-first) order
+ vector<size_t> kernel;
+ vector<size_t> strides;
+ vector<size_t> pads_begin;
+ vector<size_t> pads_end;
+
+ PoolingLayer::PoolType _type;
+ bool _exclude_pad;
size_t num_prim_desc;
MKLDNNPlugin::impl_desc_type selectedType;
- std::vector<MKLDNNPlugin::impl_desc_type> preferTypes;
+ vector<MKLDNNPlugin::impl_desc_type> preferTypes;
- std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
+ vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
};
template <typename data_t>
void ref_pool(const InferenceEngine::TBlob<data_t> &src, InferenceEngine::TBlob<data_t> &dst, pooling_test_params prm)
{
- size_t KW = prm.krn_w;
- size_t KH = prm.krn_h;
-
- size_t IW = prm.in.w;
- size_t IH = prm.in.h;
-
- size_t OW = (IW + 2 * prm.pad_w - prm.krn_w) / prm.str_w + 1;
- size_t OH = (IH + 2 * prm.pad_h - prm.krn_h) / prm.str_h + 1;
- size_t OC = prm.in.c;
+ int dims_size = prm.dims.size();
+
+ int KW = prm.kernel[X_AXIS];
+ int KH = prm.kernel[Y_AXIS];
+ int KD = dims_size == 5 ? prm.kernel[Z_AXIS] : 1;
+
+ int SW = prm.strides[X_AXIS];
+ int SH = prm.strides[Y_AXIS];
+ int SD = prm.strides.size() > Z_AXIS ? prm.strides[Z_AXIS] : 1;
+
+ int IW = prm.dims[dims_size - 1];
+ int IH = prm.dims[dims_size - 2];
+ int ID = dims_size == 5 ? prm.dims[dims_size - 3] : 1;
+
+ int PWB = prm.pads_begin[X_AXIS];
+ int PHB = prm.pads_begin[Y_AXIS];
+ int PDB = prm.pads_begin.size() > Z_AXIS ? prm.pads_begin[Z_AXIS] : 0;
+ int PWE = prm.pads_end[X_AXIS];
+ int PHE = prm.pads_end[Y_AXIS];
+ int PDE = prm.pads_end.size() > Z_AXIS ? prm.pads_end[Z_AXIS] : 0;
+
+ int OW = (IW + PWB + PWE - KW) / SW + 1;
+ int OH = (IH + PHB + PHE - KH) / SH + 1;
+ int OD = dims_size == 5 ? (ID + PDB + PDE - KD) / SD + 1 : 1;
+ int OC = prm.dims[1];
const data_t *src_data = src.readOnly();
data_t *dst_data = dst.data();
- IE_ASSERT( OC == dst.dims()[2]);
-
- for (size_t c = 0; c < OC; c++) {
- for (size_t oh = 0; oh < OH; oh++) {
- for (size_t ow = 0; ow < OW; ow++) {
- size_t oidx = c * OH * OW
- + oh * OW + ow;
- data_t out_ref = data_t(0);
- bool is_initialized = false;
- for (uint32_t kh = 0; kh < KH; kh++) {
- for (uint32_t kw = 0; kw < KW; kw++) {
- int32_t iw = ow * prm.str_w - prm.pad_w + kw;
- int32_t ih = oh * prm.str_h - prm.pad_h + kh;
- if (iw < 0 || iw >= IW || ih < 0
- || ih >= IH)
- continue;
- uint32_t iidx = c * IH * IW + ih * IW + iw;
-
- data_t d = src_data[iidx];
- if (!is_initialized) {
- out_ref = d;
- is_initialized = true;
- } else {
- if (out_ref < d)
- out_ref = d;
+ IE_ASSERT(OC == dst.dims()[dims_size - 2]);
+
+ int k1 = OH * OW,
+ k2 = k1 * OD,
+ k3 = IH * IW,
+ k4 = k3 * ID;
+
+ if (prm._type == PoolingLayer::MAX) {
+ for (int c = 0; c < OC; c++) {
+ int cc = c * k2;
+ for (int od = 0; od < OD; od++) {
+ int cd = cc + od * k1;
+ for (int oh = 0; oh < OH; oh++) {
+ int ch = cd + oh * OW;
+ for (int ow = 0; ow < OW; ow++) {
+
+ int oidx = ch + ow;
+ data_t out_ref = data_t(0);
+ bool is_initialized = false;
+
+ for (int kd = 0; kd < KD; kd++) {
+ int id = dims_size == 5 ? od * SD - PDB + kd : 0;
+ if (id < 0 || id >= ID) continue;
+ for (int kh = 0; kh < KH; kh++) {
+ int ih = oh * SH - PHB + kh;
+ if (ih < 0 || ih >= IH) continue;
+ for (int kw = 0; kw < KW; kw++) {
+ int iw = ow * SW - PWB + kw;
+ if (iw < 0 || iw >= IW) continue;
+ int iidx = c * k4
+ + id * k3
+ + ih * IW
+ + iw;
+
+ data_t d = src_data[iidx];
+ if (!is_initialized) {
+ out_ref = d;
+ is_initialized = true;
+ } else {
+ if (out_ref < d)
+ out_ref = d;
+ }
+ }
+ }
}
+ dst_data[oidx] = out_ref;
}
}
- dst_data[oidx] = out_ref;
}
}
- }
+ } else if (prm._type == PoolingLayer::AVG) {
+
+ bool include_padding = false;
+ bool not_zero_l = false;
+ for (auto lr : prm.pads_begin) {
+ if (lr) {
+ not_zero_l = true;
+ break;
+ }
+ }
+ if (!prm._exclude_pad && not_zero_l)
+ include_padding = true;
+
+ int PDBKD = KD - PDB,
+ PHBKH = KH - PHB,
+ PWBKW = KW - PWB,
+ IDPDE = ID + PDE,
+ IHPHE = IH + PHE,
+ IWPWE = IW + PWE;
+
+ for (int c = 0; c < OC; c++) {
+ int cc = c * k2;
+ for (int od = 0; od < OD; od++) {
+ int cd = cc + od * k1;
+ int id_start = od * SD - PDB;
+ int id_end = std::min(od * SD + PDBKD, IDPDE);
+ for (int oh = 0; oh < OH; oh++) {
+ int ch = cd + oh * OW;
+ int ih_start = oh * SH - PHB;
+ int ih_end = std::min(oh * SH + PHBKH, IHPHE);
+ for (int ow = 0; ow < OW; ow++) {
+ size_t oidx = ch + ow;
+ dst_data[oidx] = (data_t)0;
+ int iw_start = ow * SW - PWB;
+ int iw_end = std::min(ow * SW + PWBKW, IWPWE);
+
+ // window size including padding; recomputed below when padding is excluded
+ double num_summands = (ih_end - ih_start) * (iw_end - iw_start) * (id_end - id_start);
+
+ id_start = std::max(id_start, 0);
+ ih_start = std::max(ih_start, 0);
+ iw_start = std::max(iw_start, 0);
+ id_end = std::min(id_end, ID);
+ ih_end = std::min(ih_end, IH);
+ iw_end = std::min(iw_end, IW);
+
+ if (!include_padding)
+ num_summands = (id_end - id_start) * (ih_end - ih_start) * (iw_end - iw_start);
+ if (num_summands == 0.0) continue;
+
+ double dst = 0.0;
+ for (int id = id_start; id < id_end; ++id) {
+ for (int ih = ih_start; ih < ih_end; ++ih) {
+ for (int iw = iw_start; iw < iw_end; ++iw) {
+ size_t iidx = c * k4
+ + id * k3
+ + ih * IW
+ + iw;
+
+ dst += (double)src_data[iidx];
+ } } }
+
+ dst_data[oidx] = (data_t)(dst / num_summands);
+ } } } } }
}
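
The rewritten reference honours separate begin/end paddings. Each output extent follows

    OW = (IW + PWB + PWE - KW) / SW + 1    (likewise OH and OD)

e.g. the {1, 4, 128, 128} AVG cases with kernel 2, stride 2 and pads_begin = pads_end = 2 give OW = (128 + 2 + 2 - 2) / 2 + 1 = 66. For average pooling the divisor is the window area clipped to the padded input when exclude-pad is false and some pads_begin entry is non-zero, otherwise only the positions that land inside the real input.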
class MKLDNNGraphPoolingTests: public TestsCommon,
public WithParamInterface<pooling_test_params> {
- std::string model_t = R"V0G0N(
-<Net Name="Pooling_Only" version="2" precision="FP32" batch="1">
- <layers>
- <layer name="in1" type="Input" precision="FP32" id="0">
- <output>
- <port id="0">
- <dim>_IN_</dim>
- <dim>_IC_</dim>
- <dim>_IH_</dim>
- <dim>_IW_</dim>
- </port>
- </output>
- </layer>
+ std::string layers_t = R"V0G0N(
<layer name="pool" id="1" type="Pooling" precision="FP32">
- <pooling stride-x="_SW_" stride-y="_SH_"
- pad-x="_PW_" pad-y="_PH_"
- kernel-x="_KW_" kernel-y="_KH_"
- method="MAX" round="Ceil" PrimitivesPriority="_IMPLS_"/>
+ <pooling kernel="_K_"
+ strides="_KS_"
+ pads_begin="_PB_" pads_end="_PE_"
+ pool-method="_PM_" exclude-pad="_EP_" rounding_type="floor"
+ PrimitivesPriority="_IMPLS_"/>
<input>
<port id="1">
- <dim>_IN_</dim>
- <dim>_IC_</dim>
- <dim>_IH_</dim>
- <dim>_IW_</dim>
+ __SRC_DIMS__
</port>
</input>
<output>
<port id="1">
<dim>_IN_</dim>
<dim>_IC_</dim>
- <dim>_OH_</dim>
- <dim>_OW_</dim>
+ __DST_DIMS__
</port>
</output>
</layer>
- </layers>
- <edges>
+)V0G0N";
+
+ std::string edges_t = R"V0G0N(
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
- </edges>
-</Net>
)V0G0N";
protected:
std::string getModel(pooling_test_params p) {
- std::string model = model_t;
+ std::string model = layers_t;
- REPLACE_WITH_NUM(model, "_IW_", p.in.w);
- REPLACE_WITH_NUM(model, "_IH_", p.in.h);
- REPLACE_WITH_NUM(model, "_IC_", p.in.c);
- REPLACE_WITH_NUM(model, "_IN_", p.in.n);
+ std::string s_dims;
+ for (auto& dim : p.dims) {
+ s_dims += "\n <dim>";
+ s_dims += std::to_string(dim) + "</dim>";
+ }
+ REPLACE_WITH_STR(model, "__SRC_DIMS__", s_dims);
+
+ s_dims = "";
+ int k_len = p.kernel.size();
+ for (size_t i = 2lu; i < p.dims.size(); i++) {
+ size_t inx = k_len - i + 1lu;
+ size_t dim = (p.dims[i] + p.pads_begin[inx] + p.pads_end[inx] - p.kernel[inx]) / p.strides[inx] + 1lu;
+ s_dims += "\n <dim>";
+ s_dims += std::to_string(dim) + "</dim>";
+ }
+ REPLACE_WITH_STR(model, "__DST_DIMS__", s_dims);
+
+ std::string pool_method;
+ switch (p._type) {
+ case PoolingLayer::AVG: pool_method = "avg";
+ break;
+ case PoolingLayer::ROI: pool_method = "roi";
+ break;
+ default: pool_method = "max";
+ }
+ REPLACE_WITH_STR(model, "_PM_", pool_method);
+
+ std::string exclude_pad = "false";
+ if (p._exclude_pad) exclude_pad = "true";
+ REPLACE_WITH_STR(model, "_EP_", exclude_pad);
- REPLACE_WITH_NUM(model, "_KW_", p.krn_w);
- REPLACE_WITH_NUM(model, "_KH_", p.krn_h);
- REPLACE_WITH_NUM(model, "_SW_", p.str_w);
- REPLACE_WITH_NUM(model, "_SH_", p.str_h);
- REPLACE_WITH_NUM(model, "_PW_", p.pad_w);
- REPLACE_WITH_NUM(model, "_PH_", p.pad_h);
+ REPLACE_WITH_NUM(model, "_IN_", p.dims[0]);
+ REPLACE_WITH_NUM(model, "_IC_", p.dims[1]);
- REPLACE_WITH_NUM(model, "_OW_", (p.in.w + 2 * p.pad_w - p.krn_w) / p.str_w + 1);
- REPLACE_WITH_NUM(model, "_OH_", (p.in.h + 2 * p.pad_h - p.krn_h) / p.str_h + 1);
+ REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_K_", p.kernel);
+ REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_KS_", p.strides);
+ REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PB_", p.pads_begin);
+ REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PE_", p.pads_end);
std::string impls;
for (const auto& preferType : p.preferTypes) {
@@ -164,6 +270,9 @@ protected:
impls += "cpu:" + MKLDNNGraphTestClass::getStrPrimitiveDescriptorType(preferType);
}
REPLACE_WITH_STR(model, "_IMPLS_", impls);
+
+ model = IRTemplateGenerator::getIRTemplate("Pooling_Only", p.dims, "FP32", model, edges_t);
+
return model;
}
@@ -193,9 +302,18 @@ protected:
}
}
- InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w};
+ InferenceEngine::Layout layout = ANY;
+ switch (p.dims.size()) {
+ case 4:
+ layout = InferenceEngine::NCHW;
+ break;
+ case 5:
+ layout = InferenceEngine::NCDHW;
+ break;
+ }
- InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
+ InferenceEngine::Blob::Ptr src =
+ InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, p.dims);
src->allocate();
fill_data(src->buffer(), src->size());
@@ -225,7 +343,7 @@ protected:
ref_pool(*srcPtr, dst_ref, p);
- compare(*output, dst_ref);
+ compare(*output, dst_ref, 0.0001f);
} catch (const InferenceEngine::details::InferenceEngineException &e) {
FAIL() << e.what();
}
@@ -237,12 +355,64 @@ TEST_P(MKLDNNGraphPoolingTests, TestsPooling) {}
INSTANTIATE_TEST_CASE_P(
TestsPooling, MKLDNNGraphPoolingTests,
::testing::Values(
- pooling_test_params{{1, 3, 228, 228}, 2, 2, 2, 2, 0, 0, 6, MKLDNNPlugin::impl_desc_type::jit},
- pooling_test_params{{1, 3, 228, 228}, 4, 2, 2, 2, 0, 0, 4, MKLDNNPlugin::impl_desc_type::jit},
- pooling_test_params{{1, 3, 228, 228}, 4, 2, 2, 1, 0, 0, 4, MKLDNNPlugin::impl_desc_type::jit},
- pooling_test_params{{1, 3, 228, 228}, 2, 2, 2, 2, 0, 0, 6, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
- pooling_test_params{{1, 3, 228, 228}, 4, 2, 2, 2, 0, 0, 4, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
- pooling_test_params{{1, 3, 228, 228}, 4, 2, 2, 1, 0, 0, 4, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}));
+ /*0*/ pooling_test_params{{1, 3, 228, 228}, {2, 2}, {2, 2}, {0, 0}, {0, 0}, PoolingLayer::MAX, false, 6, MKLDNNPlugin::impl_desc_type::jit},
+ pooling_test_params{{1, 3, 228, 228}, {4, 2}, {2, 2}, {0, 0}, {0, 0}, PoolingLayer::MAX, false, 4, MKLDNNPlugin::impl_desc_type::jit},
+ pooling_test_params{{1, 3, 228, 228}, {4, 2}, {2, 1}, {0, 0}, {0, 0}, PoolingLayer::MAX, false, 4, MKLDNNPlugin::impl_desc_type::jit},
+ pooling_test_params{{1, 3, 228, 228}, {2, 2}, {2, 2}, {0, 0}, {0, 0}, PoolingLayer::MAX, false, 6, MKLDNNPlugin::impl_desc_type::ref,
+ {MKLDNNPlugin::impl_desc_type::ref_any}},
+ pooling_test_params{{1, 3, 228, 228}, {4, 2}, {2, 2}, {0, 0}, {0, 0}, PoolingLayer::MAX, false, 4, MKLDNNPlugin::impl_desc_type::ref,
+ {MKLDNNPlugin::impl_desc_type::ref_any}},
+ pooling_test_params{{1, 3, 228, 228}, {4, 2}, {2, 1}, {0, 0}, {0, 0}, PoolingLayer::MAX, false, 4, MKLDNNPlugin::impl_desc_type::ref,
+ {MKLDNNPlugin::impl_desc_type::ref_any}},
+ pooling_test_params{{1u, 4u, 128u, 128u}, {2u, 2u}, {2u, 2u}, {1u, 0u}, {0u, 0u}, PoolingLayer::AVG, false, 3u,
+ MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+ pooling_test_params{{1u, 4u, 128u, 128u}, {2u, 2u}, {2u, 2u}, {1u, 0u}, {0u, 0u}, PoolingLayer::AVG, false, 3u,
+ MKLDNNPlugin::impl_desc_type::jit },
+ pooling_test_params{{1u, 4u, 128u, 128u}, {2u, 2u}, {2u, 2u}, {0u, 0u}, {0u, 0u}, PoolingLayer::AVG, true, 3u,
+ MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+ /*9*/ pooling_test_params{{1u, 4u, 128u, 128u}, {2u, 2u}, {2u, 2u}, {0u, 0u}, {0u, 0u}, PoolingLayer::AVG, true, 3u,
+ MKLDNNPlugin::impl_desc_type::jit },
+ pooling_test_params{{1u, 4u, 128u, 128u}, {2u, 2u}, {2u, 2u}, {2u, 2u}, {2u, 2u}, PoolingLayer::AVG, true, 3u,
+ MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+ pooling_test_params{{1u, 4u, 128u, 128u}, {2u, 2u}, {2u, 2u}, {2u, 2u}, {2u, 2u}, PoolingLayer::AVG, false, 3u,
+ MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+ pooling_test_params{{1u, 4u, 128u, 128u}, {2u, 2u}, {2u, 2u}, {2u, 2u}, {2u, 2u}, PoolingLayer::MAX, false, 3u,
+ MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+ // TODO: fix the JIT implementation's handling of end paddings, then re-enable the cases below.
+// pooling_test_params{{1u, 4u, 128u, 128u}, {2u, 2u}, {2u, 2u}, {2u, 2u}, {2u, 0u}, PoolingLayer::AVG, true, 3u,
+// MKLDNNPlugin::impl_desc_type::jit },
+// pooling_test_params{{1u, 4u, 128u, 128u}, {2u, 2u}, {2u, 2u}, {2u, 2u}, {2u, 0u}, PoolingLayer::AVG, false, 3u,
+// MKLDNNPlugin::impl_desc_type::jit },
+// pooling_test_params{{1u, 4u, 128u, 128u}, {2u, 2u}, {2u, 2u}, {2u, 2u}, {2u, 0u}, PoolingLayer::MAX, false, 3u,
+// MKLDNNPlugin::impl_desc_type::jit },
+
+ // 5D tensor
+ pooling_test_params{{1u, 3u, 16u, 32u, 32u}, {2u, 2u, 2u}, {1u, 1u, 1u}, {0u, 0u, 0u}, {0u, 0u, 0u}, PoolingLayer::MAX, false, 3u,
+ MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+ pooling_test_params{{1u, 3u, 16u, 32u, 32u}, {2u, 2u, 2u}, {1u, 1u, 1u}, {0u, 0u, 0u}, {0u, 0u, 0u}, PoolingLayer::MAX, false, 3u,
+ MKLDNNPlugin::impl_desc_type::jit },
+ pooling_test_params{{1u, 3u, 16u, 32u, 32u}, {2u, 2u, 2u}, {1u, 1u, 1u}, {1u, 1u, 1u}, {1u, 1u, 1u}, PoolingLayer::MAX, false, 3u,
+ MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+ pooling_test_params{{1u, 32u, 60u, 60u, 60u}, {2u, 3u, 4u}, {2u, 2u, 2u}, {1u, 1u, 1u}, {1u, 2u, 3u}, PoolingLayer::MAX, false, 3u,
+ MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+ /*20*/ pooling_test_params{{1u, 3u, 16u, 32u, 32u}, {2u, 2u, 2u}, {1u, 1u, 1u}, {1u, 2u, 3u}, {1u, 2u, 3u}, PoolingLayer::MAX, false, 3u,
+ MKLDNNPlugin::impl_desc_type::jit },
+ pooling_test_params{{1u, 4u, 128u, 128u, 128u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {1u, 0u, 0u}, {0u, 0u, 0u}, PoolingLayer::AVG, false, 3u,
+ MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+ pooling_test_params{{1u, 4u, 128u, 128u, 128u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {1u, 0u, 0u}, {0u, 0u, 0u}, PoolingLayer::AVG, false, 3u,
+ MKLDNNPlugin::impl_desc_type::jit },
+ pooling_test_params{{1u, 4u, 128u, 128u, 128u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {0u, 0u, 0u}, {0u, 0u, 0u}, PoolingLayer::AVG, true, 3u,
+ MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+ pooling_test_params{{1u, 4u, 128u, 128u, 128u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {0u, 0u, 0u}, {0u, 0u, 0u}, PoolingLayer::AVG, true, 3u,
+ MKLDNNPlugin::impl_desc_type::jit },
+ pooling_test_params{{1u, 4u, 128u, 128u, 128u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {0u, 0u, 0u}, PoolingLayer::AVG, true, 3u,
+ MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+ pooling_test_params{{1u, 4u, 128u, 128u, 128u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {2u, 2u, 2u}, PoolingLayer::AVG, true, 3u,
+ MKLDNNPlugin::impl_desc_type::jit },
+ pooling_test_params{{1u, 4u, 128u, 128u, 128u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {2u, 2u, 2u}, PoolingLayer::AVG, false, 3u,
+ MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+ pooling_test_params{{1u, 4u, 128u, 128u, 128u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {2u, 2u, 2u}, {2u, 2u, 2u}, PoolingLayer::AVG, false, 3u,
+ MKLDNNPlugin::impl_desc_type::jit } ));
class MKLDNNGraphDynBatchPoolingTests: public MKLDNNGraphPoolingTests {
@@ -252,7 +422,7 @@ protected:
TestsCommon::SetUp();
pooling_test_params p = ::testing::WithParamInterface<pooling_test_params>::GetParam();
std::string model = getModel(p);
- size_t MB = p.in.n;
+ size_t MB = p.dims[0];
if (MB < 2)
MB = 2;
@@ -269,9 +439,18 @@ protected:
graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
graph.CreateGraph(net_reader.getNetwork());
- InferenceEngine::SizeVector dims_src = {MB, p.in.c, p.in.h, p.in.w};
- InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
+ InferenceEngine::Layout layout = ANY;
+ switch (p.dims.size()) {
+ case 4:
+ layout = InferenceEngine::NCHW;
+ break;
+ case 5:
+ layout = InferenceEngine::NCDHW;
+ break;
+ }
+ InferenceEngine::Blob::Ptr src =
+ InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, p.dims);
src->allocate();
fill_data(src->buffer(), src->size());
@@ -310,7 +489,7 @@ TEST_P(MKLDNNGraphDynBatchPoolingTests, TestsDynBatchPooling) {}
INSTANTIATE_TEST_CASE_P(
TestsDynBatchPooling, MKLDNNGraphDynBatchPoolingTests,
::testing::Values(
- pooling_test_params{{1, 3, 228, 228}, 4, 2, 2, 1, 0, 0, 4, MKLDNNPlugin::impl_desc_type::jit},
- pooling_test_params{{1, 3, 228, 228}, 2, 2, 2, 2, 0, 0, 6, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
- pooling_test_params{{1, 3, 228, 228}, 4, 2, 2, 2, 0, 0, 4, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
- pooling_test_params{{1, 3, 228, 228}, 4, 2, 2, 1, 0, 0, 4, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}));
+ pooling_test_params{{1, 3, 228, 228}, {4, 2}, {2, 1}, {0, 0}, {0, 0}, PoolingLayer::MAX, false, 4, MKLDNNPlugin::impl_desc_type::jit},
+ pooling_test_params{{1, 3, 228, 228}, {2, 2}, {2, 2}, {0, 0}, {0, 0}, PoolingLayer::MAX, false, 6, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+ pooling_test_params{{1, 3, 228, 228}, {4, 2}, {2, 2}, {0, 0}, {0, 0}, PoolingLayer::MAX, false, 4, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+ pooling_test_params{{1, 3, 228, 228}, {4, 2}, {2, 1}, {0, 0}, {0, 0}, PoolingLayer::MAX, false, 4, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}));
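
The rank-to-layout switch added here now appears in four SetUp bodies across this patch; a small helper along these lines could factor it out (a hypothetical refactor, not part of the change):

    static InferenceEngine::Layout layoutForDims(const InferenceEngine::SizeVector& dims) {
        switch (dims.size()) {
            case 4:  return InferenceEngine::NCHW;   // 4-D tensors
            case 5:  return InferenceEngine::NCDHW;  // 5-D tensors
            default: return InferenceEngine::ANY;
        }
    }

Each test would then build its input blob with make_shared_blob(Precision::FP32, layoutForDims(p.dims), p.dims).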
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_power_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_power_test.cpp
index af2cea4ab..83cde28ed 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_power_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_power_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_relu_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_relu_test.cpp
index 53feb582c..ce860c2e7 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_relu_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_relu_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -20,12 +19,8 @@ using namespace mkldnn;
struct relu_test_params {
- struct {
- size_t n;
- size_t c;
- size_t h;
- size_t w;
- } in;
+ // Formats: NCHW, NCDHW
+ vector<size_t> dims;
float n_clope;
@@ -39,22 +34,29 @@ struct relu_test_params {
template <typename data_t>
void ref_relu(const InferenceEngine::TBlob<data_t> &src, InferenceEngine::TBlob<data_t> &dst, relu_test_params prm)
{
- size_t IW = prm.in.w;
- size_t IH = prm.in.h;
- size_t IC = prm.in.c;
+ auto dims_size = src.dims().size();
+
+ size_t IW = src.dims()[dims_size - 1];
+ size_t IH = src.dims()[dims_size - 2];
+ size_t ID = dims_size == 5 ? src.dims()[dims_size - 3] : 1u;
+ size_t IC = src.dims()[1];
const data_t *src_data = src.readOnly();
data_t *dst_data = dst.data();
for (uint32_t c = 0; c < IC; c++) {
- for (uint32_t h = 0; h < IH; h++) {
- for (uint32_t w = 0; w < IW; w++) {
- uint32_t oidx = c * IH * IW
- + h * IW + w;
-
- dst_data[oidx] = src_data[oidx] >= 0.0 ?
- src_data[oidx] :
- src_data[oidx] * prm.n_clope;
+ for (uint32_t d = 0; d < ID; d++) {
+ for (uint32_t h = 0; h < IH; h++) {
+ for (uint32_t w = 0; w < IW; w++) {
+ uint32_t oidx = c * ID * IH * IW
+ + d * IH * IW
+ + h * IW
+ + w;
+
+ dst_data[oidx] = src_data[oidx] >= 0.0 ?
+ src_data[oidx] :
+ src_data[oidx] * prm.n_clope;
+ }
}
}
}
@@ -63,13 +65,14 @@ void ref_relu(const InferenceEngine::TBlob<data_t> &src, InferenceEngine::TBlob<
class MKLDNNGraphReluTests: public TestsCommon,
public WithParamInterface<relu_test_params> {
std::string model_t = R"V0G0N(
-<Net Name="Relu_Only" version="2" precision="FP32" batch="1">
+<Net Name="Relu_Only" version="3" precision="FP32" batch="1">
<layers>
<layer name="in1" type="Input" precision="FP32" id="0">
<output>
<port id="0">
<dim>_IN_</dim>
<dim>_IC_</dim>
+ <dim>_ID_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
@@ -80,6 +83,7 @@ class MKLDNNGraphReluTests: public TestsCommon,
<port id="1">
<dim>_IN_</dim>
<dim>_IC_</dim>
+ <dim>_ID_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
@@ -88,6 +92,7 @@ class MKLDNNGraphReluTests: public TestsCommon,
<port id="2">
<dim>_IN_</dim>
<dim>_IC_</dim>
+ <dim>_ID_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
@@ -102,11 +107,24 @@ class MKLDNNGraphReluTests: public TestsCommon,
std::string getModel(relu_test_params p) {
std::string model = model_t;
+ auto dims_size = p.dims.size();
+
+ switch (dims_size) {
+ case 3:
+ REMOVE_LINE(model, "<dim>_IH_</dim>");
+ case 4:
+ REMOVE_LINE(model, "<dim>_ID_</dim>");
+ }
- REPLACE_WITH_NUM(model, "_IW_", p.in.w);
- REPLACE_WITH_NUM(model, "_IH_", p.in.h);
- REPLACE_WITH_NUM(model, "_IC_", p.in.c);
- REPLACE_WITH_NUM(model, "_IN_", p.in.n);
+ REPLACE_WITH_NUM(model, "_IW_", p.dims[dims_size - 1]);
+ REPLACE_WITH_NUM(model, "_IC_", p.dims[1]);
+ REPLACE_WITH_NUM(model, "_IN_", p.dims[0]);
+ switch (dims_size) {
+ case 5:
+ REPLACE_WITH_NUM(model, "_ID_", p.dims[dims_size - 3]);
+ case 4:
+ REPLACE_WITH_NUM(model, "_IH_", p.dims[dims_size - 2]);
+ }
return model;
}
@@ -138,9 +156,18 @@ protected:
}
}
- InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w};
+ InferenceEngine::SizeVector dims_src = p.dims;
+ InferenceEngine::Layout layout = InferenceEngine::ANY;
+ switch (p.dims.size()) {
+ case 4:
+ layout = InferenceEngine::NCHW;
+ break;
+ case 5:
+ layout = InferenceEngine::NCDHW;
+ break;
+ }
- InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
+ InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, dims_src);
src->allocate();
fill_data(src->buffer(), src->size());
@@ -170,7 +197,7 @@ protected:
ref_relu(*srcPtr, dst_ref, p);
- compare(*output, dst_ref);
+ compare(*output, dst_ref, 0.0005f);
} catch (const InferenceEngine::details::InferenceEngineException &e) {
FAIL() << e.what();
}
@@ -199,4 +226,22 @@ INSTANTIATE_TEST_CASE_P(
ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout());
ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout());
}
- }}));
+ }},
+ relu_test_params{
+ {1, 64, 32, 32, 32}, 0.0f, 3, MKLDNNPlugin::impl_desc_type::ref_any, {
+ [](MKLDNNPlugin::PrimitiveDescInfo impl) {
+ ASSERT_TRUE(impl.getImplementationType() & MKLDNNPlugin::impl_desc_type::ref_any);
+ ASSERT_EQ(1, impl.getConfig().inConfs.size());
+ ASSERT_EQ(1, impl.getConfig().outConfs.size());
+ ASSERT_EQ(InferenceEngine::Layout::NCDHW, impl.getConfig().inConfs.at(0).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCDHW, impl.getConfig().outConfs.at(0).desc.getLayout());
+ },
+ [](MKLDNNPlugin::PrimitiveDescInfo impl) {
+ ASSERT_TRUE(impl.getImplementationType() & MKLDNNPlugin::impl_desc_type::ref_any);
+ ASSERT_EQ(1, impl.getConfig().inConfs.size());
+ ASSERT_EQ(1, impl.getConfig().outConfs.size());
+ ASSERT_EQ(InferenceEngine::Layout::NCDHW, impl.getConfig().inConfs.at(0).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCDHW, impl.getConfig().outConfs.at(0).desc.getLayout());
+ }
+ }}
+ ));
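
For the new 5-D case the reference walks the flattened NCDHW volume,

    oidx = c*ID*IH*IW + d*IH*IW + h*IW + w
    dst[oidx] = src[oidx] >= 0 ? src[oidx] : src[oidx] * n_clope

i.e. leaky ReLU with negative slope n_clope; with n_clope = 0.0f as above it reduces to plain ReLU, and both ref_any descriptors are checked for NCDHW layouts on input and output.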
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_reorder_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_reorder_test.cpp
index da81442a0..c7c13ade1 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_reorder_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_reorder_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_reshape_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_reshape_test.cpp
index 50b2900c2..d85aaa58b 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_reshape_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_reshape_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -81,14 +80,14 @@ __DST_DIMS__
std::string src_dims;
for (auto& dim : p.in) {
- src_dims += "<dim>";
+ src_dims += " <dim>";
src_dims += std::to_string(dim) + "</dim>\n";
}
REPLACE_WITH_STR(model, "__SRC_DIMS__", src_dims);
std::string dst_dims;
for (auto& dim : p.out) {
- dst_dims += "<dim>";
+ dst_dims += "\t\t<dim>";
dst_dims += std::to_string(dim) + "</dim>\n";
}
REPLACE_WITH_STR(model, "__DST_DIMS__", dst_dims);
@@ -176,7 +175,7 @@ TEST_P(MKLDNNGraphReshapeTests, TestsReshape) {}
INSTANTIATE_TEST_CASE_P(
TestsReshape, MKLDNNGraphReshapeTests,
::testing::Values(
- reshape_test_params{ {1, 3, 228, 228}, {1, 24, 2, 3249}, {1, 24, 2, 3249}, 0, -1, 2,
+ reshape_test_params{ {1, 3, 228, 228}, {1, 24, 2, 3249}, {1, 24, 2, 3249}, 0, -1, 1,
MKLDNNPlugin::impl_desc_type::unknown, { [](MKLDNNPlugin::PrimitiveDescInfo impl) {
ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
ASSERT_EQ(1, impl.getConfig().inConfs.size());
@@ -184,7 +183,7 @@ INSTANTIATE_TEST_CASE_P(
ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout());
ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout());
} } },
- reshape_test_params{ { 4 },{ 2, 2 },{ 2, 2 }, 0, -1, 2,
+ reshape_test_params{ { 4 },{ 2, 2 },{ 2, 2 }, 0, -1, 1,
MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) {
ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
ASSERT_EQ(1, impl.getConfig().inConfs.size());
@@ -192,7 +191,7 @@ INSTANTIATE_TEST_CASE_P(
ASSERT_EQ(InferenceEngine::Layout::C, impl.getConfig().inConfs.at(0).desc.getLayout());
ASSERT_EQ(InferenceEngine::Layout::NC, impl.getConfig().outConfs.at(0).desc.getLayout());
} } },
- reshape_test_params{ { 4 },{ 1, 2, 2 },{ 1, 2, 2 }, 0, -1, 2,
+ reshape_test_params{ { 4 },{ 1, 2, 2 },{ 1, 2, 2 }, 0, -1, 1,
MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) {
ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
ASSERT_EQ(1, impl.getConfig().inConfs.size());
@@ -200,7 +199,7 @@ INSTANTIATE_TEST_CASE_P(
ASSERT_EQ(InferenceEngine::Layout::C, impl.getConfig().inConfs.at(0).desc.getLayout());
ASSERT_EQ(InferenceEngine::Layout::CHW, impl.getConfig().outConfs.at(0).desc.getLayout());
} } },
- reshape_test_params{ { 4 },{ 1, 4, 1, 1 },{ 1, 4, 1, 1 }, 0, -1, 2,
+ reshape_test_params{ { 4 },{ 1, 4, 1, 1 },{ 1, 4, 1, 1 }, 0, -1, 1,
MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) {
ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
ASSERT_EQ(1, impl.getConfig().inConfs.size());
@@ -208,7 +207,7 @@ INSTANTIATE_TEST_CASE_P(
ASSERT_EQ(InferenceEngine::Layout::C, impl.getConfig().inConfs.at(0).desc.getLayout());
ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout());
} } },
- reshape_test_params{ { 4, 4 },{ 1, 4, 4 },{ 1, 4, 4 }, 0, -1, 2,
+ reshape_test_params{ { 4, 4 },{ 1, 4, 4 },{ 1, 4, 4 }, 0, -1, 1,
MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) {
ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
ASSERT_EQ(1, impl.getConfig().inConfs.size());
@@ -216,7 +215,7 @@ INSTANTIATE_TEST_CASE_P(
ASSERT_EQ(InferenceEngine::Layout::NC, impl.getConfig().inConfs.at(0).desc.getLayout());
ASSERT_EQ(InferenceEngine::Layout::CHW, impl.getConfig().outConfs.at(0).desc.getLayout());
} } },
- reshape_test_params{ { 4, 4 },{ 1, 4, 2, 2 },{ 1, 4, 2, 2 }, 0, -1, 2,
+ reshape_test_params{ { 4, 4 },{ 1, 4, 2, 2 },{ 1, 4, 2, 2 }, 0, -1, 1,
MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) {
ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
ASSERT_EQ(1, impl.getConfig().inConfs.size());
@@ -224,7 +223,7 @@ INSTANTIATE_TEST_CASE_P(
ASSERT_EQ(InferenceEngine::Layout::NC, impl.getConfig().inConfs.at(0).desc.getLayout());
ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout());
} } },
- reshape_test_params{ { 4, 2, 2 },{ 1, 4, 2, 2 },{ 1, 4, 2, 2 }, 0, -1, 2,
+ reshape_test_params{ { 4, 2, 2 },{ 1, 4, 2, 2 },{ 1, 4, 2, 2 }, 0, -1, 1,
MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) {
ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
ASSERT_EQ(1, impl.getConfig().inConfs.size());
@@ -232,7 +231,7 @@ INSTANTIATE_TEST_CASE_P(
ASSERT_EQ(InferenceEngine::Layout::CHW, impl.getConfig().inConfs.at(0).desc.getLayout());
ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout());
} } },
- reshape_test_params{ { 2, 2 },{ 4 },{ 4 }, 0, -1, 2,
+ reshape_test_params{ { 2, 2 },{ 4 },{ 4 }, 0, -1, 1,
MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) {
ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
ASSERT_EQ(1, impl.getConfig().inConfs.size());
@@ -240,7 +239,7 @@ INSTANTIATE_TEST_CASE_P(
ASSERT_EQ(InferenceEngine::Layout::NC, impl.getConfig().inConfs.at(0).desc.getLayout());
ASSERT_EQ(InferenceEngine::Layout::C, impl.getConfig().outConfs.at(0).desc.getLayout());
} } },
- reshape_test_params{ { 1, 2, 2 },{ 4 },{ 4 }, 0, -1, 2,
+ reshape_test_params{ { 1, 2, 2 },{ 4 },{ 4 }, 0, -1, 1,
MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) {
ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
ASSERT_EQ(1, impl.getConfig().inConfs.size());
@@ -248,7 +247,7 @@ INSTANTIATE_TEST_CASE_P(
ASSERT_EQ(InferenceEngine::Layout::CHW, impl.getConfig().inConfs.at(0).desc.getLayout());
ASSERT_EQ(InferenceEngine::Layout::C, impl.getConfig().outConfs.at(0).desc.getLayout());
} } },
- reshape_test_params{ { 1, 1, 2, 2 },{ 4 },{ 4 }, 0, -1, 2,
+ reshape_test_params{ { 1, 1, 2, 2 },{ 4 },{ 4 }, 0, -1, 1,
MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) {
ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
ASSERT_EQ(1, impl.getConfig().inConfs.size());
@@ -256,7 +255,7 @@ INSTANTIATE_TEST_CASE_P(
ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout());
ASSERT_EQ(InferenceEngine::Layout::C, impl.getConfig().outConfs.at(0).desc.getLayout());
} } },
- reshape_test_params{ { 4, 2, 2 },{ 4, 4 },{ 4, 4 }, 0, -1, 2,
+ reshape_test_params{ { 4, 2, 2 },{ 4, 4 },{ 4, 4 }, 0, -1, 1,
MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) {
ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
ASSERT_EQ(1, impl.getConfig().inConfs.size());
@@ -264,7 +263,7 @@ INSTANTIATE_TEST_CASE_P(
ASSERT_EQ(InferenceEngine::Layout::CHW, impl.getConfig().inConfs.at(0).desc.getLayout());
ASSERT_EQ(InferenceEngine::Layout::NC, impl.getConfig().outConfs.at(0).desc.getLayout());
} } },
- reshape_test_params{ { 1, 4, 2, 2 },{ 4, 4 },{ 4, 4 }, 0, -1, 2,
+ reshape_test_params{ { 1, 4, 2, 2 },{ 4, 4 },{ 4, 4 }, 0, -1, 1,
MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) {
ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
ASSERT_EQ(1, impl.getConfig().inConfs.size());
@@ -272,7 +271,7 @@ INSTANTIATE_TEST_CASE_P(
ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout());
ASSERT_EQ(InferenceEngine::Layout::NC, impl.getConfig().outConfs.at(0).desc.getLayout());
} } },
- reshape_test_params{ { 1, 4, 2, 2 },{ 4, 2, 2 },{ 4, 2, 2 }, 0, -1, 2,
+ reshape_test_params{ { 1, 4, 2, 2 },{ 4, 2, 2 },{ 4, 2, 2 }, 0, -1, 1,
MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) {
ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
ASSERT_EQ(1, impl.getConfig().inConfs.size());
@@ -280,20 +279,28 @@ INSTANTIATE_TEST_CASE_P(
ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout());
ASSERT_EQ(InferenceEngine::Layout::CHW, impl.getConfig().outConfs.at(0).desc.getLayout());
} } },
- reshape_test_params{ { 1, 4, 2, 2 },{ 4, 2, 2, 1, 1 },{ 4, 2, 2, 1, 1 }, 0, -1, 2,
- MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) {
+ reshape_test_params{ { 1, 4, 2, 2 }, { 4, 2, 2, 1, 1 }, { 4, 2, 2, 1, 1 }, 0, -1, 1,
+ MKLDNNPlugin::impl_desc_type::unknown, { [](MKLDNNPlugin::PrimitiveDescInfo impl) {
ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
ASSERT_EQ(1, impl.getConfig().inConfs.size());
ASSERT_EQ(1, impl.getConfig().outConfs.size());
ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout());
- ASSERT_EQ(InferenceEngine::Layout::BLOCKED, impl.getConfig().outConfs.at(0).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCDHW, impl.getConfig().outConfs.at(0).desc.getLayout());
} } },
- reshape_test_params{ { 4, 2, 2, 1, 1 },{ 1, 4, 2, 2 },{ 1, 4, 2, 2 }, 0, -1, 2,
- MKLDNNPlugin::impl_desc_type::unknown,{ [](MKLDNNPlugin::PrimitiveDescInfo impl) {
+ reshape_test_params{ { 4, 2, 2, 1, 1 }, { 1, 4, 2, 2 }, { 1, 4, 2, 2 }, 0, -1, 1,
+ MKLDNNPlugin::impl_desc_type::unknown, { [](MKLDNNPlugin::PrimitiveDescInfo impl) {
ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
ASSERT_EQ(1, impl.getConfig().inConfs.size());
ASSERT_EQ(1, impl.getConfig().outConfs.size());
- ASSERT_EQ(InferenceEngine::Layout::BLOCKED, impl.getConfig().inConfs.at(0).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCDHW, impl.getConfig().inConfs.at(0).desc.getLayout());
ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout());
+ } } },
+ reshape_test_params{ { 1, 200 }, { 1, 200, 1, 1, 1 }, { 1, 200, 1, 1, 1 }, 0, -1, 1,
+ MKLDNNPlugin::impl_desc_type::unknown, { [](MKLDNNPlugin::PrimitiveDescInfo impl) {
+ ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
+ ASSERT_EQ(1, impl.getConfig().inConfs.size());
+ ASSERT_EQ(1, impl.getConfig().outConfs.size());
+ ASSERT_EQ(InferenceEngine::Layout::NC, impl.getConfig().inConfs.at(0).desc.getLayout());
+ ASSERT_EQ(InferenceEngine::Layout::NCDHW, impl.getConfig().outConfs.at(0).desc.getLayout());
} } }
));
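
Taken together, the expected layouts in these assertions track tensor rank: 1-D maps to C, 2-D to NC, 3-D to CHW, 4-D to NCHW, and 5-D to NCDHW. The 5-D reshape endpoints that previously reported the generic BLOCKED layout now report NCDHW, and the new {1, 200} to {1, 200, 1, 1, 1} case checks NC in and NCDHW out.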
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_roi_pooling_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_roi_pooling_test.cpp
index 98b3edefe..1706f5788 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_roi_pooling_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_roi_pooling_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_simplernms_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_simplernms_test.cpp
index ab257910d..7109bdc9d 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_simplernms_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_simplernms_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_softmax_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_softmax_test.cpp
index d91b9539f..1675b096d 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_softmax_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_softmax_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_split_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_split_test.cpp
index cf6345030..e253a820e 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_split_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_split_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -19,26 +18,11 @@ using namespace std;
using namespace mkldnn;
struct split_test_params {
- struct {
- size_t n;
- size_t c;
- size_t h;
- size_t w;
- } in;
-
- struct {
- size_t n;
- size_t c;
- size_t h;
- size_t w;
- } out1;
-
- struct {
- size_t n;
- size_t c;
- size_t h;
- size_t w;
- } out2;
+ // Formats: NCHW, NCDHW
+ vector<size_t> dims;
+ std::vector<vector<size_t>> outs;
+
+ int axis;
size_t num_prim_desc;
@@ -49,133 +33,120 @@ struct split_test_params {
};
template <typename data_t>
-void ref_split(InferenceEngine::TBlob<data_t> &src, InferenceEngine::TBlob<data_t> &dst1, InferenceEngine::TBlob<data_t> &dst2) {
+void ref_split(InferenceEngine::TBlob<data_t> &src, std::vector<InferenceEngine::TBlob<data_t>>& dsts, split_test_params& prm) {
const float * srcData = src.readOnly();
- int MB = dst1.dims()[dst1.dims().size() - 1];
-
- float * dstData1 = dst1.data();
- int dstSize1 = dst1.size() / MB;
+ int outerSize = 1;
+ for (int i = 0; i < prm.axis; i++)
+ outerSize *= src.dims()[i];
- float *dstData2 = dst2.data();
- int dstSize2 = dst2.size() / MB;
+ for (size_t osIdx = 0; osIdx < outerSize; osIdx++) {
+ for (size_t dstIdx = 0; dstIdx < dsts.size(); dstIdx++) {
+ float* dstData = dsts[dstIdx].data();
+ int innerSize = dsts[dstIdx].size() / outerSize;
- for (int b = 0; b < MB; b++) {
- for (size_t j = 0; j < dstSize1; j++, srcData++) {
- dstData1[b*dstSize1 + j] = *srcData;
- }
-
- for (size_t j = 0; j < dstSize2; j++, srcData++) {
- dstData2[b*dstSize1 + j] = *srcData;
+ for (size_t j = 0; j < innerSize; j++, srcData++) {
+ dstData[osIdx*innerSize + j] = *srcData;
+ }
}
}
}
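
The generalized reference decomposes the copy into outerSize blocks (the product of the dimensions before the split axis, assuming src.dims() is N-first here), each feeding innerSize contiguous values to every output in turn. A worked example for the {2, 20, 2, 5} case split on axis 1 into 10 + 10 channels:

    outerSize = 2                 // the batch dimension
    innerSize = 10 * 2 * 5 = 100  // per output, per outer block
    // source order: 100 values -> output 0, 100 -> output 1, repeated for batch 1

For axis 0 the product is empty, so outerSize = 1 and whole leading slices are copied in sequence.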
class MKLDNNGraphSplitTests: public TestsCommon,
public WithParamInterface<split_test_params> {
- // TODO: remove power layers from the test
std::string model_t = R"V0G0N(
-<net name="ConcatOnly" version="2" precision="FP32" batch="1">
+<net name="ConcatOnly" version="3" precision="FP32" batch="1">
<layers>
<layer name="in1" type="Input" precision="FP32" id="1">
<output>
<port id="1">
<dim>_IN_</dim>
<dim>_IC_</dim>
+ <dim>_ID_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
</output>
</layer>
<layer name="split" id="2" type="Split" precision="FP32">
- <split_data axis="1" PrimitivesPriority="_IMPLS_"/>
+ <split_data axis="_AXIS_" PrimitivesPriority="_IMPLS_"/>
<input>
<port id="1">
<dim>_IN_</dim>
<dim>_IC_</dim>
+ <dim>_ID_</dim>
<dim>_IH_</dim>
<dim>_IW_</dim>
</port>
</input>
<output>
- <port id="2">
- <dim>_ON1_</dim>
- <dim>_OC1_</dim>
- <dim>_OH1_</dim>
- <dim>_OW1_</dim>
- </port>
- <port id="3">
- <dim>_ON2_</dim>
- <dim>_OC2_</dim>
- <dim>_OH2_</dim>
- <dim>_OW2_</dim>
- </port>
- </output>
- </layer>
- <layer name="power1" id="3" type="Power" precision="FP32">
- <power_data power="1" scale="1" shift="0"/>
- <input>
- <port id="1">
- <dim>_ON1_</dim>
- <dim>_OC1_</dim>
- <dim>_OH1_</dim>
- <dim>_OW1_</dim>
- </port>
- </input>
- <output>
- <port id="2">
- <dim>_ON1_</dim>
- <dim>_OC1_</dim>
- <dim>_OH1_</dim>
- <dim>_OW1_</dim>
- </port>
- </output>
- </layer>
- <layer name="power2" id="4" type="Power" precision="FP32">
- <power_data power="1" scale="1" shift="0"/>
- <input>
- <port id="1">
- <dim>_ON2_</dim>
- <dim>_OC2_</dim>
- <dim>_OH2_</dim>
- <dim>_OW2_</dim>
- </port>
- </input>
- <output>
- <port id="2">
- <dim>_ON2_</dim>
- <dim>_OC2_</dim>
- <dim>_OH2_</dim>
- <dim>_OW2_</dim>
- </port>
+ _OP_
</output>
</layer>
</layers>
<edges>
<edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
- <edge from-layer="2" from-port="2" to-layer="3" to-port="1"/>
- <edge from-layer="2" from-port="3" to-layer="4" to-port="1"/>
</edges>
</net>
)V0G0N";
+ std::string port_t = R"V0G0N(
+<port id="_ID_">
+ <dim>_N_</dim>
+ <dim>_C_</dim>
+ <dim>_D_</dim>
+ <dim>_H_</dim>
+ <dim>_W_</dim>
+</port>
+)V0G0N";
+
protected:
std::string getModel(split_test_params p) {
std::string model = model_t;
- REPLACE_WITH_NUM(model, "_IN_", p.in.n);
- REPLACE_WITH_NUM(model, "_IC_", p.in.c);
- REPLACE_WITH_NUM(model, "_IW_", p.in.w);
- REPLACE_WITH_NUM(model, "_IH_", p.in.h);
-
- REPLACE_WITH_NUM(model, "_ON1_", p.out1.n);
- REPLACE_WITH_NUM(model, "_OC1_", p.out1.c);
- REPLACE_WITH_NUM(model, "_OH1_", p.out1.h);
- REPLACE_WITH_NUM(model, "_OW1_", p.out1.w);
-
- REPLACE_WITH_NUM(model, "_ON2_", p.out2.n);
- REPLACE_WITH_NUM(model, "_OC2_", p.out2.c);
- REPLACE_WITH_NUM(model, "_OH2_", p.out2.h);
- REPLACE_WITH_NUM(model, "_OW2_", p.out2.w);
+ auto dims_size = p.dims.size();
+
+ switch (dims_size) {
+ case 3:
+ REMOVE_LINE(model, "<dim>_IH_</dim>");
+ case 4:
+ REMOVE_LINE(model, "<dim>_ID_</dim>");
+ }
+ REPLACE_WITH_NUM(model, "_IN_", p.dims[0]);
+ REPLACE_WITH_NUM(model, "_IC_", p.dims[1]);
+ REPLACE_WITH_NUM(model, "_IW_", p.dims[dims_size - 1]);
+ switch (dims_size) {
+ case 5:
+ REPLACE_WITH_NUM(model, "_ID_", p.dims[dims_size - 3]);
+ case 4:
+ REPLACE_WITH_NUM(model, "_IH_", p.dims[dims_size - 2]);
+ }
+
+ std::string outPorts;
+ for (int idx = 0; idx < p.outs.size(); idx++) {
+ std::string outPort = port_t;
+ switch (dims_size) {
+ case 3:
+ REMOVE_LINE(outPort, "<dim>_H_</dim>");
+ case 4:
+ REMOVE_LINE(outPort, "<dim>_D_</dim>");
+ }
+ REPLACE_WITH_NUM(outPort, "_ID_", idx);
+ REPLACE_WITH_NUM(outPort, "_N_", p.outs[idx][0]);
+ REPLACE_WITH_NUM(outPort, "_C_", p.outs[idx][1]);
+ REPLACE_WITH_NUM(outPort, "_W_", p.outs[idx][dims_size - 1]);
+ switch (dims_size) {
+ case 5:
+ REPLACE_WITH_NUM(outPort, "_D_", p.outs[idx][dims_size - 3]);
+ case 4:
+ REPLACE_WITH_NUM(outPort, "_H_", p.outs[idx][dims_size - 2]);
+ }
+
+ outPorts += outPort;
+ }
+ REPLACE_WITH_STR(model, "_OP_", outPorts);
+
+ REPLACE_WITH_NUM(model, "_AXIS_", p.axis);
+
std::string impls;
for (const auto& preferType : p.preferTypes) {
if (!impls.empty())
@@ -195,7 +166,7 @@ protected:
std::string model = getModel(p);
InferenceEngine::CNNNetReader net_reader;
- ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+ net_reader.ReadNetwork(model.data(), model.length());
MKLDNNGraphTestClass graph;
graph.CreateGraph(net_reader.getNetwork());
@@ -212,16 +183,25 @@ protected:
}
ASSERT_LE(3, nodes.size());
- InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w};
+ InferenceEngine::SizeVector dims_src = p.dims;
+ InferenceEngine::Layout layout = InferenceEngine::ANY;
+ switch (p.dims.size()) {
+ case 4:
+ layout = InferenceEngine::NCHW;
+ break;
+ case 5:
+ layout = InferenceEngine::NCDHW;
+ break;
+ }
- InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
+ InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, dims_src);
src->allocate();
fill_data(src->buffer(), src->size());
InferenceEngine::BlobMap srcs;
srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
- InferenceEngine::TBlob<float>* srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
+ auto srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
if (srcPtr == nullptr)
FAIL() << "Cannot cast blob to TBlob<float>.";
@@ -229,33 +209,26 @@ protected:
InferenceEngine::OutputsDataMap out;
out = net_reader.getNetwork().getOutputsInfo();
InferenceEngine::BlobMap outputBlobs;
- auto it = out.begin();
-
- std::pair<std::string, InferenceEngine::DataPtr> item = *it;
-
- InferenceEngine::TBlob<float>::Ptr output1;
- output1 = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
- output1->allocate();
- outputBlobs[item.first] = output1;
-
- InferenceEngine::TBlob<float> dst_ref1(item.second->getTensorDesc());
- dst_ref1.allocate();
-
- item = *(++it);
- InferenceEngine::TBlob<float>::Ptr output2;
- output2 = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
- output2->allocate();
- outputBlobs[item.first] = output2;
-
- InferenceEngine::TBlob<float> dst_ref2(item.second->getTensorDesc());
- dst_ref2.allocate();
+ std::vector<InferenceEngine::TBlob<float>> dst_refs;
+ for (auto& item : out) {
+ InferenceEngine::TBlob<float>::Ptr output;
+ output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
+ output->allocate();
+ outputBlobs[item.first] = output;
+
+ InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
+ dst_ref.allocate();
+ dst_refs.push_back(dst_ref);
+ }
graph.Infer(srcs, outputBlobs);
- ref_split(*srcPtr, dst_ref1, dst_ref2);
+ ref_split(*srcPtr, dst_refs, p);
- compare(*output1, dst_ref1);
- compare(*output2, dst_ref2);
+ int ref_idx = 0;
+ for (auto& output : outputBlobs) {
+ compare(*output.second, dst_refs[ref_idx++], 0.0005f);
+ }
} catch (const InferenceEngine::details::InferenceEngineException &e) {
FAIL() << e.what();
}
@@ -269,9 +242,8 @@ INSTANTIATE_TEST_CASE_P(
::testing::Values(
split_test_params {
{1, 24, 2, 5},
- {1, 16, 2, 5},
- {1, 8, 2, 5},
- 3, MKLDNNPlugin::impl_desc_type::unknown, {}, {
+ {{1, 16, 2, 5}, {1, 8, 2, 5}},
+ 1, 3, MKLDNNPlugin::impl_desc_type::unknown, {}, {
[](MKLDNNPlugin::PrimitiveDescInfo impl) {
ASSERT_EQ(MKLDNNPlugin::impl_desc_type::ref, impl.getImplementationType());
ASSERT_EQ(1, impl.getConfig().inConfs.size());
@@ -300,9 +272,8 @@ INSTANTIATE_TEST_CASE_P(
},
split_test_params {
{1, 20, 2, 5},
- {1, 13, 2, 5},
- {1, 7, 2, 5},
- 2, MKLDNNPlugin::impl_desc_type::unknown, {}, {
+ {{1, 13, 2, 5}, {1, 7, 2, 5}},
+ 1, 2, MKLDNNPlugin::impl_desc_type::unknown, {}, {
[](MKLDNNPlugin::PrimitiveDescInfo impl) {
ASSERT_EQ(MKLDNNPlugin::impl_desc_type::ref, impl.getImplementationType());
ASSERT_EQ(1, impl.getConfig().inConfs.size());
@@ -323,9 +294,8 @@ INSTANTIATE_TEST_CASE_P(
},
split_test_params {
{1, 20, 2, 5},
- {1, 10, 2, 5},
- {1, 10, 2, 5},
- 2, MKLDNNPlugin::impl_desc_type::unknown, {}, {
+ {{1, 10, 2, 5}, {1, 10, 2, 5}},
+ 1, 2, MKLDNNPlugin::impl_desc_type::unknown, {}, {
[](MKLDNNPlugin::PrimitiveDescInfo impl) {
ASSERT_EQ(MKLDNNPlugin::impl_desc_type::ref, impl.getImplementationType());
ASSERT_EQ(1, impl.getConfig().inConfs.size());
@@ -346,9 +316,8 @@ INSTANTIATE_TEST_CASE_P(
},
split_test_params {
{2, 20, 2, 5},
- {2, 10, 2, 5},
- {2, 10, 2, 5},
- 2, MKLDNNPlugin::impl_desc_type::unknown, {}, {
+ {{2, 10, 2, 5}, {2, 10, 2, 5}},
+ 1, 2, MKLDNNPlugin::impl_desc_type::unknown, {}, {
[](MKLDNNPlugin::PrimitiveDescInfo impl) {
ASSERT_EQ(MKLDNNPlugin::impl_desc_type::ref, impl.getImplementationType());
ASSERT_EQ(1, impl.getConfig().inConfs.size());
@@ -369,27 +338,76 @@ INSTANTIATE_TEST_CASE_P(
},
split_test_params {
{1, 24, 2, 5},
- {1, 16, 2, 5},
- {1, 8, 2, 5},
- 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
+ {{1, 16, 2, 5}, {1, 8, 2, 5}},
+ 1, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
},
split_test_params {
{1, 20, 2, 5},
- {1, 13, 2, 5},
- {1, 7, 2, 5},
- 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
+ {{1, 13, 2, 5}, {1, 7, 2, 5}},
+ 1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
},
split_test_params {
{1, 20, 2, 5},
- {1, 10, 2, 5},
- {1, 10, 2, 5},
- 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
+ {{1, 10, 2, 5}, {1, 10, 2, 5}},
+ 1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
},
split_test_params {
{2, 20, 2, 5},
- {2, 10, 2, 5},
- {2, 10, 2, 5},
- 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}}));
+ {{2, 10, 2, 5}, {2, 10, 2, 5}},
+ 1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
+ },
+ split_test_params {
+ {2, 20, 2, 5},
+ {{2, 15, 2, 5}, {2, 5, 2, 5}},
+ 1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
+ },
+ split_test_params {
+ {9, 11, 7, 5},
+ {{3, 11, 7, 5}, {6, 11, 7, 5}},
+ 0, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
+ },
+ split_test_params {
+ {3, 11, 7, 5},
+ {{3, 11, 4, 5}, {3, 11, 3, 5}},
+ 2, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
+ },
+ split_test_params {
+ {3, 11, 7, 5},
+ {{3, 11, 7, 1}, {3, 11, 7, 4}},
+ 3, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
+ },
+ split_test_params {
+ {5, 6, 7, 15},
+ {{1, 6, 7, 15}, {2, 6, 7, 15}, {1, 6, 7, 15}, {1, 6, 7, 15}},
+ 0, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
+ },
+ split_test_params {
+ {5, 6, 7, 15},
+ {{5, 1, 7, 15}, {5, 2, 7, 15}, {5, 1, 7, 15}, {5, 2, 7, 15}},
+ 1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
+ },
+ split_test_params {
+ {5, 6, 7, 15},
+ {{5, 6, 3, 15}, {5, 6, 1, 15}, {5, 6, 2, 15}, {5, 6, 1, 15}},
+ 2, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
+ },
+ split_test_params {
+ {5, 6, 7, 15},
+ {{5, 6, 7, 5}, {5, 6, 7, 3}, {5, 6, 7, 4}, {5, 6, 7, 3}},
+ 3, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
+ },
+ split_test_params {
+ {5, 6, 7, 15},
+ {{5, 6, 7, 15}},
+ 1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}},
+ split_test_params {
+ {1, 32, 16, 16, 16},
+ {{1, 8, 16, 16, 16}, {1, 8, 16, 16, 16}, {1, 8, 16, 16, 16}, {1, 8, 16, 16, 16}},
+ 1, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}},
+ split_test_params {
+ {1, 32, 16, 16, 16},
+ {{1, 8, 16, 16, 16}, {1, 8, 16, 16, 16}, {1, 8, 16, 16, 16}, {1, 8, 16, 16, 16}},
+ 1, 3, MKLDNNPlugin::impl_desc_type::unknown, {}}));
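The initializer lists above imply the following shape for the updated parameter struct. Field names and types here are assumptions inferred from the values, not the actual header:

#include <functional>
#include <vector>

struct split_test_params {  // sketch; see the test's header for the real definition
    InferenceEngine::SizeVector dims;                       // input shape (4D or 5D)
    std::vector<InferenceEngine::SizeVector> outs;          // one shape per split output
    size_t axis;                                            // split axis, 0..rank-1
    size_t num_prim_desc;                                   // expected primitive descriptor count
    MKLDNNPlugin::impl_desc_type selectedType;
    std::vector<MKLDNNPlugin::impl_desc_type> preferTypes;
    std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;  // per-descriptor checks
};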
class MKLDNNGraphDynBatchSplitTests: public MKLDNNGraphSplitTests {
protected:
@@ -397,7 +415,7 @@ protected:
try {
split_test_params p = ::testing::WithParamInterface<split_test_params>::GetParam();
std::string model = getModel(p);
- size_t MB = p.in.n;
+ size_t MB = p.dims[0];
if (MB < 2)
MB = 2;
@@ -414,9 +432,18 @@ protected:
graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
graph.CreateGraph(net_reader.getNetwork());
- InferenceEngine::SizeVector dims_src = {MB, p.in.c, p.in.h, p.in.w};
+ InferenceEngine::SizeVector dims_src = p.dims;
+ InferenceEngine::Layout layout = InferenceEngine::ANY;
+ switch (p.dims.size()) {
+ case 4:
+ layout = InferenceEngine::NCHW;
+ break;
+ case 5:
+ layout = InferenceEngine::NCDHW;
+ break;
+ }
- InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
+ InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, layout, dims_src);
src->allocate();
fill_data(src->buffer(), src->size());
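Since this rank-to-layout switch now appears in several of these tests, a shared helper would capture it. A minimal sketch:

static InferenceEngine::Layout layoutForRank(size_t rank) {
    switch (rank) {
        case 4:  return InferenceEngine::NCHW;
        case 5:  return InferenceEngine::NCDHW;
        default: return InferenceEngine::ANY;  // other ranks fall back to ANY
    }
}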
@@ -465,9 +492,8 @@ INSTANTIATE_TEST_CASE_P(
::testing::Values(
split_test_params {
{1, 24, 2, 5},
- {1, 16, 2, 5},
- {1, 8, 2, 5},
- 3, MKLDNNPlugin::impl_desc_type::unknown, {}, {
+ {{1, 16, 2, 5}, {1, 8, 2, 5}},
+ 1, 3, MKLDNNPlugin::impl_desc_type::unknown, {}, {
[](MKLDNNPlugin::PrimitiveDescInfo impl) {
ASSERT_EQ(MKLDNNPlugin::impl_desc_type::ref, impl.getImplementationType());
ASSERT_EQ(1, impl.getConfig().inConfs.size());
@@ -496,9 +522,8 @@ INSTANTIATE_TEST_CASE_P(
},
split_test_params {
{1, 20, 2, 5},
- {1, 13, 2, 5},
- {1, 7, 2, 5},
- 2, MKLDNNPlugin::impl_desc_type::unknown, {}, {
+ {{1, 13, 2, 5}, {1, 7, 2, 5}},
+ 1, 2, MKLDNNPlugin::impl_desc_type::unknown, {}, {
[](MKLDNNPlugin::PrimitiveDescInfo impl) {
ASSERT_EQ(MKLDNNPlugin::impl_desc_type::ref, impl.getImplementationType());
ASSERT_EQ(1, impl.getConfig().inConfs.size());
@@ -519,9 +544,8 @@ INSTANTIATE_TEST_CASE_P(
},
split_test_params {
{1, 20, 2, 5},
- {1, 10, 2, 5},
- {1, 10, 2, 5},
- 2, MKLDNNPlugin::impl_desc_type::unknown, {}, {
+ {{1, 10, 2, 5}, {1, 10, 2, 5}},
+ 1, 2, MKLDNNPlugin::impl_desc_type::unknown, {}, {
[](MKLDNNPlugin::PrimitiveDescInfo impl) {
ASSERT_EQ(MKLDNNPlugin::impl_desc_type::ref, impl.getImplementationType());
ASSERT_EQ(1, impl.getConfig().inConfs.size());
@@ -542,9 +566,8 @@ INSTANTIATE_TEST_CASE_P(
},
split_test_params {
{2, 20, 2, 5},
- {2, 10, 2, 5},
- {2, 10, 2, 5},
- 2, MKLDNNPlugin::impl_desc_type::unknown, {}, {
+ {{2, 10, 2, 5}, {2, 10, 2, 5}},
+ 1, 2, MKLDNNPlugin::impl_desc_type::unknown, {}, {
[](MKLDNNPlugin::PrimitiveDescInfo impl) {
ASSERT_EQ(MKLDNNPlugin::impl_desc_type::ref, impl.getImplementationType());
ASSERT_EQ(1, impl.getConfig().inConfs.size());
@@ -564,25 +587,51 @@ INSTANTIATE_TEST_CASE_P(
}
},
split_test_params {
- {1, 24, 2, 5},
- {1, 16, 2, 5},
- {1, 8, 2, 5},
- 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
+ {2, 24, 2, 5},
+ {{2, 16, 2, 5}, {2, 8, 2, 5}},
+ 1, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
},
split_test_params {
{1, 20, 2, 5},
- {1, 13, 2, 5},
- {1, 7, 2, 5},
- 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
+ {{1, 13, 2, 5}, {1, 7, 2, 5}},
+ 1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
},
split_test_params {
{1, 20, 2, 5},
- {1, 10, 2, 5},
- {1, 10, 2, 5},
- 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
+ {{1, 10, 2, 5}, {1, 10, 2, 5}},
+ 1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
},
split_test_params {
{2, 20, 2, 5},
- {2, 10, 2, 5},
- {2, 10, 2, 5},
- 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}}));
+ {{2, 10, 2, 5}, {2, 10, 2, 5}},
+ 1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
+ },
+ split_test_params {
+ {2, 20, 2, 5},
+ {{2, 15, 2, 5}, {2, 5, 2, 5}},
+ 1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
+ },
+ split_test_params {
+ {3, 11, 7, 5},
+ {{3, 11, 4, 5}, {3, 11, 3, 5}},
+ 2, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
+ },
+ split_test_params {
+ {3, 11, 7, 5},
+ {{3, 11, 7, 1}, {3, 11, 7, 4}},
+ 3, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
+ },
+ split_test_params {
+ {5, 6, 7, 15},
+ {{5, 1, 7, 15}, {5, 2, 7, 15}, {5, 1, 7, 15}, {5, 2, 7, 15}},
+ 1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
+ },
+ split_test_params {
+ {5, 6, 7, 15},
+ {{5, 6, 3, 15}, {5, 6, 1, 15}, {5, 6, 2, 15}, {5, 6, 1, 15}},
+ 2, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
+ },
+ split_test_params {
+ {5, 6, 7, 15},
+ {{5, 6, 7, 5}, {5, 6, 7, 3}, {5, 6, 7, 4}, {5, 6, 7, 3}},
+ 3, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}}));
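Note that this dynamic-batch instantiation contains no axis-0 splits, unlike the static suite above — presumably because dynamic batching resizes dimension 0, so splitting along the batch axis would conflict with the varying batch size.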
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_tile_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_tile_test.cpp
index cc8d3c473..4bb207dde 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_tile_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_tile_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/structure/graph_conv_depthwise_fusing_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/structure/graph_conv_depthwise_fusing_test.cpp
index fb9e0dbc1..2974b3784 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/structure/graph_conv_depthwise_fusing_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/structure/graph_conv_depthwise_fusing_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/structure/graph_dw_conv_fusing_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/structure/graph_dw_conv_fusing_test.cpp
index f30b2899d..bc653a11e 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/structure/graph_dw_conv_fusing_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/structure/graph_dw_conv_fusing_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/structure/graph_optimization_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/structure/graph_optimization_test.cpp
index fc890e801..72c0c8ed9 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/structure/graph_optimization_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/structure/graph_optimization_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/structure/graph_structure_test.cpp b/inference-engine/tests/unit/engines/mkldnn/graph/structure/graph_structure_test.cpp
index 1d3780f7e..52bcb45af 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/structure/graph_structure_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/structure/graph_structure_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -511,7 +510,6 @@ TEST_F(MKLDNNGraphStructureTests, TestNoRedundantReordersBeforeConcat) {
compare(*output, *dstOut);
// Compare for batch2
- graph = {};
net_reader.getNetwork().setBatchSize(2);
graph.CreateGraph(net_reader.getNetwork());
desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, {2, 3, 7, 7}, InferenceEngine::NCHW);
@@ -812,7 +810,8 @@ TEST_F(MKLDNNGraphStructureTests, TestNoRedundantReordersBeforeDWConvolution) {
compare(*output, *dstOut);
}
-TEST_F(MKLDNNGraphStructureTests, TestNoRedundantReordersBeforeDWDeconvolution) {
+// TODO: change the hardcoded reference to a dynamically generated one
+TEST_F(MKLDNNGraphStructureTests, DISABLED_TestNoRedundantReordersBeforeDWDeconvolution) {
std::string model = R"V0G0N(
<net name="deconv" version="2" batch="1">
<layers>
@@ -944,7 +943,7 @@ TEST_F(MKLDNNGraphStructureTests, TestNoRedundantReordersBeforeDWDeconvolution)
outputBlobs["deconv2"] = output2;
graph.Infer(srcs, outputBlobs);
-
+
std::vector<float> refDst1 = {-0.042f, -0.563f, -0.150f, 0.396f, 0.224f, 0.229f, -0.335f, -0.390f, -0.213f, 0.959f, 0.520f, -0.507f,
-0.200f, -0.202f, 0.441f, 0.499f, 0.000f, 0.000f, 0.000f, 0.000f, 0.363f, 0.141f, -0.497f, -0.332f, -0.311f,
0.423f, 0.693f, -0.012f, -0.328f, -0.106f, 0.518f, 0.353f, 0.000f, 0.000f, 0.000f, 0.000f, 0.050f, -0.352f,
@@ -1238,7 +1237,7 @@ TEST_F(MKLDNNGraphStructureTests, TestOutputAfterInplacePlusConcat) {
}
TEST_F(MKLDNNGraphStructureTests, TestResnetPart) {
- std::string model = R"V0G0N(
+ std::string modelB = R"V0G0N(
<net name="ResNet-152" version="2" batch="1">
<layers>
<layer name="input" type="Input" precision="FP32" id="0">
@@ -1531,7 +1530,8 @@ TEST_F(MKLDNNGraphStructureTests, TestResnetPart) {
</output>
<weights offset="401152" size="147456"/>
<biases offset="548608" size="256"/>
- </layer>
+ </layer> )V0G0N";
+ std::string modelE = R"V0G0N(
<layer name="res2b_branch2b_relu" type="ReLU" precision="FP32" id="29">
<input>
<port id="58">
@@ -1706,6 +1706,7 @@ TEST_F(MKLDNNGraphStructureTests, TestResnetPart) {
</net>
)V0G0N";
+ std::string model = modelB + modelE;
InferenceEngine::CNNNetReader net_reader;
ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
@@ -2268,7 +2269,6 @@ TEST_F(MKLDNNGraphStructureTests, TestResultsAfterGroupedConvWithStrides) {
graph.Infer(srcs, outputBlobs);
// Compare for batch2
- graph = {};
net_reader.getNetwork().setBatchSize(2);
graph.CreateGraph(net_reader.getNetwork());
desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, {2, 24, 80, 80}, InferenceEngine::NCHW);
@@ -3277,7 +3277,6 @@ TEST_F(MKLDNNGraphStructureTests, TestFailedPartDPN92) {
}
// Compare for batch2
- graph = {};
net_reader.getNetwork().setBatchSize(2);
graph.CreateGraph(net_reader.getNetwork());
desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, {2, 32, 14, 14}, InferenceEngine::NCHW);
@@ -4081,7 +4080,6 @@ TEST_F(MKLDNNGraphStructureTests, TestFailedPartPlateRecognitionBarrier0001) {
}
// Compare for batch2
- graph = {};
net_reader.getNetwork().setBatchSize(2);
graph.CreateGraph(net_reader.getNetwork());
desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, {2, 128, 1, 88}, InferenceEngine::NCHW);
@@ -4334,10 +4332,10 @@ TEST_F(MKLDNNGraphStructureTests, TestFailedVNect0002) {
auto& nodes = graph.getNodes();
for (auto &node : nodes) {
if ( node->getType() == MKLDNNPlugin::Output &&
- (node->getName() == "out_slice_heatmaps.1" ||
+ (node->getName() == "out_slice_heatmaps.0" ||
+ node->getName() == "out_slice_heatmaps.1" ||
node->getName() == "out_slice_heatmaps.2" ||
- node->getName() == "out_slice_heatmaps.3" ||
- node->getName() == "out_slice_heatmaps.4" ) ) {
+ node->getName() == "out_slice_heatmaps.3" ) ) {
outputs_num++;
}
}
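The renames in this hunk and the Split-related hunks below follow one pattern: split output ports are now numbered from 0 instead of from 1 ("out_slice_heatmaps.0".."3", "Split.0"/"Split.1", "split.0").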
@@ -4812,9 +4810,9 @@ TEST_F(MKLDNNGraphStructureTests, TestConstantLayerAsOutput) {
net_reader.SetWeights(weights_ptr);
- std::shared_ptr<InferenceEngine::IExtension> cpuExt(new InferenceEngine::Extensions::Cpu::CpuExtensions());
+ InferenceEngine::Extension cpuExt(make_so_name("cpu_extension"));
MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
- extMgr->AddExtension(cpuExt);
+ extMgr->AddExtension(InferenceEngine::IExtensionPtr(&cpuExt, [](InferenceEngine::IExtension*){}));
MKLDNNGraphTestClass graph;
graph.CreateGraph(net_reader.getNetwork(), extMgr);
@@ -6081,8 +6079,8 @@ TEST_F(MKLDNNGraphStructureTests, TestCreateGraphWithSplit) {
ASSERT_EQ(nodes[4].get()->getType(), MKLDNNPlugin::Type::Output);
InferenceEngine::OutputsDataMap outputs = reader.getNetwork().getOutputsInfo();
- const std::pair<std::string, InferenceEngine::DataPtr> splitOutputItem1 = std::make_pair("Split.1", outputs["Split.1"]);
- const std::pair<std::string, InferenceEngine::DataPtr> splitOutputItem2 = std::make_pair("Split.2", outputs["Split.2"]);
+ const std::pair<std::string, InferenceEngine::DataPtr> splitOutputItem1 {"Split.0", outputs["Split.0"]};
+ const std::pair<std::string, InferenceEngine::DataPtr> splitOutputItem2 {"Split.1", outputs["Split.1"]};
std::vector<float> splitExpectedOutputData1(batchSize);
std::vector<float> splitExpectedOutputData2(batchSize);
@@ -6219,7 +6217,7 @@ TEST_F(MKLDNNGraphStructureTests, TestCreateGraphWithFakeOutput) {
InferenceEngine::OutputsDataMap outputs = reader.getNetwork().getOutputsInfo();
const std::pair<std::string, InferenceEngine::DataPtr> reshapeOutputItem = std::make_pair("Reshape", outputs["Reshape"]);
- const std::string splitOutputName = std::string("Split.") + (splitFromPortNumber == 1 ? "2" : "1");
+ const std::string splitOutputName = std::string("Split.") + (splitFromPortNumber == 1 ? "1" : "0");
const std::pair<std::string, InferenceEngine::DataPtr> splitOutputItem = std::make_pair(splitOutputName, outputs[splitOutputName]);
std::vector<float> reshapeExpectedOutputData(batchSize);
@@ -6399,9 +6397,9 @@ TEST_F(MKLDNNGraphStructureTests, TestCreateGraphWithMultipleData) {
ASSERT_EQ(nodes[3].get()->getType(), MKLDNNPlugin::Type::Reshape);
ASSERT_EQ(nodes[4].get()->getType(), MKLDNNPlugin::Type::Output);
ASSERT_EQ(nodes[5].get()->getType(), MKLDNNPlugin::Type::Reorder);
- ASSERT_EQ(nodes[6].get()->getType(), MKLDNNPlugin::Type::Output);
- ASSERT_EQ(nodes[7].get()->getType(), MKLDNNPlugin::Type::Reorder);
- ASSERT_EQ(nodes[8].get()->getType(), MKLDNNPlugin::Type::Reshape);
+ ASSERT_EQ(nodes[6].get()->getType(), MKLDNNPlugin::Type::Reshape);
+ ASSERT_EQ(nodes[7].get()->getType(), MKLDNNPlugin::Type::Output);
+ ASSERT_EQ(nodes[8].get()->getType(), MKLDNNPlugin::Type::Reorder);
ASSERT_EQ(nodes[9].get()->getType(), MKLDNNPlugin::Type::Output);
ASSERT_EQ(nodes[10].get()->getType(), MKLDNNPlugin::Type::Reshape);
ASSERT_EQ(nodes[11].get()->getType(), MKLDNNPlugin::Type::Output);
@@ -6411,7 +6409,7 @@ TEST_F(MKLDNNGraphStructureTests, TestCreateGraphWithMultipleData) {
std::make_pair("reshape1", outputs.find("reshape1")->second),
std::make_pair("reshape2", outputs.find("reshape2")->second),
std::make_pair("reshape3", outputs.find("reshape3")->second),
- std::make_pair("split.1", outputs.find("split.1")->second)
+ std::make_pair("split.0", outputs.find("split.0")->second)
};
std::vector<std::vector<float>> expectedOutputData = {
diff --git a/inference-engine/tests/unit/engines/mkldnn/graph/test_graph.hpp b/inference-engine/tests/unit/engines/mkldnn/graph/test_graph.hpp
index 620be63cc..b0d7bfb83 100644
--- a/inference-engine/tests/unit/engines/mkldnn/graph/test_graph.hpp
+++ b/inference-engine/tests/unit/engines/mkldnn/graph/test_graph.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -101,26 +100,81 @@ public:
// need to retain converted blobs until infer finish
std::vector<InferenceEngine::Blob::Ptr> convertedInputs;
for (auto input : inputs) {
- InferenceEngine::TBlob<float> *in_f = nullptr;
switch (input.second->precision()) {
- case InferenceEngine::Precision::FP32:
+ case InferenceEngine::Precision::FP32: {
+ InferenceEngine::TBlob<float> *in_f = nullptr;
in_f = dynamic_cast<InferenceEngine::TBlob<float> *>(input.second.get());
- break;
- default:
- THROW_IE_EXCEPTION << "Unsupported input precision " << input.second->precision();
- }
+ if (in_f == nullptr) {
+ FAIL() << "Input data precision not supported. Expected float.";
+ }
- switch (input.second->precision()) {
- case InferenceEngine::Precision::FP32: break;
- default: FAIL() << "Unsupported precision";
- }
+ if (in_f->readOnly() == nullptr) {
+ THROW_IE_EXCEPTION << "Input data was not allocated.";
+ }
+ }
+ break;
+ case InferenceEngine::Precision::I32: {
+ InferenceEngine::TBlob<int32_t> *in_f = nullptr;
+ in_f = dynamic_cast<InferenceEngine::TBlob<int32_t> *>(input.second.get());
+ if (in_f == nullptr) {
+ FAIL() << "Input data precision not supported. Expected float.";
+ }
- if (in_f == nullptr) {
- FAIL() << "Input data precision not supported. Expected float.";
- }
+ if (in_f->readOnly() == nullptr) {
+ THROW_IE_EXCEPTION << "Input data was not allocated.";
+ }
+ }
+ break;
+ case InferenceEngine::Precision::U16: {
+ InferenceEngine::TBlob<uint16_t> *in_f = nullptr;
+ in_f = dynamic_cast<InferenceEngine::TBlob<uint16_t> *>(input.second.get());
+ if (in_f == nullptr) {
+ FAIL() << "Input data precision not supported. Expected float.";
+ }
+
+ if (in_f->readOnly() == nullptr) {
+ THROW_IE_EXCEPTION << "Input data was not allocated.";
+ }
+ }
+ break;
+ case InferenceEngine::Precision::I16: {
+ InferenceEngine::TBlob<int16_t> *in_f = nullptr;
+ in_f = dynamic_cast<InferenceEngine::TBlob<int16_t> *>(input.second.get());
+ if (in_f == nullptr) {
+ FAIL() << "Input data precision not supported. Expected float.";
+ }
+
+ if (in_f->readOnly() == nullptr) {
+ THROW_IE_EXCEPTION << "Input data was not allocated.";
+ }
+ }
+ break;
+ case InferenceEngine::Precision::U8: {
+ InferenceEngine::TBlob<uint8_t> *in_f = nullptr;
+ in_f = dynamic_cast<InferenceEngine::TBlob<uint8_t> *>(input.second.get());
+ if (in_f == nullptr) {
+ FAIL() << "Input data precision not supported. Expected float.";
+ }
- if (in_f->readOnly() == nullptr) {
- THROW_IE_EXCEPTION << "Input data was not allocated.";
+ if (in_f->readOnly() == nullptr) {
+ THROW_IE_EXCEPTION << "Input data was not allocated.";
+ }
+ }
+ break;
+ case InferenceEngine::Precision::I8: {
+ InferenceEngine::TBlob<int8_t> *in_f = nullptr;
+ in_f = dynamic_cast<InferenceEngine::TBlob<int8_t> *>(input.second.get());
+ if (in_f == nullptr) {
+ FAIL() << "Input data precision not supported. Expected float.";
+ }
+
+ if (in_f->readOnly() == nullptr) {
+ THROW_IE_EXCEPTION << "Input data was not allocated.";
+ }
+ }
+ break;
+ default:
+ THROW_IE_EXCEPTION << "Unsupported input precision " << input.second->precision();
}
PushInputData(input.first, input.second, batch);
@@ -207,4 +261,4 @@ public:
}
}
}
-};
\ No newline at end of file
+};
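The per-precision branches added above differ only in the TBlob element type, so a template helper could collapse them. A sketch under the same checks — note that gtest's FAIL() must live in a void function and here would abort only the helper, not the caller:

template <typename T>
static void checkInputBlobSketch(const InferenceEngine::Blob::Ptr& blob) {
    auto* typed = dynamic_cast<InferenceEngine::TBlob<T>*>(blob.get());
    if (typed == nullptr) {
        FAIL() << "Input data precision not supported.";
    }
    if (typed->readOnly() == nullptr) {
        THROW_IE_EXCEPTION << "Input data was not allocated.";
    }
}
// e.g. case InferenceEngine::Precision::I32: checkInputBlobSketch<int32_t>(input.second); break;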
diff --git a/inference-engine/tests/unit/engines/mkldnn/mkldnn_primitive_test.cpp b/inference-engine/tests/unit/engines/mkldnn/mkldnn_primitive_test.cpp
index 331ee4847..fd517de87 100644
--- a/inference-engine/tests/unit/engines/mkldnn/mkldnn_primitive_test.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/mkldnn_primitive_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/engines/mkldnn/test_layers.cpp b/inference-engine/tests/unit/engines/mkldnn/test_layers.cpp
index 3d9383224..7db41747c 100644
--- a/inference-engine/tests/unit/engines/mkldnn/test_layers.cpp
+++ b/inference-engine/tests/unit/engines/mkldnn/test_layers.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/graph_tools/graph_copy_tests.cpp b/inference-engine/tests/unit/graph_tools/graph_copy_tests.cpp
index 631f68f44..2971eb7a3 100644
--- a/inference-engine/tests/unit/graph_tools/graph_copy_tests.cpp
+++ b/inference-engine/tests/unit/graph_tools/graph_copy_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -304,57 +303,6 @@ TEST(CNNSpecificGraphCopyTests, copyPreprocess) {
ASSERT_FLOAT_EQ(pp[2]->meanValue, 122);
}
-
-TEST(CNNSpecificGraphCopyTests, copyV1IR) {
- CNNNetReader netReader;
- //define minimal network with Clamp layer
- const std::string SINGLE_LAYER_MODEL = R"V0G0N(
- <net name="SingleLayer" version="1" batch="1">
- <input>
- <dim>3</dim>
- <dim>224</dim>
- <dim>224</dim>
- </input>
- <layers>
- <layer id="1" name="ClampLayer" precision="FP16" type="Clamp">
- <data max="6" min="0"/>
- <input>
- <port id="0">
- <dim>3</dim>
- <dim>224</dim>
- <dim>224</dim>
- </port>
- </input>
- <output>
- <port id="1">
- <dim>3</dim>
- <dim>224</dim>
- <dim>224</dim>
- </port>
- </output>
- </layer>
- </layers>
- <edges>
- </edges>
- </net>
- )V0G0N";
- ASSERT_NO_THROW(netReader.ReadNetwork(SINGLE_LAYER_MODEL.data(), SINGLE_LAYER_MODEL.length()));
- ASSERT_TRUE(netReader.isParseSuccess());
- auto network = netReader.getNetwork();
-
- //copy the network
- struct EmptyStruct {};
- auto visitor = [&](CNNLayerPtr lp) { return injectData<EmptyStruct>(lp); };
- auto copied_net_ptr = CNNNetCopy(network, visitor);
- auto copied_net = CNNNetwork(copied_net_ptr.get());
-
- //check that Clamp layer was properly copied
- auto layer = std::dynamic_pointer_cast<ClampLayer>(copied_net.getLayerByName("ClampLayer"));
- ASSERT_NE(layer, nullptr) << "Could not perform dynamic cast from base pointer to Clamp layer pointer. "
- "Net copy could be incorrect.";
-}
-
-
TEST(CNNSpecificGraphCopyTests, copyNetworkWithDeconvolution) {
CNNNetReader netReader;
//define minimal network with deconvolution layer
diff --git a/inference-engine/tests/unit/graph_tools/graph_test_base.hpp b/inference-engine/tests/unit/graph_tools/graph_test_base.hpp
index dc1b40e84..94c0876a6 100644
--- a/inference-engine/tests/unit/graph_tools/graph_test_base.hpp
+++ b/inference-engine/tests/unit/graph_tools/graph_test_base.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/graph_tools/graph_tools_test.cpp b/inference-engine/tests/unit/graph_tools/graph_tools_test.cpp
index 306adb2b6..94e9c516a 100644
--- a/inference-engine/tests/unit/graph_tools/graph_tools_test.cpp
+++ b/inference-engine/tests/unit/graph_tools/graph_tools_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/alocator_tests.cpp b/inference-engine/tests/unit/inference_engine_tests/alocator_tests.cpp
index 6fc2b8eb1..178f11676 100644
--- a/inference-engine/tests/unit/inference_engine_tests/alocator_tests.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/alocator_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/blob_proxy_test.cpp b/inference-engine/tests/unit/inference_engine_tests/blob_proxy_test.cpp
index 427098bd8..9de222c50 100644
--- a/inference-engine/tests/unit/inference_engine_tests/blob_proxy_test.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/blob_proxy_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -47,23 +46,14 @@ TEST_F(BlobProxyTests, convertByteBlobToFloat) {
}
}
-TEST_F(BlobProxyTests, shouldThrowOnAllocate) {
- SizeVector v = {1, 2, 3};
- auto allocator = createMockAllocator();
-
- TBlobProxy<float> proxy(Precision::FP32, C, TBlob<float>(Precision::FP32, CHW, v, dynamic_pointer_cast<IAllocator>(allocator)), 2, {2});
-
- EXPECT_THROW(((Blob&)proxy).allocate(), InferenceEngineException);
-}
-
-TEST_F(BlobProxyTests, shouldThrowOnDeAllocate)
+TEST_F(BlobProxyTests, shouldNotDeAllocate)
{
SizeVector v = {1, 2, 3};
auto allocator = createMockAllocator();
TBlobProxy<float> proxy(Precision::FP32, C, TBlob<float>(Precision::FP32, CHW, v, dynamic_pointer_cast<IAllocator>(allocator)), 2, {2});
- EXPECT_THROW(((Blob&)proxy).deallocate(), InferenceEngineException);
+ EXPECT_EQ(((Blob&)proxy).deallocate(), false);
}
@@ -236,15 +226,6 @@ TEST_F(BlobProxyTests, canReturnConstantData) {
ASSERT_NE(proxy.cbuffer().as<const void*>(), nullptr);
}
-TEST_F(BlobProxyTests, noAllocDeallocLogic) {
- TBlob<float>::Ptr b(new TBlob<float>(Precision::FP32, C));
- b->set({ 1.0f, 2.0f, 3.0f });
- TBlobProxy<uint8_t> proxy(Precision::U8, C, b, 0, { b->byteSize() });
- ASSERT_ANY_THROW(((Blob*) &proxy)->allocate());
- ASSERT_ANY_THROW(((Blob*) &proxy)->deallocate());
-}
-
-
TEST_F(BlobProxyTests, canIterateOverData) {
TBlob<uint8_t>::Ptr b(new TBlob<uint8_t >(Precision::FP32, C));
b->set({ 1, 2, 3 });
diff --git a/inference-engine/tests/unit/inference_engine_tests/blob_test.cpp b/inference-engine/tests/unit/inference_engine_tests/blob_test.cpp
index 59015c265..e104c4cd1 100644
--- a/inference-engine/tests/unit/inference_engine_tests/blob_test.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/blob_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -35,7 +34,6 @@ protected:
return shared_ptr<MockAllocator>(new MockAllocator());
}
-
public:
};
@@ -489,4 +487,4 @@ TEST_F(BlobTests, makeRoiBlobWrongSize) {
// try to create ROI blob with wrong size
ROI roi = {0, 1, 1, 4, 4}; // cropped picture with: id = 0, (x,y) = (1,1), sizeX (W) = 4, sizeY (H) = 4
ASSERT_THROW(make_shared_blob(blob, roi), InferenceEngine::details::InferenceEngineException);
-}
\ No newline at end of file
+}
diff --git a/inference-engine/tests/unit/inference_engine_tests/caslesseq_tests.cpp b/inference-engine/tests/unit/inference_engine_tests/caslesseq_tests.cpp
index 98037672a..b0c23e583 100644
--- a/inference-engine/tests/unit/inference_engine_tests/caslesseq_tests.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/caslesseq_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/cnn_network_test.cpp b/inference-engine/tests/unit/inference_engine_tests/cnn_network_test.cpp
index 69bfdb194..4a4b3d4c6 100644
--- a/inference-engine/tests/unit/inference_engine_tests/cnn_network_test.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/cnn_network_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/async_infer_request_base_tests.cpp b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/async_infer_request_base_tests.cpp
index beed3e5c4..425b06276 100644
--- a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/async_infer_request_base_tests.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/async_infer_request_base_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/async_infer_request_tests.cpp b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/async_infer_request_tests.cpp
index 00fe58f38..b1c93683e 100644
--- a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/async_infer_request_tests.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/async_infer_request_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/async_infer_request_thread_safe_default_tests.cpp b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/async_infer_request_thread_safe_default_tests.cpp
index 5ac02ae19..594ee1912 100644
--- a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/async_infer_request_thread_safe_default_tests.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/async_infer_request_thread_safe_default_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -170,9 +169,9 @@ TEST_F(InferRequestThreadSafeDefaultTests, callbackTakesOKIfAsyncRequestWasOK) {
testRequest), [](IInferRequest *p) { p->Release(); });
testRequest->SetPointerToPublicInterface(asyncRequest);
- testRequest->SetCompletionCallback({[](InferenceEngine::IInferRequest::Ptr request, StatusCode status) {
+ testRequest->SetCompletionCallback([](InferenceEngine::IInferRequest::Ptr request, StatusCode status) {
ASSERT_EQ((int) StatusCode::OK, status);
- }});
+ });
EXPECT_CALL(*mockInferRequestInternal.get(), InferImpl()).Times(1);
testRequest->StartAsync();
diff --git a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/async_infer_request_thread_safe_internal.cpp b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/async_infer_request_thread_safe_internal.cpp
index 285938e72..49cdadc5c 100644
--- a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/async_infer_request_thread_safe_internal.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/async_infer_request_thread_safe_internal.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/callback_manager_tests.cpp b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/callback_manager_tests.cpp
index 0d20efd84..7d1013793 100644
--- a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/callback_manager_tests.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/callback_manager_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executable_network_base_tests.cpp b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executable_network_base_tests.cpp
index 320d3df37..f4c472e72 100644
--- a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executable_network_base_tests.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executable_network_base_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executable_network_tests.cpp b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executable_network_tests.cpp
index d31ca1792..399ec7a52 100644
--- a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executable_network_tests.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executable_network_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executable_network_thread_safe_async_only_tests.cpp b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executable_network_thread_safe_async_only_tests.cpp
index 76c44fe0b..2542017f5 100644
--- a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executable_network_thread_safe_async_only_tests.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executable_network_thread_safe_async_only_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executable_network_thread_safe_tests.cpp b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executable_network_thread_safe_tests.cpp
index 23544e611..3c09801ec 100644
--- a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executable_network_thread_safe_tests.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executable_network_thread_safe_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executor_manager_tests.cpp b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executor_manager_tests.cpp
index 909380aa4..450bcd3ec 100644
--- a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executor_manager_tests.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/executor_manager_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/iinference_plugin_internal_tests.cpp b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/iinference_plugin_internal_tests.cpp
index 701085c8b..a76857b46 100644
--- a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/iinference_plugin_internal_tests.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/iinference_plugin_internal_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/memory_state_tests.cpp b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/memory_state_tests.cpp
index 33a431f6a..799f0bd76 100644
--- a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/memory_state_tests.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/memory_state_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/plugin_base_tests.cpp b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/plugin_base_tests.cpp
index 6779fea56..3df6a6021 100644
--- a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/plugin_base_tests.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/plugin_base_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_common_tests.cpp b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_common_tests.cpp
index b535cc97e..e0918ab8b 100644
--- a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_common_tests.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_common_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_executor_tests.cpp b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_executor_tests.cpp
index b9d4e2602..0cbc51634 100644
--- a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_executor_tests.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_executor_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_synchronizer_tests.cpp b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_synchronizer_tests.cpp
index f94b7b890..47b1ef213 100644
--- a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_synchronizer_tests.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_synchronizer_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_tests.cpp b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_tests.cpp
index a2836e1b0..792e134a8 100644
--- a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_tests.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_tests_utils.hpp b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_tests_utils.hpp
index 6523c2631..5f4238f6d 100644
--- a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_tests_utils.hpp
+++ b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_tests_utils.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_with_stages_tests.cpp b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_with_stages_tests.cpp
index 38e4be252..6f665e651 100644
--- a/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_with_stages_tests.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/cpp_interfaces/task_with_stages_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/data_test.cpp b/inference-engine/tests/unit/inference_engine_tests/data_test.cpp
index 263859b0f..883986141 100644
--- a/inference-engine/tests/unit/inference_engine_tests/data_test.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/data_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -93,3 +92,21 @@ TEST_F(DataTests, canSetNotEmptyDimsForBlockingDescBlocked) {
TEST_F(DataTests, canSetNotEmptyDimsForBlockingDescNCHW) {
ASSERT_NO_THROW(BlockingDesc(notEmptyDims, NCHW));
}
+
+TEST_F(DataTests, setPrecision) {
+ Data data(data_name, emptyDims, Precision::FP32, Layout::NCHW);
+
+ EXPECT_EQ(Precision::FP32, data.precision);
+ EXPECT_EQ(Precision::FP32, data.getPrecision());
+ EXPECT_EQ(Precision::FP32, data.getTensorDesc().getPrecision());
+
+ data.setPrecision(Precision::FP16);
+ EXPECT_EQ(Precision::FP16, data.precision);
+ EXPECT_EQ(Precision::FP16, data.getPrecision());
+ EXPECT_EQ(Precision::FP16, data.getTensorDesc().getPrecision());
+
+ data.precision = Precision::Q78;
+ EXPECT_EQ(Precision::Q78, data.precision);
+ EXPECT_EQ(Precision::Q78, data.getPrecision());
+ EXPECT_EQ(Precision::Q78, data.getTensorDesc().getPrecision());
+}
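A user-level sketch of the three synchronized views the new test asserts (the header name is an assumption):

#include <ie_data.h>

void precisionViewsSketch() {
    InferenceEngine::Data d("d", InferenceEngine::SizeVector{},
                            InferenceEngine::Precision::FP32,
                            InferenceEngine::Layout::NCHW);
    d.setPrecision(InferenceEngine::Precision::FP16);  // setter updates the field and the TensorDesc
    d.precision = InferenceEngine::Precision::Q78;     // legacy field write propagates the same way
}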
diff --git a/inference-engine/tests/unit/inference_engine_tests/debug_tests.cpp b/inference-engine/tests/unit/inference_engine_tests/debug_tests.cpp
index 7833b9025..5acaeabe3 100644
--- a/inference-engine/tests/unit/inference_engine_tests/debug_tests.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/debug_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/device_tests.cpp b/inference-engine/tests/unit/inference_engine_tests/device_tests.cpp
index 13f4a8a6d..c83d89aae 100644
--- a/inference-engine/tests/unit/inference_engine_tests/device_tests.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/device_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/exception_test.cpp b/inference-engine/tests/unit/inference_engine_tests/exception_test.cpp
index 8dff71f14..fc93d4881 100644
--- a/inference-engine/tests/unit/inference_engine_tests/exception_test.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/exception_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/inference_engine_plugin_test.cpp b/inference-engine/tests/unit/inference_engine_tests/inference_engine_plugin_test.cpp
index baa5e8cff..a23b74c55 100644
--- a/inference-engine/tests/unit/inference_engine_tests/inference_engine_plugin_test.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/inference_engine_plugin_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/inference_engine_test.cpp b/inference-engine/tests/unit/inference_engine_tests/inference_engine_test.cpp
index a0ea44f03..ab307cf40 100644
--- a/inference-engine/tests/unit/inference_engine_tests/inference_engine_test.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/inference_engine_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/layer_transform_test.cpp b/inference-engine/tests/unit/inference_engine_tests/layer_transform_test.cpp
index a62750224..fcb5875da 100644
--- a/inference-engine/tests/unit/inference_engine_tests/layer_transform_test.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/layer_transform_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/layers_test.cpp b/inference-engine/tests/unit/inference_engine_tests/layers_test.cpp
index 9a39c1ebf..6d18b6422 100644
--- a/inference-engine/tests/unit/inference_engine_tests/layers_test.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/layers_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -353,33 +352,56 @@ TEST_F(LayersTests, poolIRv2BackwardCompatibility) {
pool._stride[Y_AXIS] = 3u;
ASSERT_EQ(pool._stride_y, 3u);
}
+
TEST_F(LayersTests, canGetPadBeginForConvolution) {
ConvolutionLayer layer(getDefaultParamsForLayer());
+ PropertyVector<unsigned> ref{{1, 2}};
+ layer._padding = ref;
+
+ auto allPads = getPaddings(layer);
+
+ ASSERT_EQ(allPads.begin, ref);
+}
+
+TEST_F(LayersTests, canGetPadEndForConvolution) {
+ ConvolutionLayer layer(getDefaultParamsForLayer());
+ PropertyVector<unsigned> ref{{1, 2}};
+ layer._pads_end = ref;
+
+ auto allPads = getPaddings(layer);
+
+ ASSERT_EQ(allPads.end, ref);
+}
+
+TEST_F(LayersTests, canGetPad3DBeginForConvolution) {
+ ConvolutionLayer layer(getDefaultParamsForLayer());
PropertyVector<unsigned> ref;
ref.insert(X_AXIS, 1);
ref.insert(Y_AXIS, 2);
+ ref.insert(Z_AXIS, 3);
layer._padding = ref;
- auto allPads = getConvPaddings(layer);
+ auto allPads = getPaddings(layer);
ASSERT_EQ(allPads.begin, ref);
}
-TEST_F(LayersTests, canGetPadEndForConvolution) {
+TEST_F(LayersTests, canGetPad3DEndForConvolution) {
ConvolutionLayer layer(getDefaultParamsForLayer());
PropertyVector<unsigned> ref;
ref.insert(X_AXIS, 1);
ref.insert(Y_AXIS, 2);
+ ref.insert(Z_AXIS, 3);
layer._pads_end = ref;
- auto allPads = getConvPaddings(layer);
+ auto allPads = getPaddings(layer);
ASSERT_EQ(allPads.end, ref);
}
TEST_F(LayersTests, returnDefaultPadForEmptyConvolution) {
ConvolutionLayer layer(getDefaultParamsForLayer());
- auto allPads = getConvPaddings(layer);
+ auto allPads = getPaddings(layer);
PropertyVector<unsigned> ref_begin(2, 0u);
PropertyVector<unsigned> ref_end;
ASSERT_EQ(allPads.begin, ref_begin);
@@ -389,16 +411,21 @@ TEST_F(LayersTests, returnDefaultPadForEmptyConvolution) {
TEST_F(LayersTests, returnEmptyPadForValidPadConvolution) {
ConvolutionLayer layer(getDefaultParamsForLayer());
layer.params["auto_pad"] = "valid";
- auto allPads = getConvPaddings(layer);
- PropertyVector<unsigned> ref(2,0);
+ auto allPads = getPaddings(layer);
+ PropertyVector<unsigned> ref(2,0u);
ASSERT_EQ(allPads.begin, ref);
ASSERT_EQ(allPads.end, ref);
+
+ PropertyVector<unsigned> ref3D(2,0u);
+ layer._kernel.insert(Z_AXIS, 0u);
+ ASSERT_EQ(allPads.begin, ref3D);
+ ASSERT_EQ(allPads.end, ref3D);
}
TEST_F(LayersTests, throwOnSamePadForEmptyConvolution) {
ConvolutionLayer layer(getDefaultParamsForLayer());
layer.params["auto_pad"] = "same_upper";
- ASSERT_THROW(getConvPaddings(layer), details::InferenceEngineException);
+ ASSERT_THROW(getPaddings(layer), details::InferenceEngineException);
}
TEST_F(LayersTests, throwOnInvalidDimsSamePadForConvolution) {
@@ -406,7 +433,7 @@ TEST_F(LayersTests, throwOnInvalidDimsSamePadForConvolution) {
layer.params["auto_pad"] = "same_upper";
auto emptyData = std::make_shared<InferenceEngine::Data>("", Precision::UNSPECIFIED);
layer.insData.push_back(emptyData);
- ASSERT_THROW(getConvPaddings(layer), details::InferenceEngineException);
+ ASSERT_THROW(getPaddings(layer), details::InferenceEngineException);
}
TEST_F(LayersTests, throwOn2DSamePadForConvolution) {
@@ -415,7 +442,7 @@ TEST_F(LayersTests, throwOn2DSamePadForConvolution) {
auto notEmptyData = std::make_shared<InferenceEngine::Data>("", SizeVector{1, 1}, Precision::UNSPECIFIED,
Layout::NC);
layer.insData.push_back(notEmptyData);
- ASSERT_THROW(getConvPaddings(layer), details::InferenceEngineException);
+ ASSERT_THROW(getPaddings(layer), details::InferenceEngineException);
}
TEST_F(LayersTests, throwWithNotEnoughParamsSamePadForConvolution) {
@@ -423,7 +450,12 @@ TEST_F(LayersTests, throwWithNotEnoughParamsSamePadForConvolution) {
layer.params["auto_pad"] = "same_upper";
auto notEmptyData = std::make_shared<InferenceEngine::Data>("", SizeVector{1, 2, 3, 4}, Precision::UNSPECIFIED);
layer.insData.push_back(notEmptyData);
- ASSERT_NO_THROW(getConvPaddings(layer));
+ ASSERT_NO_THROW(getPaddings(layer));
+
+ auto notEmptyData3D = std::make_shared<InferenceEngine::Data>("", SizeVector{1, 2, 3, 4, 5}, Precision::UNSPECIFIED, Layout::NCDHW);
+ layer._kernel.insert(Z_AXIS, 0u);
+ layer.insData[0] = notEmptyData3D;
+ ASSERT_NO_THROW(getPaddings(layer));
}
// parameters are from a real model, like Mobilenet-SSD
@@ -433,19 +465,39 @@ TEST_F(LayersTests, canGetSamePadForConvolutionEvenInput) {
TensorDesc tensorDesc(Precision::UNSPECIFIED, SizeVector{1, 144, 160, 160}, Layout::NCHW);
auto notEmptyData = std::make_shared<InferenceEngine::Data>("", tensorDesc);
layer.insData.push_back(notEmptyData);
- layer._dilation.insert(X_AXIS, 1);
- layer._dilation.insert(Y_AXIS, 1);
- layer._kernel.insert(X_AXIS, 3);
- layer._kernel.insert(Y_AXIS, 3);
- layer._stride.insert(X_AXIS, 2);
- layer._stride.insert(Y_AXIS, 2);
+ layer._dilation = PropertyVector<unsigned>{{1, 1}};
+ layer._kernel = PropertyVector<unsigned>{{3, 3}};
+ layer._stride = PropertyVector<unsigned>{{2, 2}};
- auto pad = getConvPaddings(layer);
+ auto pad = getPaddings(layer);
ASSERT_EQ(pad.begin, PropertyVector<unsigned>(2, 0));
ASSERT_EQ(pad.end, PropertyVector<unsigned>(2, 1));
}
+// parameters are from a real model, like V-Net
+TEST_F(LayersTests, canGetSamePadForConvolutionEvenInput3D) {
+ ConvolutionLayer layer(getDefaultParamsForLayer());
+ layer.params["auto_pad"] = "same_upper";
+ TensorDesc tensorDesc(Precision::UNSPECIFIED, SizeVector{1, 6, 190, 190, 20}, Layout::NCDHW);
+ auto notEmptyData = std::make_shared<InferenceEngine::Data>("", tensorDesc);
+ layer.insData.push_back(notEmptyData);
+ layer._dilation.insert(X_AXIS, 1u);
+ layer._dilation.insert(Y_AXIS, 1u);
+ layer._dilation.insert(Z_AXIS, 1u);
+ layer._kernel.insert(X_AXIS, 5u);
+ layer._kernel.insert(Y_AXIS, 5u);
+ layer._kernel.insert(Z_AXIS, 5u);
+ layer._stride.insert(X_AXIS, 1u);
+ layer._stride.insert(Y_AXIS, 1u);
+ layer._stride.insert(Z_AXIS, 1u);
+
+ auto pad = getPaddings(layer);
+
+ ASSERT_EQ(pad.begin, PropertyVector<unsigned>(3, 2u));
+ ASSERT_EQ(pad.end, PropertyVector<unsigned>(3, 2u));
+}
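The expected pads in these same_upper tests follow the standard formula total = max(0, (ceil(in/stride) - 1)*stride + kernel - in), with begin = total/2 and the extra cell going to the end. A sketch (not the library routine) checked against the asserted values:

static void samePadSketch(size_t in, size_t kernel, size_t stride,
                          size_t& begin, size_t& end) {
    size_t out = (in + stride - 1) / stride;        // ceil(in / stride)
    size_t needed = (out - 1) * stride + kernel;
    size_t total = needed > in ? needed - in : 0;
    begin = total / 2;
    end = total - begin;
}
// V-Net 3D case: in=190, kernel=5, stride=1 -> total=4 -> (begin, end) = (2, 2)
// Even 2D case:  in=160, kernel=3, stride=2 -> total=1 -> (begin, end) = (0, 1)
// Odd 2D case:   in=75,  kernel=3, stride=2 -> total=2 -> (begin, end) = (1, 1)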
+
// parameters are from a real model, like Mobilenet-SSD
TEST_F(LayersTests, canGetSamePadForConvolutionOddInput) {
ConvolutionLayer layer(getDefaultParamsForLayer());
@@ -453,16 +505,83 @@ TEST_F(LayersTests, canGetSamePadForConvolutionOddInput) {
TensorDesc tensorDesc(Precision::UNSPECIFIED, SizeVector{1, 144, 75, 75}, Layout::NCHW);
auto notEmptyData = std::make_shared<InferenceEngine::Data>("", tensorDesc);
layer.insData.push_back(notEmptyData);
- layer._dilation.insert(X_AXIS, 1);
- layer._dilation.insert(Y_AXIS, 1);
- layer._kernel.insert(X_AXIS, 3);
- layer._kernel.insert(Y_AXIS, 3);
- layer._stride.insert(X_AXIS, 2);
- layer._stride.insert(Y_AXIS, 2);
+ layer._dilation = PropertyVector<unsigned>{{1, 1}};
+ layer._kernel = PropertyVector<unsigned>{{3, 3}};
+ layer._stride = PropertyVector<unsigned>{{2, 2}};
+ PropertyVector<unsigned> ref(2, 1);
+
+ auto pad = getPaddings(layer);
+
+ ASSERT_EQ(pad.begin, ref);
+ ASSERT_EQ(pad.end, ref);
+}
+
+TEST_F(LayersTests, canGetSamePadForDeConvolutionEvenInput) {
+ DeconvolutionLayer layer(getDefaultParamsForLayer());
+ layer.params["auto_pad"] = "same_upper";
+ TensorDesc tensorDesc(Precision::UNSPECIFIED, SizeVector{1, 144, 160, 160}, Layout::NCHW);
+ auto notEmptyData = std::make_shared<InferenceEngine::Data>("", tensorDesc);
+ layer.insData.push_back(notEmptyData);
+ layer._dilation = PropertyVector<unsigned>{{1, 1}};
+ layer._kernel = PropertyVector<unsigned>{{3, 3}};
+ layer._stride = PropertyVector<unsigned>{{2, 2}};
+
+ auto pad = getPaddings(layer);
+
+ ASSERT_EQ(pad.begin, PropertyVector<unsigned>(2, 0));
+ ASSERT_EQ(pad.end, PropertyVector<unsigned>(2, 1));
+}
+
+TEST_F(LayersTests, canGetSamePadForDeConvolutionOddInput) {
+ DeconvolutionLayer layer(getDefaultParamsForLayer());
+ layer.params["auto_pad"] = "same_upper";
+ TensorDesc tensorDesc(Precision::UNSPECIFIED, SizeVector{1, 144, 75, 75}, Layout::NCHW);
+ auto notEmptyData = std::make_shared<InferenceEngine::Data>("", tensorDesc);
+ layer.insData.push_back(notEmptyData);
+ layer._dilation = PropertyVector<unsigned>{{1, 1}};
+ layer._kernel = PropertyVector<unsigned>{{3, 3}};
+ layer._stride = PropertyVector<unsigned>{{2, 2}};
PropertyVector<unsigned> ref(2, 1);
- auto pad = getConvPaddings(layer);
+ auto pad = getPaddings(layer);
ASSERT_EQ(pad.begin, ref);
ASSERT_EQ(pad.end, ref);
}
+
+TEST_F(LayersTests, canGetSamePadForPoolingEvenInput) {
+ PoolingLayer layer(getDefaultParamsForLayer());
+ layer.params["auto_pad"] = "same_upper";
+ TensorDesc tensorDesc(Precision::UNSPECIFIED, SizeVector{1, 144, 160, 160}, Layout::NCHW);
+ auto notEmptyData = std::make_shared<InferenceEngine::Data>("", tensorDesc);
+ layer.insData.push_back(notEmptyData);
+ layer._kernel = PropertyVector<unsigned>{{3, 3}};
+ layer._stride = PropertyVector<unsigned>{{2, 2}};
+
+ auto pad = getPaddings(layer);
+
+ ASSERT_EQ(pad.begin, PropertyVector<unsigned>(2, 0));
+ ASSERT_EQ(pad.end, PropertyVector<unsigned>(2, 1));
+}
+
+TEST_F(LayersTests, canGetSamePadForPoolingOddInput) {
+ PoolingLayer layer(getDefaultParamsForLayer());
+ layer.params["auto_pad"] = "same_upper";
+ TensorDesc tensorDesc(Precision::UNSPECIFIED, SizeVector{1, 144, 75, 75}, Layout::NCHW);
+ auto notEmptyData = std::make_shared<InferenceEngine::Data>("", tensorDesc);
+ layer.insData.push_back(notEmptyData);
+ layer._kernel = PropertyVector<unsigned>{{3, 3}};
+ layer._stride = PropertyVector<unsigned>{{2, 2}};
+ PropertyVector<unsigned> ref(2, 1);
+
+ auto pad = getPaddings(layer);
+
+ ASSERT_EQ(pad.begin, ref);
+ ASSERT_EQ(pad.end, ref);
+}
+
+
+TEST_F(LayersTests, cannotGetPadForUnsupportedLayer) {
+ FullyConnectedLayer layer(getDefaultParamsForLayer());
+ ASSERT_ANY_THROW(getPaddingsImpl(layer));
+}
\ No newline at end of file
diff --git a/inference-engine/tests/unit/inference_engine_tests/locked_memory_test.cpp b/inference-engine/tests/unit/inference_engine_tests/locked_memory_test.cpp
index 4ba4688a2..7a7ee5e7b 100644
--- a/inference-engine/tests/unit/inference_engine_tests/locked_memory_test.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/locked_memory_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/plugin_dispatcher_tests.cpp b/inference-engine/tests/unit/inference_engine_tests/plugin_dispatcher_tests.cpp
index e60a80595..b54aa383e 100644
--- a/inference-engine/tests/unit/inference_engine_tests/plugin_dispatcher_tests.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/plugin_dispatcher_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -14,16 +13,19 @@
using namespace InferenceEngine;
using namespace ::testing;
-class PluginDispatcherTests : public ::testing::Test {};
+class PluginDispatcherTests : public ::testing::Test {
+public:
+ const std::string nameExt(const std::string& name) { return name + IE_BUILD_POSTFIX; }
+};
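For illustration only — the actual IE_BUILD_POSTFIX value comes from the build system, and "d" for Debug builds is an assumption:

#define IE_BUILD_POSTFIX "d"  // assumed Debug postfix, normally set by CMake
#include <cassert>
#include <string>

int main() {
    auto nameExt = [](const std::string& name) { return name + IE_BUILD_POSTFIX; };
    assert(nameExt("mock_engine") == "mock_engined");
    // the dispatcher then looks for libmock_engined.so / mock_engined.dll
}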
TEST_F(PluginDispatcherTests, canLoadMockPlugin) {
PluginDispatcher dispatcher({ "", "./", "./lib" });
- ASSERT_NO_THROW(dispatcher.getPluginByName("mock_engine"));
+ ASSERT_NO_THROW(dispatcher.getPluginByName(nameExt("mock_engine")));
}
TEST_F(PluginDispatcherTests, throwsOnUnknownPlugin) {
PluginDispatcher dispatcher({ "./", "./lib" });
- ASSERT_THROW(dispatcher.getPluginByName("unknown_plugin"), InferenceEngine::details::InferenceEngineException);
+ ASSERT_THROW(dispatcher.getPluginByName(nameExt("unknown_plugin")), InferenceEngine::details::InferenceEngineException);
}
TEST_F(PluginDispatcherTests, throwsOnDeviceWithoutPlugins) {
@@ -42,12 +44,12 @@ TEST_F(PluginDispatcherTests, triesToLoadEveryPluginSuitableForDevice) {
ON_CALL(disp, getPluginByName(_)).WillByDefault(ThrowException());
#ifdef ENABLE_MKL_DNN
- EXPECT_CALL(disp, getPluginByName("MKLDNNPlugin")).Times(1);
+ EXPECT_CALL(disp, getPluginByName(nameExt("MKLDNNPlugin"))).Times(1);
#endif
#ifdef ENABLE_OPENVX_CVE
- EXPECT_CALL(disp, getPluginByName("OpenVXPluginCVE")).Times(1);
+ EXPECT_CALL(disp, getPluginByName(nameExt("OpenVXPluginCVE"))).Times(1);
#elif defined ENABLE_OPENVX
- EXPECT_CALL(disp, getPluginByName("OpenVXPlugin")).Times(1);
+ EXPECT_CALL(disp, getPluginByName(nameExt("OpenVXPlugin"))).Times(1);
#endif
ASSERT_THROW(disp.getSuitablePlugin(TargetDevice::eCPU), InferenceEngine::details::InferenceEngineException);
}
@@ -56,7 +58,7 @@ TEST_F(PluginDispatcherTests, triesToLoadEveryPluginSuitableForDevice) {
TEST_F(PluginDispatcherTests, returnsIfLoadSuccessfull) {
MockDispatcher disp({ "./", "./lib" });
PluginDispatcher dispatcher({ "", "./", "./lib" });
- auto ptr = dispatcher.getPluginByName("mock_engine");
+ auto ptr = dispatcher.getPluginByName(nameExt("mock_engine"));
EXPECT_CALL(disp, getPluginByName(_)).WillOnce(Return(ptr));
ASSERT_NO_THROW(disp.getSuitablePlugin(TargetDevice::eCPU));
diff --git a/inference-engine/tests/unit/inference_engine_tests/pointer_test.cpp b/inference-engine/tests/unit/inference_engine_tests/pointer_test.cpp
index c317c9cb7..78985fe6a 100644
--- a/inference-engine/tests/unit/inference_engine_tests/pointer_test.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/pointer_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/pre_allocator_test.cpp b/inference-engine/tests/unit/inference_engine_tests/pre_allocator_test.cpp
index ed03e138a..42e06a04d 100644
--- a/inference-engine/tests/unit/inference_engine_tests/pre_allocator_test.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/pre_allocator_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/precision_test.cpp b/inference-engine/tests/unit/inference_engine_tests/precision_test.cpp
index 84d69c0b7..a044b95ee 100644
--- a/inference-engine/tests/unit/inference_engine_tests/precision_test.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/precision_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/preprocess_test.cpp b/inference-engine/tests/unit/inference_engine_tests/preprocess_test.cpp
index d39aab4c4..70ef0dccc 100644
--- a/inference-engine/tests/unit/inference_engine_tests/preprocess_test.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/preprocess_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/range_iterator_tests.cpp b/inference-engine/tests/unit/inference_engine_tests/range_iterator_tests.cpp
index 0368df7af..0a10c8cdf 100644
--- a/inference-engine/tests/unit/inference_engine_tests/range_iterator_tests.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/range_iterator_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/response_buffer_test.cpp b/inference-engine/tests/unit/inference_engine_tests/response_buffer_test.cpp
index 6e25efe0d..4087637d8 100644
--- a/inference-engine/tests/unit/inference_engine_tests/response_buffer_test.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/response_buffer_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/shared_object_loader_test.cpp b/inference-engine/tests/unit/inference_engine_tests/shared_object_loader_test.cpp
index a70d3e315..cdea8de81 100644
--- a/inference-engine/tests/unit/inference_engine_tests/shared_object_loader_test.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/shared_object_loader_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/so_pointer_tests.cpp b/inference-engine/tests/unit/inference_engine_tests/so_pointer_tests.cpp
index 374f82784..ed0e35249 100644
--- a/inference-engine/tests/unit/inference_engine_tests/so_pointer_tests.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/so_pointer_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/inference_engine_tests/tensor_desc_test.cpp b/inference-engine/tests/unit/inference_engine_tests/tensor_desc_test.cpp
index 04f30a815..16bd43ba3 100644
--- a/inference-engine/tests/unit/inference_engine_tests/tensor_desc_test.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/tensor_desc_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -46,7 +45,7 @@ TEST_F(TensorDescTests, CreateEmptyBlob) {
ASSERT_EQ(blob->getTensorDesc().getLayout(), Layout::NCHW);
}
-TEST_F(TensorDescTests, CreateBlockedBlob) {
+TEST_F(TensorDescTests, CreateBlockedBlobNCHW) {
TensorDesc desc(Precision::FP32, {1, 4, 2, 1}, {{1, 2, 2, 1, 2}, {0, 1, 2, 3, 1}});
float data[8] = {1, 2, 3, 4, 5, 6, 7, 8};
Blob::Ptr blockedBlob = make_shared_blob<float>(desc, data);
@@ -58,6 +57,18 @@ TEST_F(TensorDescTests, CreateBlockedBlob) {
ASSERT_EQ(Layout::BLOCKED, blockedBlob->layout());
}
+TEST_F(TensorDescTests, CreateBlockedBlobNCDHW) {
+ TensorDesc desc(Precision::FP32, {1, 4, 2, 2, 1}, {{1, 2, 2, 2, 1, 2}, {0, 1, 2, 3, 4, 1}});
+ float data[8] = {1, 2, 3, 4, 5, 6, 7, 8};
+ Blob::Ptr blockedBlob = make_shared_blob<float>(desc, data);
+ Blob::Ptr ncdhwBlob = make_shared_blob<float>({Precision::FP32, {1, 4, 2, 2, 1}, Layout::NCDHW}, data);
+ ASSERT_NE(blockedBlob->getTensorDesc().offset(6), ncdhwBlob->getTensorDesc().offset(6));
+ ASSERT_EQ(5, blockedBlob->getTensorDesc().offset(6));
+ ASSERT_EQ(6, ncdhwBlob->getTensorDesc().offset(6));
+ ASSERT_EQ(Layout::NCDHW, ncdhwBlob->layout());
+ ASSERT_EQ(Layout::BLOCKED, blockedBlob->layout());
+}
+
TEST_F(TensorDescTests, CompareNHWCandNCHWLayouts) {
TensorDesc descNCHW(Precision::FP32, {1, 3, 4, 2}, Layout::NCHW);
TensorDesc descNHWC(Precision::FP32, {1, 3, 4, 2}, Layout::NHWC);
@@ -70,3 +81,16 @@ TEST_F(TensorDescTests, CompareNHWCandNCHWLayouts) {
ASSERT_EQ(descNCHW.getBlockingDesc().getOrder(), nchw);
ASSERT_EQ(descNHWC.getBlockingDesc().getOrder(), nhwc);
}
+
+TEST_F(TensorDescTests, CompareNDHWCandNCDHWLayouts) {
+ TensorDesc descNCDHW(Precision::FP32, {1, 3, 4, 4, 2}, Layout::NCDHW);
+ TensorDesc descNDHWC(Precision::FP32, {1, 3, 4, 4, 2}, Layout::NDHWC);
+ SizeVector ncdhw = {0, 1, 2, 3, 4};
+ SizeVector ndhwc = {0, 2, 3, 4, 1};
+
+ ASSERT_NE(descNCDHW, descNDHWC);
+ ASSERT_NE(descNCDHW.getBlockingDesc(), descNDHWC.getBlockingDesc());
+ ASSERT_NE(descNCDHW.getBlockingDesc().getOrder(), descNDHWC.getBlockingDesc().getOrder());
+ ASSERT_EQ(descNCDHW.getBlockingDesc().getOrder(), ncdhw);
+ ASSERT_EQ(descNDHWC.getBlockingDesc().getOrder(), ndhwc);
+}
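Editor's note on the new NCDHW cases: the offset assertions follow mechanically from the blocking descriptor. For dims {1, 4, 2, 2, 1}, block dims {1, 2, 2, 2, 1, 2} with order {0, 1, 2, 3, 4, 1} split the channel axis into blocks of two, which reorders memory relative to plain NCDHW. A standalone sketch of the arithmetic (row-major strides assumed, matching the test; this is not the Inference Engine implementation):

    #include <cassert>
    #include <cstddef>

    int main() {
        // Element #6 of a {1,4,2,2,1} tensor decomposes as (n,c,d,h,w) = (0,1,1,0,0).
        size_t n = 0, c = 1, d = 1, h = 0, w = 0;

        // Plain NCDHW strides for {1,4,2,2,1} are {16,4,2,1,1}.
        size_t plain = n*16 + c*4 + d*2 + h*1 + w*1;          // == 6

        // Blocked: block dims {1,2,2,2,1,2} give strides {16,8,4,2,2,1},
        // with the channel index split as c = 2*cb + c2.
        size_t cb = c / 2, c2 = c % 2;
        size_t blocked = n*16 + cb*8 + d*4 + h*2 + w*2 + c2;  // == 5

        assert(plain == 6 && blocked == 5);  // the values asserted in the test
        return 0;
    }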
diff --git a/inference-engine/tests/unit/inference_engine_tests/util_test.cpp b/inference-engine/tests/unit/inference_engine_tests/util_test.cpp
index 8bd11a311..d62e0a1af 100644
--- a/inference-engine/tests/unit/inference_engine_tests/util_test.cpp
+++ b/inference-engine/tests/unit/inference_engine_tests/util_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mem_solver/mem_solver_test.cpp b/inference-engine/tests/unit/mem_solver/mem_solver_test.cpp
index fa3fc111c..8ffab0f61 100644
--- a/inference-engine/tests/unit/mem_solver/mem_solver_test.cpp
+++ b/inference-engine/tests/unit/mem_solver/mem_solver_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_async_infer_request_default.hpp b/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_async_infer_request_default.hpp
index fd530ccc3..5141a1f55 100644
--- a/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_async_infer_request_default.hpp
+++ b/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_async_infer_request_default.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_async_infer_request_internal.hpp b/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_async_infer_request_internal.hpp
index 2d7fb5a12..a8adfbb54 100644
--- a/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_async_infer_request_internal.hpp
+++ b/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_async_infer_request_internal.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_async_infer_request_thread_safe_internal.hpp b/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_async_infer_request_thread_safe_internal.hpp
index 86a40a8af..720c58411 100644
--- a/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_async_infer_request_thread_safe_internal.hpp
+++ b/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_async_infer_request_thread_safe_internal.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_executable_network_internal.hpp b/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_executable_network_internal.hpp
index aacc7b01a..08bd36731 100644
--- a/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_executable_network_internal.hpp
+++ b/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_executable_network_internal.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_executable_thread_safe_async_only.hpp b/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_executable_thread_safe_async_only.hpp
index 45f75c505..d6658bc7c 100644
--- a/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_executable_thread_safe_async_only.hpp
+++ b/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_executable_thread_safe_async_only.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_executable_thread_safe_default.hpp b/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_executable_thread_safe_default.hpp
index f909c33a5..f67323b7e 100644
--- a/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_executable_thread_safe_default.hpp
+++ b/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_executable_thread_safe_default.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_infer_request_internal.hpp b/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_infer_request_internal.hpp
index e20330dd6..42e299ba0 100644
--- a/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_infer_request_internal.hpp
+++ b/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_infer_request_internal.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_inference_plugin_internal.hpp b/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_inference_plugin_internal.hpp
index 6e7a750bb..bf3b54038 100644
--- a/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_inference_plugin_internal.hpp
+++ b/inference-engine/tests/unit/mocks/cpp_interfaces/impl/mock_inference_plugin_internal.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/cpp_interfaces/interface/mock_iasync_infer_request_internal.hpp b/inference-engine/tests/unit/mocks/cpp_interfaces/interface/mock_iasync_infer_request_internal.hpp
index 13b01f042..cf37848e6 100644
--- a/inference-engine/tests/unit/mocks/cpp_interfaces/interface/mock_iasync_infer_request_internal.hpp
+++ b/inference-engine/tests/unit/mocks/cpp_interfaces/interface/mock_iasync_infer_request_internal.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp b/inference-engine/tests/unit/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp
index 2326ef822..c5316bf9e 100644
--- a/inference-engine/tests/unit/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp
+++ b/inference-engine/tests/unit/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/cpp_interfaces/interface/mock_iinfer_request_internal.hpp b/inference-engine/tests/unit/mocks/cpp_interfaces/interface/mock_iinfer_request_internal.hpp
index 3caee4216..dd1bb497e 100644
--- a/inference-engine/tests/unit/mocks/cpp_interfaces/interface/mock_iinfer_request_internal.hpp
+++ b/inference-engine/tests/unit/mocks/cpp_interfaces/interface/mock_iinfer_request_internal.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/cpp_interfaces/interface/mock_imemory_state_internal.hpp b/inference-engine/tests/unit/mocks/cpp_interfaces/interface/mock_imemory_state_internal.hpp
index 3b122de09..03a4043e2 100644
--- a/inference-engine/tests/unit/mocks/cpp_interfaces/interface/mock_imemory_state_internal.hpp
+++ b/inference-engine/tests/unit/mocks/cpp_interfaces/interface/mock_imemory_state_internal.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/cpp_interfaces/mock_plugin_impl.hpp b/inference-engine/tests/unit/mocks/cpp_interfaces/mock_plugin_impl.hpp
index 796cd6f81..66c9910bc 100644
--- a/inference-engine/tests/unit/mocks/cpp_interfaces/mock_plugin_impl.hpp
+++ b/inference-engine/tests/unit/mocks/cpp_interfaces/mock_plugin_impl.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/cpp_interfaces/mock_task_executor.hpp b/inference-engine/tests/unit/mocks/cpp_interfaces/mock_task_executor.hpp
index 439f18c25..34ccb5cad 100644
--- a/inference-engine/tests/unit/mocks/cpp_interfaces/mock_task_executor.hpp
+++ b/inference-engine/tests/unit/mocks/cpp_interfaces/mock_task_executor.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/cpp_interfaces/mock_task_synchronizer.hpp b/inference-engine/tests/unit/mocks/cpp_interfaces/mock_task_synchronizer.hpp
index aec5bf896..2d34f1e42 100644
--- a/inference-engine/tests/unit/mocks/cpp_interfaces/mock_task_synchronizer.hpp
+++ b/inference-engine/tests/unit/mocks/cpp_interfaces/mock_task_synchronizer.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/mock_allocator.hpp b/inference-engine/tests/unit/mocks/mock_allocator.hpp
index 9588151d6..ad53afbff 100644
--- a/inference-engine/tests/unit/mocks/mock_allocator.hpp
+++ b/inference-engine/tests/unit/mocks/mock_allocator.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/mock_error_listener.hpp b/inference-engine/tests/unit/mocks/mock_error_listener.hpp
index 8dfe3fd2b..420fc228a 100644
--- a/inference-engine/tests/unit/mocks/mock_error_listener.hpp
+++ b/inference-engine/tests/unit/mocks/mock_error_listener.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/mock_iasync_infer_request.hpp b/inference-engine/tests/unit/mocks/mock_iasync_infer_request.hpp
index e1eb9ad03..571a7d4b0 100644
--- a/inference-engine/tests/unit/mocks/mock_iasync_infer_request.hpp
+++ b/inference-engine/tests/unit/mocks/mock_iasync_infer_request.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/mock_icnn_network.hpp b/inference-engine/tests/unit/mocks/mock_icnn_network.hpp
index 786707522..43337fb46 100644
--- a/inference-engine/tests/unit/mocks/mock_icnn_network.hpp
+++ b/inference-engine/tests/unit/mocks/mock_icnn_network.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -41,6 +40,7 @@ class MockICNNNetwork : public InferenceEngine::ICNNNetwork {
MOCK_QUALIFIED_METHOD1(getInputShapes, const noexcept, void (InferenceEngine::ICNNNetwork::InputShapes&));
MOCK_QUALIFIED_METHOD2(reshape, noexcept, InferenceEngine::StatusCode (const InferenceEngine::ICNNNetwork::InputShapes &, InferenceEngine::ResponseDesc *));
MOCK_QUALIFIED_METHOD2(AddExtension, noexcept, InferenceEngine::StatusCode (const InferenceEngine::IShapeInferExtensionPtr &, InferenceEngine::ResponseDesc *));
+ MOCK_QUALIFIED_METHOD3(serialize, const noexcept, InferenceEngine::StatusCode (const std::string &, const std::string &, InferenceEngine::ResponseDesc*));
};
/**
diff --git a/inference-engine/tests/unit/mocks/mock_iexecutable_network.hpp b/inference-engine/tests/unit/mocks/mock_iexecutable_network.hpp
index d88a060a5..d28d81c14 100644
--- a/inference-engine/tests/unit/mocks/mock_iexecutable_network.hpp
+++ b/inference-engine/tests/unit/mocks/mock_iexecutable_network.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/mock_iformat_parser.hpp b/inference-engine/tests/unit/mocks/mock_iformat_parser.hpp
index fff3deda8..12b7c2fb1 100644
--- a/inference-engine/tests/unit/mocks/mock_iformat_parser.hpp
+++ b/inference-engine/tests/unit/mocks/mock_iformat_parser.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -20,7 +19,5 @@ struct MockIFormatParser : public InferenceEngine::details::IFormatParser {
MOCK_METHOD1(Parse, InferenceEngine::details::CNNNetworkImplPtr(pugi::xml_node &));
MOCK_METHOD1(SetWeights, void(const InferenceEngine::TBlob<uint8_t>::Ptr &));
-
- MOCK_METHOD2(CopyBlobsByName, void(void*, std::string));
};
diff --git a/inference-engine/tests/unit/mocks/mock_inference_engine.hpp b/inference-engine/tests/unit/mocks/mock_inference_engine.hpp
index 60fcb4469..150629c61 100644
--- a/inference-engine/tests/unit/mocks/mock_inference_engine.hpp
+++ b/inference-engine/tests/unit/mocks/mock_inference_engine.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/mock_not_empty_icnn_network.hpp b/inference-engine/tests/unit/mocks/mock_not_empty_icnn_network.hpp
index 4016d1a59..bc71baed6 100644
--- a/inference-engine/tests/unit/mocks/mock_not_empty_icnn_network.hpp
+++ b/inference-engine/tests/unit/mocks/mock_not_empty_icnn_network.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -42,4 +41,5 @@ public:
MOCK_QUALIFIED_METHOD1(getInputShapes, const noexcept, void (ICNNNetwork::InputShapes &));
MOCK_QUALIFIED_METHOD2(reshape, noexcept, StatusCode (const ICNNNetwork::InputShapes &, ResponseDesc *));
MOCK_QUALIFIED_METHOD2(AddExtension, noexcept, StatusCode (const IShapeInferExtensionPtr &, ResponseDesc *));
+ MOCK_QUALIFIED_METHOD3(serialize, const noexcept, StatusCode (const std::string &, const std::string &, InferenceEngine::ResponseDesc*));
};
diff --git a/inference-engine/tests/unit/mocks/mock_plugin_dispatcher.hpp b/inference-engine/tests/unit/mocks/mock_plugin_dispatcher.hpp
index d508114a5..769690fcc 100644
--- a/inference-engine/tests/unit/mocks/mock_plugin_dispatcher.hpp
+++ b/inference-engine/tests/unit/mocks/mock_plugin_dispatcher.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/shape_infer/mock_input_controller.hpp b/inference-engine/tests/unit/mocks/shape_infer/mock_input_controller.hpp
index 96d95834c..4e2c2d4c8 100644
--- a/inference-engine/tests/unit/mocks/shape_infer/mock_input_controller.hpp
+++ b/inference-engine/tests/unit/mocks/shape_infer/mock_input_controller.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/shape_infer/mock_ishape_infer_impl.hpp b/inference-engine/tests/unit/mocks/shape_infer/mock_ishape_infer_impl.hpp
index e057b9cde..75e70dee0 100644
--- a/inference-engine/tests/unit/mocks/shape_infer/mock_ishape_infer_impl.hpp
+++ b/inference-engine/tests/unit/mocks/shape_infer/mock_ishape_infer_impl.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/shape_infer/mock_output_controller.hpp b/inference-engine/tests/unit/mocks/shape_infer/mock_output_controller.hpp
index 60395fb7f..a3cc339f9 100644
--- a/inference-engine/tests/unit/mocks/shape_infer/mock_output_controller.hpp
+++ b/inference-engine/tests/unit/mocks/shape_infer/mock_output_controller.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/shape_infer/mock_reshaper_launcher.hpp b/inference-engine/tests/unit/mocks/shape_infer/mock_reshaper_launcher.hpp
index 479efe302..46045469d 100644
--- a/inference-engine/tests/unit/mocks/shape_infer/mock_reshaper_launcher.hpp
+++ b/inference-engine/tests/unit/mocks/shape_infer/mock_reshaper_launcher.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/mocks/shape_infer/mock_shape_infer_extension.hpp b/inference-engine/tests/unit/mocks/shape_infer/mock_shape_infer_extension.hpp
index afeb130ce..f579954c4 100644
--- a/inference-engine/tests/unit/mocks/shape_infer/mock_shape_infer_extension.hpp
+++ b/inference-engine/tests/unit/mocks/shape_infer/mock_shape_infer_extension.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/opencv_test_gapi/CMakeLists.txt b/inference-engine/tests/unit/opencv_test_gapi/CMakeLists.txt
new file mode 100644
index 000000000..73d3af56b
--- /dev/null
+++ b/inference-engine/tests/unit/opencv_test_gapi/CMakeLists.txt
@@ -0,0 +1,34 @@
+# Copyright (C) 2018 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+if(NOT ENABLE_GAPI_TESTS)
+ message(WARNING "Skipping GAPI unit tests")
+ return()
+endif()
+
+find_package(OpenCV COMPONENTS gapi)
+if(NOT(OpenCV_FOUND))
+ message(WARNING "No suitable OpenCV version detected, " ${TARGET_NAME} " skipped")
+ return()
+endif()
+
+file(GLOB SOURCES *.cpp common/*.cpp cpu/*.cpp)
+file(GLOB HEADERS *.hpp common/*.hpp cpu/*.hpp)
+
+set(TARGET opencv_test_gapi)
+add_executable(${TARGET} ${SOURCES} ${HEADERS})
+
+target_include_directories(${TARGET}
+ PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}"
+ PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/common"
+ PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/cpu"
+ PRIVATE "${IE_MAIN_SOURCE_DIR}/thirdparty/fluid/modules/gapi/include/")
+
+target_link_libraries(${TARGET} ${OpenCV_LIBS} inference_engine gtest gtest_main)
+
+if(GAPI_TEST_PERF)
+ target_compile_definitions(${TARGET} PRIVATE -DPERF_TEST=1)
+else()
+ target_compile_definitions(${TARGET} PRIVATE -DPERF_TEST=0)
+endif()
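Editor's note: because this CMake file always defines PERF_TEST to either 0 or 1, the sources can gate timing code with a plain #if; the #ifndef fallback seen later in gapi_core_tests_inl.hpp only matters when the header is compiled outside this build. A tiny self-contained illustration of the consuming side (hypothetical messages, same gating pattern):

    #include <cstdio>

    #ifndef PERF_TEST
    #define PERF_TEST 0   // same fallback the test header declares
    #endif

    int main() {
    #if PERF_TEST
        std::puts("timing loops compiled in (-DPERF_TEST=1)");
    #else
        std::puts("accuracy-only build (-DPERF_TEST=0)");
    #endif
    }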
diff --git a/inference-engine/tests/unit/topology_verification_tests/v1_topology_verification_test.cpp b/inference-engine/tests/unit/opencv_test_gapi/common/gapi_core_tests.cpp
index 168b0ba84..fb57725af 100644
--- a/inference-engine/tests/unit/topology_verification_tests/v1_topology_verification_test.cpp
+++ b/inference-engine/tests/unit/opencv_test_gapi/common/gapi_core_tests.cpp
@@ -1,5 +1,5 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
+#include "gapi_core_tests_inl.hpp"
diff --git a/inference-engine/tests/unit/opencv_test_gapi/common/gapi_core_tests.hpp b/inference-engine/tests/unit/opencv_test_gapi/common/gapi_core_tests.hpp
new file mode 100644
index 000000000..7a251f9d6
--- /dev/null
+++ b/inference-engine/tests/unit/opencv_test_gapi/common/gapi_core_tests.hpp
@@ -0,0 +1,47 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#ifndef OPENCV_GAPI_CORE_TESTS_HPP
+#define OPENCV_GAPI_CORE_TESTS_HPP
+
+#include "gapi_tests_common.hpp"
+#include "ie_preprocess.hpp"
+
+#include <gtest/gtest.h>
+
+namespace opencv_test
+{
+
+struct ResizeTestGAPI: public testing::TestWithParam<std::tuple<int, int, std::pair<cv::Size, cv::Size>, double, cv::GCompileArgs>> {};
+
+struct Split2TestGAPI: public TestParams<std::tuple<int, cv::Size, cv::GCompileArgs>> {};
+struct Split3TestGAPI: public TestParams<std::tuple<int, cv::Size, cv::GCompileArgs>> {};
+struct Split4TestGAPI: public TestParams<std::tuple<int, cv::Size, cv::GCompileArgs>> {};
+
+struct Merge2TestGAPI: public TestParams<std::tuple<int, cv::Size, cv::GCompileArgs>> {};
+struct Merge3TestGAPI: public TestParams<std::tuple<int, cv::Size, cv::GCompileArgs>> {};
+struct Merge4TestGAPI: public TestParams<std::tuple<int, cv::Size, cv::GCompileArgs>> {};
+
+//------------------------------------------------------------------------------
+
+struct ResizeTestIE: public testing::TestWithParam<std::tuple<int, int, std::pair<cv::Size, cv::Size>, double>> {};
+
+struct SplitTestIE: public TestParams<std::tuple<int, cv::Size>> {};
+struct MergeTestIE: public TestParams<std::tuple<int, cv::Size>> {};
+
+//------------------------------------------------------------------------------
+
+using PreprocParams = std::tuple< InferenceEngine::Precision // input-output data type
+ , InferenceEngine::ResizeAlgorithm // resize algorithm, if needed
+ , InferenceEngine::Layout // input tensor layout
+ , InferenceEngine::Layout // output tensor layout
+ , int // number of channels
+ , std::pair<cv::Size, cv::Size>
+ >;
+
+struct PreprocTest: public TestParams<PreprocParams> {};
+
+} // opencv_test
+
+#endif //OPENCV_GAPI_CORE_TESTS_HPP
diff --git a/inference-engine/tests/unit/opencv_test_gapi/common/gapi_core_tests_inl.hpp b/inference-engine/tests/unit/opencv_test_gapi/common/gapi_core_tests_inl.hpp
new file mode 100644
index 000000000..3daaba5ae
--- /dev/null
+++ b/inference-engine/tests/unit/opencv_test_gapi/common/gapi_core_tests_inl.hpp
@@ -0,0 +1,876 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#ifndef OPENCV_GAPI_CORE_TESTS_INL_HPP
+#define OPENCV_GAPI_CORE_TESTS_INL_HPP
+
+#include "gapi_core_tests.hpp"
+
+#include "blob_factory.hpp"
+#include "blob_transform.hpp"
+#include "ie_preprocess.hpp"
+#include "ie_preprocess_data.hpp"
+#include "ie_preprocess_gapi_kernels.hpp"
+
+#include <opencv2/core.hpp>
+#include <opencv2/imgproc.hpp>
+#include <opencv2/gapi.hpp>
+
+#include <cstdarg>
+#include <cstdio>
+#include <ctime>
+
+#include <chrono>
+
+#define CV_MAT_CHANNELS(flags) (((flags) >> CV_CN_SHIFT) + 1)
+
+// Can be set externally (via CMake) if built with -DGAPI_TEST_PERF=ON
+#ifndef PERF_TEST
+#define PERF_TEST 0 // 1=test performance, 0=don't
+#endif
+
+namespace opencv_test
+{
+
+#if PERF_TEST
+// performance test: iterate function, measure and print milliseconds per call
+template<typename F> static void test_ms(F func, int iter, const char format[], ...)
+{
+ using std::chrono::high_resolution_clock;
+
+    std::vector<high_resolution_clock::duration> samples; samples.reserve(iter);
+ if (0 == iter)
+ return;
+
+ for (int i=0; i < iter; i++)
+ {
+ auto start = high_resolution_clock::now();
+ func(); // iterate calls
+ samples.push_back(high_resolution_clock::now() - start);
+ }
+
+ std::sort(samples.begin(), samples.end());
+
+ auto median = samples[samples.size() / 2];
+
+ double median_ms = std::chrono::duration_cast<std::chrono::microseconds>(median).count() * 0.001; // convert to milliseconds
+
+ printf("Performance(ms): %lg ", median_ms);
+
+ va_list args;
+ va_start(args, format);
+ vprintf(format, args);
+ va_end(args);
+
+ printf("\n");
+}
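Editor's note: test_ms reports the median rather than the mean, which makes the printed number robust against scheduler hiccups and cache-cold first iterations. A standalone sketch of the same idea with stand-in work (loop bounds are arbitrary):

    #include <algorithm>
    #include <chrono>
    #include <cstdio>
    #include <vector>

    int main() {
        using clk = std::chrono::high_resolution_clock;
        std::vector<clk::duration> samples;
        for (int i = 0; i < 5; ++i) {
            auto t0 = clk::now();
            volatile long sink = 0;
            for (long j = 0; j < 1000000; ++j) sink += j;  // stand-in workload
            samples.push_back(clk::now() - t0);
        }
        std::sort(samples.begin(), samples.end());
        auto median_us = std::chrono::duration_cast<std::chrono::microseconds>(
                             samples[samples.size() / 2]).count();
        std::printf("median: %lld us\n", static_cast<long long>(median_us));
    }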
+
+static cv::String interpToString(int interp)
+{
+ switch(interp)
+ {
+ case cv::INTER_AREA : return "INTER_AREA";
+ case cv::INTER_LINEAR : return "INTER_LINEAR";
+ case cv::INTER_NEAREST: return "INTER_NEAREST";
+ }
+ CV_Assert(!"ERROR: unsupported interpolation!");
+ return nullptr;
+}
+
+static cv::String depthToString(int depth)
+{
+ switch(depth)
+ {
+ case CV_8U : return "CV_8U";
+ case CV_32F : return "CV_32F";
+ }
+ CV_Assert(!"ERROR: unsupported depth!");
+ return nullptr;
+}
+
+static cv::String typeToString(int type)
+{
+ switch(type)
+ {
+ case CV_8UC1 : return "CV_8UC1";
+ case CV_8UC2 : return "CV_8UC2";
+ case CV_8UC3 : return "CV_8UC3";
+ case CV_8UC4 : return "CV_8UC4";
+ case CV_32FC1 : return "CV_32FC1";
+ case CV_32FC2 : return "CV_32FC2";
+ case CV_32FC3 : return "CV_32FC3";
+ case CV_32FC4 : return "CV_32FC4";
+ }
+ CV_Assert(!"ERROR: unsupported type!");
+ return nullptr;
+}
+#endif // PERF_TEST
+
+TEST_P(ResizeTestGAPI, AccuracyTest)
+{
+ int type = 0, interp = 0;
+ cv::Size sz_in, sz_out;
+ double tolerance = 0.0;
+ cv::GCompileArgs compile_args;
+ std::pair<cv::Size, cv::Size> sizes;
+ std::tie(type, interp, sizes, tolerance, compile_args) = GetParam();
+ std::tie(sz_in, sz_out) = sizes;
+
+ cv::Mat in_mat1 (sz_in, type );
+ cv::Scalar mean = cv::Scalar::all(127);
+ cv::Scalar stddev = cv::Scalar::all(40.f);
+
+ cv::randn(in_mat1, mean, stddev);
+
+ cv::Mat out_mat(sz_out, type);
+ cv::Mat out_mat_ocv(sz_out, type);
+
+ // G-API code //////////////////////////////////////////////////////////////
+ cv::GMat in, out;
+ switch (CV_MAT_CHANNELS(type))
+ {
+ case 1:
+ out = InferenceEngine::gapi::ScalePlane::on(in, type, sz_in, sz_out, interp);
+ break;
+ case 3:
+ {
+ int depth = CV_MAT_DEPTH(type);
+ int type1 = CV_MAKE_TYPE(depth, 1);
+ cv::GMat in0, in1, in2, out0, out1, out2;
+ std::tie(in0, in1, in2) = InferenceEngine::gapi::Split3::on(in);
+ out0 = InferenceEngine::gapi::ScalePlane::on(in0, type1, sz_in, sz_out, interp);
+ out1 = InferenceEngine::gapi::ScalePlane::on(in1, type1, sz_in, sz_out, interp);
+ out2 = InferenceEngine::gapi::ScalePlane::on(in2, type1, sz_in, sz_out, interp);
+ out = InferenceEngine::gapi::Merge3::on(out0, out1, out2);
+ }
+ break;
+ default: CV_Assert(!"ERROR: unsupported number of channels!");
+ }
+
+ cv::GComputation c(in, out);
+
+ // compile graph, and test once
+
+ auto own_in_mat1 = cv::to_own(in_mat1);
+ auto own_out_mat = cv::to_own(out_mat);
+
+ std::vector<cv::gapi::own::Mat> v_in = { own_in_mat1 };
+ std::vector<cv::gapi::own::Mat> v_out = { own_out_mat };
+
+ c.apply(v_in, v_out, std::move(compile_args));
+
+#if PERF_TEST
+ // iterate testing, and print performance
+ test_ms([&](){ c.apply(v_in, v_out); },
+ 100, "Resize GAPI %s %s %dx%d -> %dx%d",
+ interpToString(interp).c_str(), typeToString(type).c_str(),
+ sz_in.width, sz_in.height, sz_out.width, sz_out.height);
+#endif
+
+ // OpenCV code /////////////////////////////////////////////////////////////
+ {
+ cv::resize(in_mat1, out_mat_ocv, sz_out, 0, 0, interp);
+ }
+ // Comparison //////////////////////////////////////////////////////////////
+ {
+ cv::Mat absDiff;
+ cv::absdiff(out_mat, out_mat_ocv, absDiff);
+ EXPECT_EQ(0, cv::countNonZero(absDiff > tolerance));
+ }
+}
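Editor's note: the comparison idiom above tolerates small per-pixel deviations: it counts the pixels whose absolute difference exceeds the tolerance and requires that count to be zero. A minimal self-contained demonstration of the pattern:

    #include <opencv2/core.hpp>
    #include <cassert>

    int main() {
        cv::Mat a = (cv::Mat_<float>(1, 3) << 1.f, 2.0f, 3.f);
        cv::Mat b = (cv::Mat_<float>(1, 3) << 1.f, 2.5f, 3.f);
        double tolerance = 1.0;
        cv::Mat absDiff;
        cv::absdiff(a, b, absDiff);                          // per-element |a - b|
        assert(cv::countNonZero(absDiff > tolerance) == 0);  // 0.5 <= 1.0 passes
        return 0;
    }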
+
+TEST_P(Split2TestGAPI, AccuracyTest)
+{
+ int depth = std::get<0>(GetParam());
+ cv::Size sz_in = std::get<1>(GetParam());
+ auto compile_args = std::get<2>(GetParam());
+
+ int type1 = CV_MAKE_TYPE(depth, 1);
+ int type2 = CV_MAKE_TYPE(depth, 2);
+ initMatrixRandU(type2, sz_in, type1);
+
+ cv::Mat out_mat2 = cv::Mat(sz_in, type1);
+ cv::Mat out_mat_ocv2 = cv::Mat(sz_in, type1);
+
+ // G-API code //////////////////////////////////////////////////////////////
+ cv::GMat in1, out1, out2;
+ std::tie(out1, out2) = InferenceEngine::gapi::Split2::on(in1);
+ cv::GComputation c(cv::GIn(in1), cv::GOut(out1, out2));
+
+ // compile graph, and test once
+
+ auto own_in_mat1 = cv::to_own(in_mat1);
+ auto own_out_mat_gapi = cv::to_own(out_mat_gapi);
+ auto own_out_mat2 = cv::to_own(out_mat2);
+
+ std::vector<cv::gapi::own::Mat> v_in = { own_in_mat1 };
+ std::vector<cv::gapi::own::Mat> v_out = { own_out_mat_gapi, own_out_mat2 };
+
+ c.apply(v_in, v_out, std::move(compile_args));
+
+#if PERF_TEST
+ // iterate testing, and print performance
+ test_ms([&](){ c.apply(v_in, v_out); },
+ 400, "Split GAPI %s %dx%d", typeToString(type2).c_str(), sz_in.width, sz_in.height);
+#endif
+
+ // OpenCV code /////////////////////////////////////////////////////////////
+ {
+ std::vector<cv::Mat> out_mats_ocv = {out_mat_ocv, out_mat_ocv2};
+ cv::split(in_mat1, out_mats_ocv);
+ }
+ // Comparison //////////////////////////////////////////////////////////////
+ {
+ EXPECT_EQ(0, cv::countNonZero(out_mat_ocv != out_mat_gapi));
+ EXPECT_EQ(0, cv::countNonZero(out_mat_ocv2 != out_mat2));
+ }
+}
+
+TEST_P(Split3TestGAPI, AccuracyTest)
+{
+ int depth = std::get<0>(GetParam());
+ cv::Size sz_in = std::get<1>(GetParam());
+ auto compile_args = std::get<2>(GetParam());
+
+ int type1 = CV_MAKE_TYPE(depth, 1);
+ int type3 = CV_MAKE_TYPE(depth, 3);
+ initMatrixRandU(type3, sz_in, type1);
+
+ cv::Mat out_mat2 = cv::Mat(sz_in, type1);
+ cv::Mat out_mat3 = cv::Mat(sz_in, type1);
+ cv::Mat out_mat_ocv2 = cv::Mat(sz_in, type1);
+ cv::Mat out_mat_ocv3 = cv::Mat(sz_in, type1);
+
+ // G-API code //////////////////////////////////////////////////////////////
+ cv::GMat in1, out1, out2, out3;
+ std::tie(out1, out2, out3) = InferenceEngine::gapi::Split3::on(in1);
+ cv::GComputation c(cv::GIn(in1), cv::GOut(out1, out2, out3));
+
+ // compile graph, and test once
+
+ auto own_in_mat1 = cv::to_own(in_mat1);
+ auto own_out_mat_gapi = cv::to_own(out_mat_gapi);
+ auto own_out_mat2 = cv::to_own(out_mat2);
+ auto own_out_mat3 = cv::to_own(out_mat3);
+
+ std::vector<cv::gapi::own::Mat> v_in = { own_in_mat1 };
+ std::vector<cv::gapi::own::Mat> v_out = { own_out_mat_gapi, own_out_mat2, own_out_mat3 };
+
+ c.apply(v_in, v_out, std::move(compile_args));
+
+#if PERF_TEST
+ // iterate testing, and print performance
+ test_ms([&](){ c.apply(v_in, v_out); },
+ 400, "Split GAPI %s %dx%d", typeToString(type3).c_str(), sz_in.width, sz_in.height);
+#endif
+
+ // OpenCV code /////////////////////////////////////////////////////////////
+ {
+ std::vector<cv::Mat> out_mats_ocv = {out_mat_ocv, out_mat_ocv2, out_mat_ocv3};
+ cv::split(in_mat1, out_mats_ocv);
+ }
+ // Comparison //////////////////////////////////////////////////////////////
+ {
+ EXPECT_EQ(0, cv::countNonZero(out_mat_ocv != out_mat_gapi));
+ EXPECT_EQ(0, cv::countNonZero(out_mat_ocv2 != out_mat2));
+ EXPECT_EQ(0, cv::countNonZero(out_mat_ocv3 != out_mat3));
+ }
+}
+
+TEST_P(Split4TestGAPI, AccuracyTest)
+{
+ int depth = std::get<0>(GetParam());
+ cv::Size sz_in = std::get<1>(GetParam());
+ auto compile_args = std::get<2>(GetParam());
+
+ int type1 = CV_MAKE_TYPE(depth, 1);
+ int type4 = CV_MAKE_TYPE(depth, 4);
+ initMatrixRandU(type4, sz_in, type1);
+
+ cv::Mat out_mat2 = cv::Mat(sz_in, type1);
+ cv::Mat out_mat3 = cv::Mat(sz_in, type1);
+ cv::Mat out_mat4 = cv::Mat(sz_in, type1);
+ cv::Mat out_mat_ocv2 = cv::Mat(sz_in, type1);
+ cv::Mat out_mat_ocv3 = cv::Mat(sz_in, type1);
+ cv::Mat out_mat_ocv4 = cv::Mat(sz_in, type1);
+
+ // G-API code //////////////////////////////////////////////////////////////
+ cv::GMat in1, out1, out2, out3, out4;
+ std::tie(out1, out2, out3, out4) = InferenceEngine::gapi::Split4::on(in1);
+ cv::GComputation c(cv::GIn(in1), cv::GOut(out1, out2, out3, out4));
+
+ // compile graph, and test once
+
+ auto own_in_mat1 = cv::to_own(in_mat1);
+ auto own_out_mat_gapi = cv::to_own(out_mat_gapi);
+ auto own_out_mat2 = cv::to_own(out_mat2);
+ auto own_out_mat3 = cv::to_own(out_mat3);
+ auto own_out_mat4 = cv::to_own(out_mat4);
+
+ std::vector<cv::gapi::own::Mat> v_in = { own_in_mat1 };
+ std::vector<cv::gapi::own::Mat> v_out = { own_out_mat_gapi, own_out_mat2,
+ own_out_mat3, own_out_mat4 };
+
+ c.apply(v_in, v_out, std::move(compile_args));
+
+#if PERF_TEST
+ // iterate testing, and print performance
+ test_ms([&](){ c.apply(v_in, v_out); },
+ 400, "Split GAPI %s %dx%d", typeToString(type4).c_str(), sz_in.width, sz_in.height);
+#endif
+
+ // OpenCV code /////////////////////////////////////////////////////////////
+ {
+ std::vector<cv::Mat> out_mats_ocv = {out_mat_ocv, out_mat_ocv2, out_mat_ocv3, out_mat_ocv4};
+ cv::split(in_mat1, out_mats_ocv);
+ }
+ // Comparison //////////////////////////////////////////////////////////////
+ {
+ EXPECT_EQ(0, cv::countNonZero(out_mat_ocv != out_mat_gapi));
+ EXPECT_EQ(0, cv::countNonZero(out_mat_ocv2 != out_mat2));
+ EXPECT_EQ(0, cv::countNonZero(out_mat_ocv3 != out_mat3));
+ EXPECT_EQ(0, cv::countNonZero(out_mat_ocv4 != out_mat4));
+ }
+}
+
+TEST_P(Merge2TestGAPI, AccuracyTest)
+{
+ int depth = std::get<0>(GetParam());
+ cv::Size sz_in = std::get<1>(GetParam());
+ auto compile_args = std::get<2>(GetParam());
+
+ int type1 = CV_MAKE_TYPE(depth, 1);
+ int type2 = CV_MAKE_TYPE(depth, 2);
+ initMatsRandU(type1, sz_in, type2);
+
+ // G-API code //////////////////////////////////////////////////////////////
+ cv::GMat in1, in2;
+ auto out = InferenceEngine::gapi::Merge2::on(in1, in2);
+ cv::GComputation c(cv::GIn(in1, in2), cv::GOut(out));
+
+ // compile graph, and test once
+
+ auto own_in_mat1 = cv::to_own(in_mat1);
+ auto own_in_mat2 = cv::to_own(in_mat2);
+ auto own_out_mat_gapi = cv::to_own(out_mat_gapi);
+
+ std::vector<cv::gapi::own::Mat> v_in = { own_in_mat1, own_in_mat2 };
+ std::vector<cv::gapi::own::Mat> v_out = { own_out_mat_gapi };
+
+ c.apply(v_in, v_out, std::move(compile_args));
+
+#if PERF_TEST
+ // iterate testing, and print performance
+ test_ms([&](){ c.apply(v_in, v_out); },
+ 400, "Merge GAPI %s %dx%d", typeToString(type2).c_str(), sz_in.width, sz_in.height);
+#endif
+
+ // OpenCV code /////////////////////////////////////////////////////////////
+ {
+ std::vector<cv::Mat> in_mats_ocv = {in_mat1, in_mat2};
+ cv::merge(in_mats_ocv, out_mat_ocv);
+ }
+ // Comparison //////////////////////////////////////////////////////////////
+ {
+ EXPECT_EQ(0, cv::countNonZero(out_mat_ocv != out_mat_gapi));
+ }
+}
+
+TEST_P(Merge3TestGAPI, AccuracyTest)
+{
+ int depth = std::get<0>(GetParam());
+ cv::Size sz_in = std::get<1>(GetParam());
+ auto compile_args = std::get<2>(GetParam());
+
+ int type1 = CV_MAKE_TYPE(depth, 1);
+ int type3 = CV_MAKE_TYPE(depth, 3);
+ initMatsRandU(type1, sz_in, type3);
+
+ cv::Scalar mean = cv::Scalar::all(127);
+ cv::Scalar stddev = cv::Scalar::all(40.f);
+
+ cv::Mat in_mat3(sz_in, type1);
+ cv::randn(in_mat3, mean, stddev);
+
+ // G-API code //////////////////////////////////////////////////////////////
+ cv::GMat in1, in2, in3;
+ auto out = InferenceEngine::gapi::Merge3::on(in1, in2, in3);
+ cv::GComputation c(cv::GIn(in1, in2, in3), cv::GOut(out));
+
+ // compile graph, and test once
+
+ auto own_in_mat1 = cv::to_own(in_mat1);
+ auto own_in_mat2 = cv::to_own(in_mat2);
+ auto own_in_mat3 = cv::to_own(in_mat3);
+ auto own_out_mat_gapi = cv::to_own(out_mat_gapi);
+
+ std::vector<cv::gapi::own::Mat> v_in = { own_in_mat1, own_in_mat2, own_in_mat3 };
+ std::vector<cv::gapi::own::Mat> v_out = { own_out_mat_gapi };
+
+ c.apply(v_in, v_out, std::move(compile_args));
+
+#if PERF_TEST
+ // iterate testing, and print performance
+ test_ms([&](){ c.apply(v_in, v_out); },
+ 400, "Merge GAPI %s %dx%d", typeToString(type3).c_str(), sz_in.width, sz_in.height);
+#endif
+
+ // OpenCV code /////////////////////////////////////////////////////////////
+ {
+ std::vector<cv::Mat> in_mats_ocv = {in_mat1, in_mat2, in_mat3};
+ cv::merge(in_mats_ocv, out_mat_ocv);
+ }
+ // Comparison //////////////////////////////////////////////////////////////
+ {
+ EXPECT_EQ(0, cv::countNonZero(out_mat_ocv != out_mat_gapi));
+ }
+}
+
+TEST_P(Merge4TestGAPI, AccuracyTest)
+{
+ int depth = std::get<0>(GetParam());
+ cv::Size sz_in = std::get<1>(GetParam());
+ auto compile_args = std::get<2>(GetParam());
+
+ int type1 = CV_MAKE_TYPE(depth, 1);
+ int type4 = CV_MAKE_TYPE(depth, 4);
+ initMatsRandU(type1, sz_in, type4);
+
+ cv::Scalar mean = cv::Scalar::all(127);
+ cv::Scalar stddev = cv::Scalar::all(40.f);
+
+ cv::Mat in_mat3(sz_in, type1);
+ cv::Mat in_mat4(sz_in, type1);
+ cv::randn(in_mat3, mean, stddev);
+ cv::randn(in_mat4, mean, stddev);
+
+ // G-API code //////////////////////////////////////////////////////////////
+ cv::GMat in1, in2, in3, in4;
+ auto out = InferenceEngine::gapi::Merge4::on(in1, in2, in3, in4);
+ cv::GComputation c(cv::GIn(in1, in2, in3, in4), cv::GOut(out));
+
+ // compile graph, and test once
+
+ auto own_in_mat1 = cv::to_own(in_mat1);
+ auto own_in_mat2 = cv::to_own(in_mat2);
+ auto own_in_mat3 = cv::to_own(in_mat3);
+ auto own_in_mat4 = cv::to_own(in_mat4);
+ auto own_out_mat_gapi = cv::to_own(out_mat_gapi);
+
+ std::vector<cv::gapi::own::Mat> v_in = { own_in_mat1, own_in_mat2, own_in_mat3, own_in_mat4 };
+ std::vector<cv::gapi::own::Mat> v_out = { own_out_mat_gapi };
+
+ c.apply(v_in, v_out, std::move(compile_args));
+
+#if PERF_TEST
+ // iterate testing, and print performance
+ test_ms([&](){ c.apply(v_in, v_out); },
+ 400, "Merge GAPI %s %dx%d", typeToString(type4).c_str(), sz_in.width, sz_in.height);
+#endif
+
+ // OpenCV code /////////////////////////////////////////////////////////////
+ {
+ std::vector<cv::Mat> in_mats_ocv = {in_mat1, in_mat2, in_mat3, in_mat4};
+ cv::merge(in_mats_ocv, out_mat_ocv);
+ }
+ // Comparison //////////////////////////////////////////////////////////////
+ {
+ EXPECT_EQ(0, cv::countNonZero(out_mat_ocv != out_mat_gapi));
+ }
+}
+
+//----------------------------------------------------------------------
+
+TEST_P(ResizeTestIE, AccuracyTest)
+{
+ int type = 0, interp = 0;
+ cv::Size sz_in, sz_out;
+ double tolerance = 0.0;
+ std::pair<cv::Size, cv::Size> sizes;
+ std::tie(type, interp, sizes, tolerance) = GetParam();
+ std::tie(sz_in, sz_out) = sizes;
+
+ cv::Mat in_mat1(sz_in, type );
+ cv::Scalar mean = cv::Scalar::all(127);
+ cv::Scalar stddev = cv::Scalar::all(40.f);
+
+ cv::randn(in_mat1, mean, stddev);
+
+ cv::Mat out_mat(sz_out, type);
+ cv::Mat out_mat_ocv(sz_out, type);
+
+ // Inference Engine code ///////////////////////////////////////////////////
+
+ size_t channels = out_mat.channels();
+ CV_Assert(1 == channels || 3 == channels);
+
+ int depth = CV_MAT_DEPTH(type);
+ CV_Assert(CV_8U == depth || CV_32F == depth);
+
+ CV_Assert(cv::INTER_AREA == interp || cv::INTER_LINEAR == interp);
+
+ ASSERT_TRUE(in_mat1.isContinuous() && out_mat.isContinuous());
+
+ using namespace InferenceEngine;
+
+ size_t in_height = in_mat1.rows, in_width = in_mat1.cols;
+ size_t out_height = out_mat.rows, out_width = out_mat.cols;
+ InferenceEngine::SizeVector in_sv = { 1, channels, in_height, in_width };
+ InferenceEngine::SizeVector out_sv = { 1, channels, out_height, out_width };
+
+ // HWC blob: channels are interleaved
+ Precision precision = CV_8U == depth ? Precision::U8 : Precision::FP32;
+ TensorDesc in_desc(precision, in_sv, Layout::NHWC);
+ TensorDesc out_desc(precision, out_sv, Layout::NHWC);
+
+ Blob::Ptr in_blob, out_blob;
+ in_blob = make_blob_with_precision(in_desc , in_mat1.data);
+ out_blob = make_blob_with_precision(out_desc, out_mat.data);
+
+ PreProcessData preprocess;
+ preprocess.setRoiBlob(in_blob);
+
+ ResizeAlgorithm algorithm = cv::INTER_AREA == interp ? RESIZE_AREA : RESIZE_BILINEAR;
+
+    // test once to warm up the cache
+ preprocess.execute(out_blob, algorithm);
+
+#if PERF_TEST
+ // iterate testing, and print performance
+ test_ms([&](){ preprocess.execute(out_blob, algorithm); },
+ 100, "Resize IE %s %s %dx%d -> %dx%d",
+ interpToString(interp).c_str(), typeToString(type).c_str(),
+ sz_in.width, sz_in.height, sz_out.width, sz_out.height);
+#endif
+
+ // OpenCV code /////////////////////////////////////////////////////////////
+ {
+ cv::resize(in_mat1, out_mat_ocv, sz_out, 0, 0, interp);
+ }
+ // Comparison //////////////////////////////////////////////////////////////
+ {
+ cv::Mat absDiff;
+ cv::absdiff(out_mat, out_mat_ocv, absDiff);
+ EXPECT_EQ(0, cv::countNonZero(absDiff > tolerance));
+ }
+}
+
+TEST_P(SplitTestIE, AccuracyTest)
+{
+ int type = std::get<0>(GetParam());
+ cv::Size size = std::get<1>(GetParam());
+
+ int depth = CV_MAT_DEPTH(type);
+ CV_Assert(CV_8U == depth || CV_32F == depth);
+
+ int type1 = CV_MAKE_TYPE(depth, 1);
+ int type4 = CV_MAKE_TYPE(depth, 4);
+
+ cv::Scalar mean = cv::Scalar::all(127);
+ cv::Scalar stddev = cv::Scalar::all(40.f);
+
+ cv::Mat in_mat(size, type);
+ cv::randn(in_mat, mean, stddev);
+
+ int channels = in_mat.channels();
+ CV_Assert(2 == channels || 3 == channels || 4 == channels);
+
+ size_t elemsize1 = in_mat.elemSize1();
+    const size_t total = in_mat.total();
+
+ cv::Mat out_mat(size, type4);
+ CV_Assert(in_mat.isContinuous() && out_mat.isContinuous());
+
+ cv::Mat out_mat0(size, type1, out_mat.data + 0*total*elemsize1);
+ cv::Mat out_mat1(size, type1, out_mat.data + 1*total*elemsize1);
+ cv::Mat out_mat2(size, type1, out_mat.data + 2*total*elemsize1);
+ cv::Mat out_mat3(size, type1, out_mat.data + 3*total*elemsize1);
+
+ cv::Mat out_mats[] = {out_mat0, out_mat1, out_mat2, out_mat3};
+
+ std::vector<cv::Mat> out_mats_ocv(channels);
+
+ // Inference Engine code ///////////////////////////////////////////////////
+
+ using namespace InferenceEngine;
+
+ size_t width = size.width;
+ size_t height = size.height;
+ InferenceEngine::SizeVector sv = { 1, (size_t)channels, height, width };
+
+ Precision precision = CV_8U == depth ? Precision::U8 : Precision::FP32;
+ TensorDesc in_desc(precision, sv, Layout::NHWC); // interleaved
+ TensorDesc out_desc(precision, sv, Layout::NCHW); // color planes
+
+ Blob::Ptr in_blob, out_blob;
+ in_blob = make_blob_with_precision( in_desc, in_mat.data);
+ out_blob = make_blob_with_precision(out_desc, out_mat.data);
+
+ // test once
+ blob_copy(in_blob, out_blob);
+
+#if PERF_TEST
+ // iterate testing, and print performance
+ test_ms([&]() { blob_copy(in_blob, out_blob); },
+ 400, "Split IE %s %dx%d", typeToString(type).c_str(), size.width, size.height);
+#endif
+
+ // OpenCV code /////////////////////////////////////////////////////////////
+
+ cv::split(in_mat, out_mats_ocv);
+
+ // Comparison //////////////////////////////////////////////////////////////
+
+ for (int i = 0; i < channels; i++)
+ {
+ EXPECT_EQ(0, cv::countNonZero(out_mats[i] != out_mats_ocv[i]));
+ }
+}
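Editor's note: conceptually, blob_copy here performs a layout conversion: the NHWC descriptor views the source buffer as interleaved and the NCHW descriptor views the destination as planar, so the copy is effectively a channel split (MergeTestIE below runs the same conversion in the opposite direction). A standalone sketch of the index mapping, simplified to one batch of contiguous float data (the real kernel is precision-generic and optimized):

    #include <cstddef>
    #include <vector>

    // NHWC (interleaved) -> NCHW (planar): dst[c][h][w] = src[h][w][c].
    static void hwcToChw(const float* src, float* dst,
                         size_t C, size_t H, size_t W) {
        for (size_t c = 0; c < C; ++c)
            for (size_t h = 0; h < H; ++h)
                for (size_t w = 0; w < W; ++w)
                    dst[c*H*W + h*W + w] = src[h*W*C + w*C + c];
    }

    int main() {
        const size_t C = 2, H = 2, W = 2;
        std::vector<float> src = {0, 1, 2, 3, 4, 5, 6, 7};  // HWC order
        std::vector<float> dst(C * H * W);
        hwcToChw(src.data(), dst.data(), C, H, W);
        // dst is now {0,2,4,6, 1,3,5,7}: channel plane 0, then plane 1.
        return 0;
    }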
+
+TEST_P(MergeTestIE, AccuracyTest)
+{
+ int type = std::get<0>(GetParam());
+ cv::Size size = std::get<1>(GetParam());
+
+ int depth = CV_MAT_DEPTH(type);
+ CV_Assert(CV_8U == depth || CV_32F == depth);
+
+ int type1 = CV_MAKE_TYPE(depth, 1);
+ int type4 = CV_MAKE_TYPE(depth, 4);
+
+ cv::Mat out_mat(size, type), out_mat_ocv;
+
+ cv::Mat in_mat(size, type4);
+
+ int channels = out_mat.channels();
+ CV_Assert(2 == channels || 3 == channels || 4 == channels);
+
+ size_t elemsize1 = out_mat.elemSize1();
+    const size_t total = out_mat.total();
+
+ cv::Mat in_mat0(size, type1, in_mat.data + 0*total*elemsize1);
+ cv::Mat in_mat1(size, type1, in_mat.data + 1*total*elemsize1);
+ cv::Mat in_mat2(size, type1, in_mat.data + 2*total*elemsize1);
+ cv::Mat in_mat3(size, type1, in_mat.data + 3*total*elemsize1);
+
+ cv::Mat in_mats[] = { in_mat0, in_mat1, in_mat2, in_mat3 };
+
+ cv::Scalar mean = cv::Scalar::all(127);
+ cv::Scalar stddev = cv::Scalar::all(40.f);
+
+ for (int i = 0; i < 4 ; i++)
+ {
+ cv::randn(in_mats[i], mean, stddev);
+ }
+
+ CV_Assert(in_mat.isContinuous() && out_mat.isContinuous());
+
+ // Inference Engine code ///////////////////////////////////////////////////
+
+ using namespace InferenceEngine;
+
+ size_t width = size.width;
+ size_t height = size.height;
+ InferenceEngine::SizeVector sv = { 1, (size_t)channels, height, width };
+
+ Precision precision = CV_8U == depth ? Precision::U8 : Precision::FP32;
+ TensorDesc in_desc(precision, sv, Layout::NCHW); // color planes
+ TensorDesc out_desc(precision, sv, Layout::NHWC); // interleaved
+
+ Blob::Ptr in_blob, out_blob;
+ in_blob = make_blob_with_precision( in_desc, in_mat.data);
+ out_blob = make_blob_with_precision(out_desc, out_mat.data);
+
+ // test once
+ blob_copy(in_blob, out_blob);
+
+#if PERF_TEST
+ // iterate testing, and print performance
+ test_ms([&]() { blob_copy(in_blob, out_blob); },
+ 400, "Merge IE %s %dx%d", typeToString(type).c_str(), size.width, size.height);
+#endif
+
+ // OpenCV code /////////////////////////////////////////////////////////////
+
+ cv::merge(in_mats, channels, out_mat_ocv);
+
+ // Comparison //////////////////////////////////////////////////////////////
+
+ EXPECT_EQ(0, cv::countNonZero(out_mat != out_mat_ocv));
+}
+
+namespace
+{
+// FIXME: Copy-paste from cropRoi tests
+template <InferenceEngine::Precision::ePrecision PRC>
+InferenceEngine::Blob::Ptr img2Blob(cv::Mat &img, InferenceEngine::Layout layout) {
+ using namespace InferenceEngine;
+ using data_t = typename PrecisionTrait<PRC>::value_type;
+
+ const size_t channels = img.channels();
+ const size_t height = img.size().height;
+ const size_t width = img.size().width;
+
+ CV_Assert(cv::DataType<data_t>::depth == img.depth());
+
+ SizeVector dims = {1, channels, height, width};
+    Blob::Ptr resultBlob = make_shared_blob<data_t>(TensorDesc(PRC, dims, layout));
+ resultBlob->allocate();
+
+ data_t* blobData = resultBlob->buffer().as<data_t*>();
+
+ switch (layout) {
+ case Layout::NCHW: {
+ for (size_t c = 0; c < channels; c++) {
+ for (size_t h = 0; h < height; h++) {
+ for (size_t w = 0; w < width; w++) {
+ blobData[c * width * height + h * width + w] = img.ptr<data_t>(h,w)[c];
+ }
+ }
+ }
+ }
+ break;
+ case Layout::NHWC: {
+ for (size_t h = 0; h < height; h++) {
+ for (size_t w = 0; w < width; w++) {
+ for (size_t c = 0; c < channels; c++) {
+ blobData[h * width * channels + w * channels + c] = img.ptr<data_t>(h,w)[c];
+ }
+ }
+ }
+ }
+ break;
+ default:
+ THROW_IE_EXCEPTION << "Inconsistent input layout for image processing: " << layout;
+ }
+ return resultBlob;
+}
+
+template <InferenceEngine::Precision::ePrecision PRC>
+void Blob2Img(const InferenceEngine::Blob::Ptr& blobP, cv::Mat& img, InferenceEngine::Layout layout) {
+ using namespace InferenceEngine;
+ using data_t = typename PrecisionTrait<PRC>::value_type;
+
+ const size_t channels = img.channels();
+ const size_t height = img.size().height;
+ const size_t width = img.size().width;
+
+ CV_Assert(cv::DataType<data_t>::depth == img.depth());
+
+ data_t* blobData = blobP->buffer().as<data_t*>();
+
+ switch (layout) {
+ case Layout::NCHW: {
+ for (size_t c = 0; c < channels; c++) {
+ for (size_t h = 0; h < height; h++) {
+ for (size_t w = 0; w < width; w++) {
+ img.ptr<data_t>(h,w)[c] = blobData[c * width * height + h * width + w];
+ }
+ }
+ }
+ }
+ break;
+ case Layout::NHWC: {
+ for (size_t h = 0; h < height; h++) {
+ for (size_t w = 0; w < width; w++) {
+ for (size_t c = 0; c < channels; c++) {
+ img.ptr<data_t>(h,w)[c] = blobData[h * width * channels + w * channels + c];
+ }
+ }
+ }
+ }
+ break;
+ default:
+ THROW_IE_EXCEPTION << "Inconsistent input layout for image processing: " << layout;
+ }
+}
+} // namespace
+
+TEST_P(PreprocTest, Performance)
+{
+ using namespace InferenceEngine;
+ Precision prec;
+ ResizeAlgorithm interp;
+ Layout in_layout, out_layout;
+ int ocv_chan = -1;
+ std::pair<cv::Size, cv::Size> sizes;
+ std::tie(prec, interp, in_layout, out_layout, ocv_chan, sizes) = GetParam();
+ cv::Size in_size, out_size;
+ std::tie(in_size, out_size) = sizes;
+
+ const int ocv_depth = prec == Precision::U8 ? CV_8U :
+ prec == Precision::FP32 ? CV_32F : -1;
+ const int ocv_type = CV_MAKETYPE(ocv_depth, ocv_chan);
+ initMatrixRandU(ocv_type, in_size, ocv_type, false);
+
+ cv::Mat out_mat(out_size, ocv_type);
+
+ Blob::Ptr in_blob, out_blob;
+ switch (prec)
+ {
+ case Precision::U8:
+ in_blob = img2Blob<Precision::U8>(in_mat1, in_layout);
+ out_blob = img2Blob<Precision::U8>(out_mat, out_layout);
+ break;
+
+ case Precision::FP32:
+ in_blob = img2Blob<Precision::FP32>(in_mat1, in_layout);
+ out_blob = img2Blob<Precision::FP32>(out_mat, out_layout);
+ break;
+
+ default:
+ FAIL() << "Unsupported configuration";
+ }
+
+ PreProcessData preprocess;
+ preprocess.setRoiBlob(in_blob);
+
+    // test once to warm up the cache
+ preprocess.execute(out_blob, interp);
+
+ switch (prec)
+ {
+ case Precision::U8: Blob2Img<Precision::U8> (out_blob, out_mat, out_layout); break;
+ case Precision::FP32: Blob2Img<Precision::FP32>(out_blob, out_mat, out_layout); break;
+ default: FAIL() << "Unsupported configuration";
+ }
+
+ cv::Mat ocv_out_mat(out_size, ocv_type);
+ auto cv_interp = interp == RESIZE_AREA ? cv::INTER_AREA : cv::INTER_LINEAR;
+ cv::resize(in_mat1, ocv_out_mat, out_size, 0, 0, cv_interp);
+
+ cv::Mat absDiff;
+ cv::absdiff(ocv_out_mat, out_mat, absDiff);
+ EXPECT_EQ(cv::countNonZero(absDiff > 1), 0);
+
+#if PERF_TEST
+ // iterate testing, and print performance
+ const auto type_str = depthToString(ocv_depth);
+ const auto interp_str = interp == RESIZE_AREA ? "AREA"
+ : interp == RESIZE_BILINEAR ? "BILINEAR" : "?";
+ const auto layout_to_str = [](const Layout &l) {
+ switch (l) {
+ case Layout::NCHW: return "NCHW";
+ case Layout::NHWC: return "NHWC";
+ default: return "?";
+ }
+ };
+ const auto in_layout_str = layout_to_str(in_layout);
+ const auto out_layout_str = layout_to_str(out_layout);
+
+ test_ms([&]() { preprocess.execute(out_blob, interp); },
+ 300,
+ "Preproc %s %d %s %s %dx%d %s %dx%d",
+ type_str.c_str(),
+ ocv_chan,
+ interp_str,
+ in_layout_str, in_size.width, in_size.height,
+ out_layout_str, out_size.width, out_size.height);
+#endif // PERF_TEST
+
+}
+
+} // opencv_test
+
+#endif //OPENCV_GAPI_CORE_TESTS_INL_HPP
diff --git a/inference-engine/tests/unit/opencv_test_gapi/common/gapi_tests_common.hpp b/inference-engine/tests/unit/opencv_test_gapi/common/gapi_tests_common.hpp
new file mode 100644
index 000000000..27b43e3c4
--- /dev/null
+++ b/inference-engine/tests/unit/opencv_test_gapi/common/gapi_tests_common.hpp
@@ -0,0 +1,106 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <iostream>
+
+#include "opencv2/core.hpp"
+#include "opencv2/gapi/cpu/core.hpp"
+
+#include <gtest/gtest.h>
+
+namespace
+{
+ std::ostream& operator<<(std::ostream& o, const cv::GCompileArg& arg)
+ {
+ return o << (arg.tag.empty() ? "empty" : arg.tag);
+ }
+}
+
+namespace opencv_test
+{
+
+class TestFunctional
+{
+public:
+ cv::Mat in_mat1;
+ cv::Mat in_mat2;
+ cv::Mat out_mat_gapi;
+ cv::Mat out_mat_ocv;
+
+ cv::Scalar sc;
+
+ void initMatsRandU(int type, cv::Size sz_in, int dtype, bool createOutputMatrices = true)
+ {
+ in_mat1 = cv::Mat(sz_in, type);
+ in_mat2 = cv::Mat(sz_in, type);
+
+ auto& rng = cv::theRNG();
+ sc = cv::Scalar(rng(100),rng(100),rng(100),rng(100));
+ cv::randu(in_mat1, cv::Scalar::all(0), cv::Scalar::all(255));
+ cv::randu(in_mat2, cv::Scalar::all(0), cv::Scalar::all(255));
+
+ if (createOutputMatrices && dtype != -1)
+ {
+ out_mat_gapi = cv::Mat (sz_in, dtype);
+ out_mat_ocv = cv::Mat (sz_in, dtype);
+ }
+ }
+
+ void initMatrixRandU(int type, cv::Size sz_in, int dtype, bool createOutputMatrices = true)
+ {
+ in_mat1 = cv::Mat(sz_in, type);
+
+ auto& rng = cv::theRNG();
+ sc = cv::Scalar(rng(100),rng(100),rng(100),rng(100));
+
+ cv::randu(in_mat1, cv::Scalar::all(0), cv::Scalar::all(255));
+
+ if (createOutputMatrices && dtype != -1)
+ {
+ out_mat_gapi = cv::Mat (sz_in, dtype);
+ out_mat_ocv = cv::Mat (sz_in, dtype);
+ }
+ }
+
+ void initMatsRandN(int type, cv::Size sz_in, int dtype, bool createOutputMatrices = true)
+ {
+ in_mat1 = cv::Mat(sz_in, type);
+ cv::randn(in_mat1, cv::Scalar::all(127), cv::Scalar::all(40.f));
+
+ if (createOutputMatrices && dtype != -1)
+ {
+ out_mat_gapi = cv::Mat(sz_in, dtype);
+ out_mat_ocv = cv::Mat(sz_in, dtype);
+ }
+ }
+
+ static cv::Mat nonZeroPixels(const cv::Mat& mat)
+ {
+ int channels = mat.channels();
+ std::vector<cv::Mat> split(channels);
+ cv::split(mat, split);
+ cv::Mat result;
+ for (int c=0; c < channels; c++)
+ {
+ if (c == 0)
+ result = split[c] != 0;
+ else
+ result = result | (split[c] != 0);
+ }
+ return result;
+ }
+
+ static int countNonZeroPixels(const cv::Mat& mat)
+ {
+ return cv::countNonZero( nonZeroPixels(mat) );
+ }
+
+};
+
+template<class T>
+class TestParams: public TestFunctional, public testing::TestWithParam<T>{};
+
+}
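Editor's note: TestParams is the glue used by most fixtures in this suite: a plain helper base (shared matrices plus the init helpers above) mixed into gtest's TestWithParam, so every value-parameterized test inherits the same state. A reduced sketch of the pattern (hypothetical names; INSTANTIATE_TEST_CASE_P matches the gtest generation used elsewhere in this diff):

    #include <gtest/gtest.h>

    class Helpers {
    public:
        int shared_state = 0;
        void init(int v) { shared_state = v; }
    };

    template <class T>
    class ParamFixture : public Helpers, public testing::TestWithParam<T> {};

    using IntFixture = ParamFixture<int>;

    TEST_P(IntFixture, UsesSharedHelpers) {
        init(GetParam());
        EXPECT_EQ(shared_state, GetParam());
    }

    INSTANTIATE_TEST_CASE_P(Smoke, IntFixture, testing::Values(1, 2, 3));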
diff --git a/inference-engine/tests/unit/opencv_test_gapi/cpu/gapi_core_tests_fluid.cpp b/inference-engine/tests/unit/opencv_test_gapi/cpu/gapi_core_tests_fluid.cpp
new file mode 100644
index 000000000..31714b608
--- /dev/null
+++ b/inference-engine/tests/unit/opencv_test_gapi/cpu/gapi_core_tests_fluid.cpp
@@ -0,0 +1,244 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "gapi_core_tests.hpp"
+
+#include "ie_preprocess_gapi_kernels.hpp"
+
+#include <opencv2/opencv.hpp>
+
+#include <gtest/gtest.h>
+
+namespace opencv_test
+{
+
+#define CORE_FLUID InferenceEngine::gapi::preprocKernels()
+
+#define TEST_SIZES \
+ cv::Size(3840, 2160), \
+ cv::Size(1920, 1080), \
+ cv::Size(1280, 720), \
+ cv::Size(1280, 960), \
+ cv::Size( 960, 720), \
+ cv::Size( 640, 480), \
+ cv::Size( 320, 200), \
+ cv::Size( 113, 71)
+
+#define TEST_RESIZE_DOWN \
+ std::make_pair(cv::Size(3840, 2160), cv::Size(1920, 1080)), \
+ std::make_pair(cv::Size(3840, 2160), cv::Size(1280, 720)), \
+ std::make_pair(cv::Size(1920, 1080), cv::Size(1280, 720)), \
+ std::make_pair(cv::Size(1920, 1080), cv::Size( 640, 480)), \
+ std::make_pair(cv::Size(1280, 720), cv::Size( 640, 480)), \
+ std::make_pair(cv::Size(1280, 720), cv::Size( 320, 200)), \
+ std::make_pair(cv::Size( 640, 480), cv::Size( 320, 200)), \
+ std::make_pair(cv::Size( 640, 480), cv::Size( 113, 71)), \
+ std::make_pair(cv::Size( 320, 200), cv::Size( 113, 71))
+
+#define TEST_RESIZE_UP \
+ std::make_pair(cv::Size(1920, 1080), cv::Size(3840, 2160)), \
+ std::make_pair(cv::Size(1280, 720), cv::Size(3840, 2160)), \
+ std::make_pair(cv::Size(1280, 720), cv::Size(1920, 1080)), \
+ std::make_pair(cv::Size( 640, 480), cv::Size(1920, 1080)), \
+ std::make_pair(cv::Size( 640, 480), cv::Size(1280, 720)), \
+ std::make_pair(cv::Size( 320, 200), cv::Size(1280, 720)), \
+ std::make_pair(cv::Size( 320, 200), cv::Size( 640, 480)), \
+ std::make_pair(cv::Size( 113, 71), cv::Size( 640, 480)), \
+ std::make_pair(cv::Size( 113, 71), cv::Size( 320, 200))
+
+#define TEST_RESIZE_HORZ \
+ std::make_pair(cv::Size(3840, 2160), cv::Size(1920, 2160)), \
+ std::make_pair(cv::Size(1920, 1080), cv::Size(3840, 1080)), \
+ std::make_pair(cv::Size(1920, 1080), cv::Size(1280, 1080)), \
+ std::make_pair(cv::Size(1280, 720), cv::Size(1920, 720)), \
+ std::make_pair(cv::Size(1280, 720), cv::Size( 640, 720)), \
+ std::make_pair(cv::Size( 640, 480), cv::Size(1280, 480)), \
+ std::make_pair(cv::Size( 640, 480), cv::Size( 320, 480)), \
+ std::make_pair(cv::Size( 320, 200), cv::Size( 640, 200)), \
+ std::make_pair(cv::Size( 320, 200), cv::Size( 113, 200)), \
+ std::make_pair(cv::Size( 113, 71), cv::Size( 320, 71))
+
+#define TEST_RESIZE_VERT \
+ std::make_pair(cv::Size(3840, 2160), cv::Size(3840, 1080)), \
+ std::make_pair(cv::Size(1920, 1080), cv::Size(1920, 2160)), \
+ std::make_pair(cv::Size(1920, 1080), cv::Size(1920, 720)), \
+ std::make_pair(cv::Size(1280, 720), cv::Size(1280, 1080)), \
+ std::make_pair(cv::Size(1280, 720), cv::Size(1280, 480)), \
+ std::make_pair(cv::Size( 640, 480), cv::Size( 640, 720)), \
+ std::make_pair(cv::Size( 640, 480), cv::Size( 640, 200)), \
+ std::make_pair(cv::Size( 320, 200), cv::Size( 320, 480)), \
+ std::make_pair(cv::Size( 320, 200), cv::Size( 320, 71)), \
+ std::make_pair(cv::Size( 113, 71), cv::Size( 113, 200))
+
+#define TEST_RESIZE_COPY \
+ std::make_pair(cv::Size(3840, 2160), cv::Size(3840, 2160)), \
+ std::make_pair(cv::Size(1920, 1080), cv::Size(1920, 1080)), \
+ std::make_pair(cv::Size(1280, 720), cv::Size(1280, 720)), \
+ std::make_pair(cv::Size( 640, 480), cv::Size( 640, 480)), \
+ std::make_pair(cv::Size( 320, 200), cv::Size( 320, 200)), \
+ std::make_pair(cv::Size( 113, 71), cv::Size( 113, 71))
+
+#define TEST_RESIZE_SPECIAL \
+ std::make_pair(cv::Size(300, 300), cv::Size(300, 199)), \
+ std::make_pair(cv::Size(300, 300), cv::Size(199, 300)), \
+ std::make_pair(cv::Size(300, 300), cv::Size(199, 199)), \
+ std::make_pair(cv::Size(199, 199), cv::Size(300, 300)), \
+ std::make_pair(cv::Size(199, 300), cv::Size(300, 300)), \
+ std::make_pair(cv::Size(300, 199), cv::Size(300, 300))
+
+#define TEST_RESIZE_PAIRS \
+ TEST_RESIZE_DOWN, \
+ TEST_RESIZE_UP, \
+ TEST_RESIZE_HORZ, \
+ TEST_RESIZE_VERT, \
+ TEST_RESIZE_COPY, \
+ TEST_RESIZE_SPECIAL
+
+using namespace testing;
+
+INSTANTIATE_TEST_CASE_P(ResizeTestFluid_U8, ResizeTestGAPI,
+ Combine(Values(CV_8UC1, CV_8UC3),
+ Values(cv::INTER_LINEAR, cv::INTER_AREA),
+ Values(TEST_RESIZE_PAIRS),
+ Values(1), // error not more than 1 unit
+ Values(cv::compile_args(CORE_FLUID))));
+
+INSTANTIATE_TEST_CASE_P(ResizeTestFluid_F32, ResizeTestGAPI,
+ Combine(Values(CV_32FC1, CV_32FC3),
+ Values(cv::INTER_LINEAR, cv::INTER_AREA),
+ Values(TEST_RESIZE_PAIRS),
+ Values(0.015), // relative error within ~1.5%
+ Values(cv::compile_args(CORE_FLUID))));
+
+INSTANTIATE_TEST_CASE_P(Split2TestFluid, Split2TestGAPI,
+ Combine(Values(CV_8U, CV_32F),
+ Values(TEST_SIZES),
+ Values(cv::compile_args(CORE_FLUID))));
+
+INSTANTIATE_TEST_CASE_P(Split3TestFluid, Split3TestGAPI,
+ Combine(Values(CV_8U, CV_32F),
+ Values(TEST_SIZES),
+ Values(cv::compile_args(CORE_FLUID))));
+
+INSTANTIATE_TEST_CASE_P(Split4TestFluid, Split4TestGAPI,
+ Combine(Values(CV_8U, CV_32F),
+ Values(TEST_SIZES),
+ Values(cv::compile_args(CORE_FLUID))));
+
+INSTANTIATE_TEST_CASE_P(Merge2TestFluid, Merge2TestGAPI,
+ Combine(Values(CV_8U, CV_32F),
+ Values(TEST_SIZES),
+ Values(cv::compile_args(CORE_FLUID))));
+
+INSTANTIATE_TEST_CASE_P(Merge3TestFluid, Merge3TestGAPI,
+ Combine(Values(CV_8U, CV_32F),
+ Values(TEST_SIZES),
+ Values(cv::compile_args(CORE_FLUID))));
+
+INSTANTIATE_TEST_CASE_P(Merge4TestFluid, Merge4TestGAPI,
+ Combine(Values(CV_8U, CV_32F),
+ Values(TEST_SIZES),
+ Values(cv::compile_args(CORE_FLUID))));
+
+//----------------------------------------------------------------------
+
+INSTANTIATE_TEST_CASE_P(ResizeTestFluid_U8, ResizeTestIE,
+ Combine(Values(CV_8UC1, CV_8UC3),
+ Values(cv::INTER_LINEAR, cv::INTER_AREA),
+ Values(TEST_RESIZE_PAIRS),
+ Values(1))); // error not more than 1 unit
+
+INSTANTIATE_TEST_CASE_P(ResizeTestFluid_F32, ResizeTestIE,
+ Combine(Values(CV_32FC1, CV_32FC3),
+ Values(cv::INTER_LINEAR, cv::INTER_AREA),
+ Values(TEST_RESIZE_PAIRS),
+ Values(0.05))); // error within 0.05 units
+
+INSTANTIATE_TEST_CASE_P(SplitTestFluid, SplitTestIE,
+ Combine(Values(CV_8UC2, CV_8UC3, CV_8UC4,
+ CV_32FC2, CV_32FC3, CV_32FC4),
+ Values(TEST_SIZES)));
+
+INSTANTIATE_TEST_CASE_P(MergeTestFluid, MergeTestIE,
+ Combine(Values(CV_8UC2, CV_8UC3, CV_8UC4,
+ CV_32FC2, CV_32FC3, CV_32FC4),
+ Values(TEST_SIZES)));
+
+//------------------------------------------------------------------------------
+
+namespace IE = InferenceEngine;
+
+static const auto FRAME_SIZES =
+ Values(std::make_pair(cv::Size(1920,1080),
+ cv::Size(1024,1024)), // person-vehicle-bike-detection-crossroad-0078
+ std::make_pair(cv::Size(1024, 768),
+ cv::Size( 992, 544)), // person-detection-retail-0001
+ std::make_pair(cv::Size(1280, 720),
+ cv::Size( 896, 512)), // road-segmentation-adas-0001
+ std::make_pair(cv::Size(3840, 2160),
+ cv::Size(2048, 1024)), // semantic-segmentation-adas-0001
+ std::make_pair(cv::Size(1270, 720),
+ cv::Size(2048, 1024)), // semantic-segmentation-adas-0001 (UPSCALE)
+ std::make_pair(cv::Size( 640, 480),
+ cv::Size( 544, 320))); // 320 - face-person-detection-retail-0002,
+ // 320 - person-detection-retail-10013
+ // 300 - face-detection-retail-0004
+
+static const auto PATCH_SIZES =
+ Values(std::make_pair(cv::Size(200,400),
+ cv::Size(128,384)), // person-reidentification-retail-0076
+ std::make_pair(cv::Size( 96,256),
+ cv::Size(128,384)), // person-reidentification-retail-0076 (UPSCALE)
+ std::make_pair(cv::Size(340,340),
+ cv::Size(320,256)), // vehicle-license-plate-detection-barrier-0007
+ std::make_pair(cv::Size(256,256),
+ cv::Size( 72,72)), // vehicle-attributes-recognition-barrier-0039
+ std::make_pair(cv::Size(96,96),
+ cv::Size(64,64)), // 60 - head-pose-estimation-adas-0001
+ // 62 - age-gender-recognition-retail-0013
+ // 64 - emotions-recognition-retail-0003
+ std::make_pair(cv::Size(128,48),
+ cv::Size( 94,24)), // license-plate-recognition-barrier-0001
+ std::make_pair(cv::Size(120,200),
+ cv::Size(80, 160))); // 80 - person-attributes-recognition-crossroad-0031
+ // 64 - person-reidentification-retail-0079
+
+INSTANTIATE_TEST_CASE_P(ReorderResize_Frame, PreprocTest,
+ Combine(Values(IE::Precision::U8, IE::Precision::FP32),
+ Values(IE::ResizeAlgorithm::RESIZE_BILINEAR), // AREA is not there yet
+ Values(IE::Layout::NHWC),
+ Values(IE::Layout::NCHW),
+ Values(1, 3),
+ FRAME_SIZES));
+
+INSTANTIATE_TEST_CASE_P(Scale3ch_Frame, PreprocTest,
+ Combine(Values(IE::Precision::U8, IE::Precision::FP32),
+ Values(IE::ResizeAlgorithm::RESIZE_BILINEAR), // AREA is not there yet
+ Values(IE::Layout::NHWC),
+ Values(IE::Layout::NHWC),
+ Values(3),
+ FRAME_SIZES));
+
+INSTANTIATE_TEST_CASE_P(ReorderResize_Patch, PreprocTest,
+ Combine(Values(IE::Precision::U8, IE::Precision::FP32),
+ Values(IE::ResizeAlgorithm::RESIZE_BILINEAR), // AREA is not there yet
+ Values(IE::Layout::NHWC),
+ Values(IE::Layout::NCHW),
+ Values(1, 3),
+ PATCH_SIZES));
+
+INSTANTIATE_TEST_CASE_P(Everything, PreprocTest,
+ Combine(Values(IE::Precision::U8, IE::Precision::FP32),
+ Values(IE::ResizeAlgorithm::RESIZE_BILINEAR, IE::ResizeAlgorithm::RESIZE_AREA),
+ Values(IE::Layout::NHWC, IE::Layout::NCHW),
+ Values(IE::Layout::NHWC, IE::Layout::NCHW),
+ Values(1, 2, 3, 4),
+ Values(std::make_pair(cv::Size(1920, 1080), cv::Size(1024,1024)),
+ std::make_pair(cv::Size(1280, 720), cv::Size(544,320)),
+ std::make_pair(cv::Size(640, 480), cv::Size(896, 512)),
+ std::make_pair(cv::Size(200, 400), cv::Size(128, 384)),
+ std::make_pair(cv::Size(256, 256), cv::Size(72, 72)),
+ std::make_pair(cv::Size(96, 256), cv::Size(128, 384)))));
+
+}
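
A note on scale: Combine() instantiates the full cartesian product of its Values() lists, so the "Everything" suite above alone expands to 2 precisions x 2 algorithms x 2 input layouts x 2 output layouts x 4 channel counts x 6 size pairs = 384 cases. The sketch below shows how a fixture might unpack such a tuple; PreprocTest's actual definition lives in gapi_core_tests.hpp and may differ, so treat the names and tuple order here as assumptions.

#include <tuple>
#include <utility>
#include <gtest/gtest.h>
#include <opencv2/core.hpp>
#include <inference_engine.hpp>

namespace IE = InferenceEngine;

using PreprocParams = std::tuple<IE::Precision, IE::ResizeAlgorithm,
                                 IE::Layout, IE::Layout, int,
                                 std::pair<cv::Size, cv::Size>>;

struct PreprocTestSketch : public testing::TestWithParam<PreprocParams> {};

TEST_P(PreprocTestSketch, ParamUnpack)
{
    IE::Precision prec;
    IE::ResizeAlgorithm algo = IE::ResizeAlgorithm::NO_RESIZE;
    IE::Layout in_layout = IE::Layout::ANY, out_layout = IE::Layout::ANY;
    int channels = 0;
    std::pair<cv::Size, cv::Size> sizes;
    std::tie(prec, algo, in_layout, out_layout, channels, sizes) = GetParam();

    ASSERT_GE(channels, 1);   // placeholder body: a real test would build blobs of
    ASSERT_LE(channels, 4);   // sizes.first/sizes.second and run the preprocessing
}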
diff --git a/inference-engine/tests/unit/shape_infer/built_in_holder_test.cpp b/inference-engine/tests/unit/shape_infer/built_in_holder_test.cpp
index 04bb17d42..b8661bd8f 100644
--- a/inference-engine/tests/unit/shape_infer/built_in_holder_test.cpp
+++ b/inference-engine/tests/unit/shape_infer/built_in_holder_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -42,7 +41,8 @@ protected:
"BatchNormalization",
"Input",
"Memory",
- "Const"
+ "Const",
+ "Gemm"
};
void TearDown() override {
diff --git a/inference-engine/tests/unit/shape_infer/built_in_shape_infer_batch_test.cpp b/inference-engine/tests/unit/shape_infer/built_in_shape_infer_batch_test.cpp
index d67d43a9a..9f57e35a3 100644
--- a/inference-engine/tests/unit/shape_infer/built_in_shape_infer_batch_test.cpp
+++ b/inference-engine/tests/unit/shape_infer/built_in_shape_infer_batch_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -7,7 +6,7 @@
#include <inference_engine/shape_infer/built-in/ie_built_in_holder.hpp>
#include <xml_net_builder.hpp>
#include <inference_engine/cnn_network_impl.hpp>
-#include <inference_engine/v2_format_parser.h>
+#include <inference_engine/ie_format_parser.h>
#include <xml_helper.hpp>
#include <inference_engine/shape_infer/ie_reshaper.hpp>
#include "built_in_shape_infer_general_test.hpp"
@@ -18,7 +17,7 @@ using namespace ShapeInfer;
class BuiltInShapeInferImplTestBatch : public BuiltInShapeInferImplTest {};
TEST_P(BuiltInShapeInferImplTestBatch, batch) {
- auto cnnNetworkImplPtr = buildSingleLayerNetwork(type, inOutShapes, &layerParams.data, layerDataName);
+ auto cnnNetworkImplPtr = buildSingleLayerNetwork<3>(type, inOutShapes, &layerParams.data, layerDataName);
auto reshaper = std::make_shared<Reshaper>(*cnnNetworkImplPtr);
if (canInfer) {
diff --git a/inference-engine/tests/unit/shape_infer/built_in_shape_infer_conv_test.cpp b/inference-engine/tests/unit/shape_infer/built_in_shape_infer_conv_test.cpp
index bf3d1eb77..07aaf7fdc 100644
--- a/inference-engine/tests/unit/shape_infer/built_in_shape_infer_conv_test.cpp
+++ b/inference-engine/tests/unit/shape_infer/built_in_shape_infer_conv_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -7,7 +6,7 @@
#include <inference_engine/shape_infer/built-in/ie_built_in_holder.hpp>
#include <xml_net_builder.hpp>
#include <inference_engine/cnn_network_impl.hpp>
-#include <inference_engine/v2_format_parser.h>
+#include <inference_engine/ie_format_parser.h>
#include <xml_helper.hpp>
#include <inference_engine/shape_infer/ie_reshaper.hpp>
#include "built_in_shape_infer_general_test.hpp"
@@ -98,7 +97,7 @@ TEST_P(BuiltInShapeInferConvImplTest, impl) {
ASSERT_NE(nullptr, impl);
if (!group) group = 1;
SizeVector weightsDim{kernel.x * kernel.y * out_channels * inOutShapes.inDims[0][1] / group};
- blobs["weights"] = make_shared_blob(Precision::UNSPECIFIED, weightsDim);
+ blobs["weights"] = make_shared_blob(Precision::fromType<size_t>(), weightsDim);
ASSERT_NO_THROW(sts = impl->inferShapes(inOutShapes.inDims, getMapParams(), blobs, outShapes, &resp));
ASSERT_EQ(int(OK), sts) << resp.msg;
ASSERT_EQ(inOutShapes.outDims, outShapes);
@@ -106,7 +105,7 @@ TEST_P(BuiltInShapeInferConvImplTest, impl) {
TEST_P(BuiltInShapeInferConvImplTest, batch) {
auto layerParams = getMapParams();
- auto cnnNetworkImplPtr = buildSingleLayerNetwork(type, inOutShapes, &layerParams, dataName, 2);
+ auto cnnNetworkImplPtr = buildSingleLayerNetwork<2>(type, inOutShapes, &layerParams, dataName);
auto reshaper = std::make_shared<Reshaper>(*cnnNetworkImplPtr);
sts = cnnNetworkImplPtr->setBatchSizeReshape(BATCH, &resp);
ASSERT_EQ((int) OK, sts) << resp.msg;
@@ -116,7 +115,7 @@ TEST_P(BuiltInShapeInferConvImplTest, batch) {
TEST_P(BuiltInShapeInferConvImplTest, reshaper) {
auto layerParams = getMapParams();
- auto cnnNetworkImplPtr = buildSingleLayerNetwork(type, inOutShapes, &layerParams, dataName, 2);
+ auto cnnNetworkImplPtr = buildSingleLayerNetwork<2>(type, inOutShapes, &layerParams, dataName);
auto reshaper = std::make_shared<Reshaper>(*cnnNetworkImplPtr);
auto inputShapes = setInputShapes(*cnnNetworkImplPtr, newInOutShapes.inDims);
reshaper->run(inputShapes);
@@ -129,7 +128,7 @@ TEST_P(BuiltInShapeInferConvImplTest, impl_IRv3) {
ASSERT_NE(nullptr, impl);
if (!group) group = 1;
SizeVector weightsDim{kernel.x * kernel.y * out_channels * inOutShapes.inDims[0][1] / group};
- blobs["weights"] = make_shared_blob(Precision::UNSPECIFIED, weightsDim);
+ blobs["weights"] = make_shared_blob(Precision::fromType<size_t>(), weightsDim);
ASSERT_NO_THROW(sts = impl->inferShapes(inOutShapes.inDims, getMapParams_IRv3(), blobs, outShapes, &resp));
ASSERT_EQ(int(OK), sts) << resp.msg;
ASSERT_EQ(inOutShapes.outDims, outShapes);
@@ -137,7 +136,7 @@ TEST_P(BuiltInShapeInferConvImplTest, impl_IRv3) {
TEST_P(BuiltInShapeInferConvImplTest, batch_IRv3) {
auto layerParams = getMapParams_IRv3();
- auto cnnNetworkImplPtr = buildSingleLayerNetwork(type, inOutShapes, &layerParams, dataName);
+ auto cnnNetworkImplPtr = buildSingleLayerNetwork<3>(type, inOutShapes, &layerParams, dataName);
auto reshaper = std::make_shared<Reshaper>(*cnnNetworkImplPtr);
sts = cnnNetworkImplPtr->setBatchSizeReshape(BATCH, &resp);
ASSERT_EQ((int) OK, sts) << resp.msg;
@@ -147,7 +146,7 @@ TEST_P(BuiltInShapeInferConvImplTest, batch_IRv3) {
TEST_P(BuiltInShapeInferConvImplTest, reshaper_IRv3) {
auto layerParams = getMapParams_IRv3();
- auto cnnNetworkImplPtr = buildSingleLayerNetwork(type, inOutShapes, &layerParams, dataName);
+ auto cnnNetworkImplPtr = buildSingleLayerNetwork<3>(type, inOutShapes, &layerParams, dataName);
auto reshaper = std::make_shared<Reshaper>(*cnnNetworkImplPtr);
auto inputShapes = setInputShapes(*cnnNetworkImplPtr, newInOutShapes.inDims);
reshaper->run(inputShapes);
diff --git a/inference-engine/tests/unit/shape_infer/built_in_shape_infer_fake_test.cpp b/inference-engine/tests/unit/shape_infer/built_in_shape_infer_fake_test.cpp
index bb2de3535..2b66d59f8 100644
--- a/inference-engine/tests/unit/shape_infer/built_in_shape_infer_fake_test.cpp
+++ b/inference-engine/tests/unit/shape_infer/built_in_shape_infer_fake_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -7,7 +6,7 @@
#include <inference_engine/shape_infer/built-in/ie_built_in_holder.hpp>
#include <xml_net_builder.hpp>
#include <inference_engine/cnn_network_impl.hpp>
-#include <inference_engine/v2_format_parser.h>
+#include <inference_engine/ie_format_parser.h>
#include <xml_helper.hpp>
#include <inference_engine/shape_infer/ie_reshaper.hpp>
#include "built_in_shape_infer_general_test.hpp"
@@ -19,7 +18,7 @@ class BuiltInShapeInferImplFakeTest : public BuiltInShapeInferImplTest {
};
TEST_P(BuiltInShapeInferImplFakeTest, reshaper) {
- auto cnnNetworkImplPtr = buildSingleLayerNetwork(type, inOutShapes, &layerParams.data, layerDataName);
+ auto cnnNetworkImplPtr = buildSingleLayerNetwork<3>(type, inOutShapes, &layerParams.data, layerDataName);
auto reshaper = std::make_shared<Reshaper>(*cnnNetworkImplPtr);
auto inputShapes = setInputShapes(*cnnNetworkImplPtr, newInOutShapes.inDims);
diff --git a/inference-engine/tests/unit/shape_infer/built_in_shape_infer_general_test.cpp b/inference-engine/tests/unit/shape_infer/built_in_shape_infer_general_test.cpp
index c7fc06eb5..a7d3a647d 100644
--- a/inference-engine/tests/unit/shape_infer/built_in_shape_infer_general_test.cpp
+++ b/inference-engine/tests/unit/shape_infer/built_in_shape_infer_general_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -31,7 +30,7 @@ TEST_P(BuiltInShapeInferImplTest, impl) {
}
TEST_P(BuiltInShapeInferImplTest, reshaper) {
- auto cnnNetworkImplPtr = buildSingleLayerNetwork(type, inOutShapes, &layerParams.data, layerDataName);
+ auto cnnNetworkImplPtr = buildSingleLayerNetwork<3>(type, inOutShapes, &layerParams.data, layerDataName);
auto reshaper = std::make_shared<Reshaper>(*cnnNetworkImplPtr);
auto inputShapes = setInputShapes(*cnnNetworkImplPtr.get(), newInOutShapes.inDims);
@@ -141,20 +140,15 @@ INSTANTIATE_TEST_CASE_P(
{{2, 5742, 6}}}),
NewInOutShapes({{{2, 34458}},
{{2, 5743, 6}}}),
- MapParams(MapStrStr(std::map<std::string, std::string>{{"dim", "0,-1,6"},
- {"in2out", "0-0"},
- {"num_axes", "-1"},
- {"axis", "0"}})),
+ MapParams(MapStrStr(std::map<std::string, std::string>{{"dim", "0,-1,6"}})),
LayerDataName("data"),
CanInfer(true)),
::testing::make_tuple(LayerType("Reshape"),
- InOutShapes({{{2, 1, 4, 5}},
- {{40}}}),
- NewInOutShapes({{{4, 1, 4, 5}},
- {{80}}}),
- MapParams(MapStrStr(std::map<std::string, std::string>{{"axis", "0"},
- {"num_axes", "-1"},
- {"in2out", "0-0,1-0,2-0,3-0"}})),
+ InOutShapes({{{1, 1, 300, 4}},
+ {{300, 4}}}),
+ NewInOutShapes({{{1, 1, 500, 4}},
+ {{500, 4}}}),
+ MapParams(MapStrStr(std::map<std::string, std::string>{{"dim", "-1,4"}})),
LayerDataName("data"),
CanInfer(true)),
::testing::make_tuple(LayerType("Flatten"),
@@ -162,9 +156,7 @@ INSTANTIATE_TEST_CASE_P(
{{40}}}),
NewInOutShapes({{{4, 1, 4, 5}},
{{80}}}),
- MapParams(MapStrStr(std::map<std::string, std::string>{{"axis", "0"},
- {"end_axis", "-1"},
- {"in2out", "0-0,1-0,2-0,3-0"}})),
+ MapParams(MapStrStr()),
LayerDataName("data"),
CanInfer(true)),
::testing::make_tuple(LayerType("PriorBoxClustered"),
@@ -464,6 +456,215 @@ INSTANTIATE_TEST_CASE_P(
MapParams(MapStrStr({{"out_max_val", "0"},
{"top_k", "100"}})),
LayerDataName("data"),
+ CanInfer(true)),
+ ::testing::make_tuple(LayerType("Gemm"),
+ InOutShapes({{{15, 10}, {10, 20}, {15, 20}},
+ {{15, 20}}}),
+ NewInOutShapes({{{20, 15}, {15, 25}, {20, 25}},
+ {{20, 25}}}),
+ MapParams(MapStrStr({{"alpha", "1"},
+ {"beta", "1"},
+ {"transpose_a", "false"},
+ {"transpose_b", "false"}})),
+ LayerDataName("data"),
+ CanInfer(true)),
+ ::testing::make_tuple(LayerType("Gemm"),
+ InOutShapes({{{15, 10}, {10, 20}, {15, 20}},
+ {{15, 20}}}),
+ NewInOutShapes({{{20, 15}, {10, 25}, {20, 25}},
+ {{20, 25}}}),
+ MapParams(MapStrStr({{"alpha", "1"},
+ {"beta", "1"},
+ {"transpose_a", "false"},
+ {"transpose_b", "false"}})),
+ LayerDataName("data"),
+ CanInfer(false)),
+ ::testing::make_tuple(LayerType("Gemm"),
+ InOutShapes({{{15, 10}, {10, 20}, {15, 20}},
+ {{15, 20}}}),
+ NewInOutShapes({{{20, 15}, {15, 25}, {15, 25}},
+ {{20, 25}}}),
+ MapParams(MapStrStr({{"alpha", "1"},
+ {"beta", "1"},
+ {"transpose_a", "false"},
+ {"transpose_b", "false"}})),
+ LayerDataName("data"),
+ CanInfer(false)),
+ ::testing::make_tuple(LayerType("Gemm"),
+ InOutShapes({{{15, 10}, {10, 20}},
+ {{15, 20}}}),
+ NewInOutShapes({{{20, 15}, {15, 25}},
+ {{20, 25}}}),
+ MapParams(MapStrStr({{"alpha", "1"},
+ {"beta", "1"},
+ {"transpose_a", "false"},
+ {"transpose_b", "false"}})),
+ LayerDataName("data"),
+ CanInfer(true)),
+ ::testing::make_tuple(LayerType("Gemm"),
+ InOutShapes({{{15, 10}, {10, 20}},
+ {{15, 20}}}),
+ NewInOutShapes({{{20, 15}, {10, 25}},
+ {{20, 25}}}),
+ MapParams(MapStrStr({{"alpha", "1"},
+ {"beta", "1"},
+ {"transpose_a", "false"},
+ {"transpose_b", "false"}})),
+ LayerDataName("data"),
+ CanInfer(false)),
+ ::testing::make_tuple(LayerType("Gemm"),
+ InOutShapes({{{3, 3, 15, 10}, {3, 3, 10, 20}, {3, 3, 15, 20}},
+ {{3, 3, 15, 20}}}),
+ NewInOutShapes({{{4, 1, 20, 15}, {4, 1, 15, 25}, {4, 1, 20, 25}},
+ {{4, 1, 20, 25}}}),
+ MapParams(MapStrStr({{"alpha", "1"},
+ {"beta", "1"},
+ {"transpose_a", "false"},
+ {"transpose_b", "false"}})),
+ LayerDataName("data"),
+ CanInfer(true)),
+ ::testing::make_tuple(LayerType("Gemm"),
+ InOutShapes({{{3, 3, 15, 10}, {3, 1, 10, 20}, {3, 1, 15, 20}},
+ {{3, 3, 15, 20}}}),
+ NewInOutShapes({{{4, 2, 20, 15}, {4, 2, 15, 25}, {4, 1, 20, 25}},
+ {{4, 2, 20, 25}}}),
+ MapParams(MapStrStr({{"alpha", "1"},
+ {"beta", "1"},
+ {"transpose_a", "false"},
+ {"transpose_b", "false"}})),
+ LayerDataName("data"),
+ CanInfer(true)),
+ ::testing::make_tuple(LayerType("Pad"),
+ InOutShapes({{{3, 3, 15, 10}},
+ {{9, 11, 25, 22}}}),
+ NewInOutShapes({{{4, 2, 20, 15}},
+ {{10, 10, 30, 27}}}),
+ MapParams(MapStrStr({{"pads_begin", "1,2,3,4"},
+ {"pads_end", "5,6,7,8"},
+ {"pad_mode", "edge"},
+ {"pad_value", "1.0f"}})),
+ LayerDataName("data"),
+ CanInfer(true)),
+ ::testing::make_tuple(LayerType("Pad"),
+ InOutShapes({{{10, 10, 15, 10}},
+ {{16, 18, 25, 22}}}),
+ NewInOutShapes({{{20, 30, 40, 50}},
+ {{26, 38, 40, 50}}}),
+ MapParams(MapStrStr({{"pads_begin", "1,2,0,0"},
+ {"pads_end", "5,6,0,0"},
+ {"pad_mode", "reflect"},
+ {"pad_value", "1.0f"}})),
+ LayerDataName("data"),
+ CanInfer(true)),
+ ::testing::make_tuple(LayerType("Pad"),
+ InOutShapes({{{10, 10, 15, 10}},
+ {{16, 18, 25, 22}}}),
+ NewInOutShapes({{{4, 2, 20, 15}},
+ {{10, 10, 30, 27}}}),
+ MapParams(MapStrStr({{"pads_begin", "1,2,3,4"},
+ {"pads_end", "5,6,7,8"},
+ {"pad_mode", "reflect"},
+ {"pad_value", "1.0f"}})),
+ LayerDataName("data"),
+ CanInfer(false))
+ )
+);
+
+INSTANTIATE_TEST_CASE_P(
+ BuiltInGeneralImpls2, BuiltInShapeInferImplTest,
+ ::testing::Values(
+ ::testing::make_tuple(LayerType("Gather"),
+ InOutShapes({{{7, 16}, {1, 25}},
+ {{1, 25, 16}}}),
+ NewInOutShapes({{{7, 16}, {12, 25}},
+ {{12, 25, 16}}}),
+ MapParams(MapStrStr(std::map<std::string, std::string>{{"axis", "0"}})),
+ LayerDataName("data"),
+ CanInfer(true)),
+ ::testing::make_tuple(LayerType("Gather"),
+ InOutShapes({{{7, 16}, {1, 25}},
+ {{7, 1, 25}}}),
+ NewInOutShapes({{{7, 16}, {12, 25}},
+ {{7, 12, 25}}}),
+ MapParams(MapStrStr(std::map<std::string, std::string>{{"axis", "1"}})),
+ LayerDataName("data"),
+ CanInfer(true)),
+ ::testing::make_tuple(LayerType("Gather"),
+ InOutShapes({{{7, 16}, {1, 25}},
+ {{7, 1, 25}}}),
+ NewInOutShapes({{{7, 16}, {12, 25}},
+ {{7, 12, 25}}}),
+ MapParams(MapStrStr(std::map<std::string, std::string>{{"axis", "-1"}})),
+ LayerDataName("data"),
+ CanInfer(true)),
+ ::testing::make_tuple(LayerType("Reshape"),
+ InOutShapes({{{1, 2}},
+ {{1, 1}}}),
+ NewInOutShapes({{{1, 2}},
+ {{1, 1}}}),
+ MapParams(MapStrStr(std::map<std::string, std::string>{{"dim", "1,1"}})), // dim doesn't match input
+ LayerDataName("data"),
+ CanInfer(false)),
+ ::testing::make_tuple(LayerType("Flatten"),
+ InOutShapes({{{2, 1, 4, 5}},
+ {{40}}}),
+ NewInOutShapes({{{4, 1, 4, 5}},
+ {{80}}}),
+ MapParams(MapStrStr(std::map<std::string, std::string>{{"axis", "0"},
+ {"end_axis", "-1"}})),
+ LayerDataName("data"),
+ CanInfer(true)),
+ ::testing::make_tuple(LayerType("Flatten"),
+ InOutShapes({{{2, 2, 4, 5}},
+ {{2, 8, 5}}}),
+ NewInOutShapes({{{4, 2, 4, 5}},
+ {{4, 8, 5}}}),
+ MapParams(MapStrStr(std::map<std::string, std::string>{{"axis", "1"},
+ {"end_axis", "2"}})),
+ LayerDataName("data"),
+ CanInfer(true)),
+ ::testing::make_tuple(LayerType("Flatten"),
+ InOutShapes({{{2, 2, 4, 5}},
+ {{2, 40}}}),
+ NewInOutShapes({{{4, 2, 4, 5}},
+ {{4, 40}}}),
+ MapParams(MapStrStr(std::map<std::string, std::string>{{"axis", "1"}})),
+ LayerDataName("data"),
+ CanInfer(true)),
+ ::testing::make_tuple(LayerType("Flatten"),
+ InOutShapes({{{2, 2, 4, 5}},
+ {{4, 4, 5}}}),
+ NewInOutShapes({{{4, 2, 4, 5}},
+ {{8, 4, 5}}}),
+ MapParams(MapStrStr(std::map<std::string, std::string>{{"end_axis", "1"}})),
+ LayerDataName("data"),
CanInfer(true))
)
);
+
+class LayerValidatorNegativeTests : public BuiltInShapeInferImplTest {
+};
+
+TEST_P(LayerValidatorNegativeTests, reshaper) {
+ ASSERT_THROW(buildSingleLayerNetwork<3>(type, inOutShapes, &layerParams.data, layerDataName),
+ InferenceEngine::details::InferenceEngineException);
+}
+
+// TODO: test using MR!1690
+INSTANTIATE_TEST_CASE_P(
+ Reshape, LayerValidatorNegativeTests,
+ ::testing::Combine(
+ ::testing::Values(LayerType("Reshape")),
+ ::testing::Values(InOutShapes({{{1, 1, 300, 4}},
+ {{300, 4}}})),
+ ::testing::Values(NewInOutShapes({{{1, 1, 500, 4}},
+ {{500, 4}}})),
+ ::testing::Values(
+ MapParams(MapStrStr(
+ std::map<std::string, std::string>{{"dim", "0,-2,6"}})), // values can't be less than -1
+ MapParams(MapStrStr(
+ std::map<std::string, std::string>{{"dim", "0,-1,-1"}}))), // only a single -1 is allowed
+ ::testing::Values(LayerDataName("data")),
+ ::testing::Values(CanInfer())
+ )
+);
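
The new Gemm cases encode one shape rule: with both transpose flags false, A is [batch..., M, K], B is [batch..., K, N], the optional C input must match the output [batch..., M, N], and leading batch dimensions broadcast (a dim of 1 matches any size). A compact restatement of that rule as a sketch (gemmOutShape is a hypothetical helper, not the engine's implementation):

#include <algorithm>
#include <stdexcept>
#include <vector>

using Shape = std::vector<size_t>;

// Shape of Gemm output for transpose_a == transpose_b == false.
Shape gemmOutShape(const Shape& A, const Shape& B) {
    const size_t r = A.size();
    if (B.size() != r || r < 2) throw std::invalid_argument("rank mismatch");
    if (A[r - 1] != B[r - 2])   throw std::invalid_argument("K mismatch");
    Shape out(A.begin(), A.end() - 2);            // batch dims, taken from A
    for (size_t i = 0; i + 2 < r; ++i) {
        if (A[i] != B[i] && A[i] != 1 && B[i] != 1)
            throw std::invalid_argument("batch dims not broadcastable");
        out[i] = std::max(A[i], B[i]);            // broadcast 1 against N
    }
    out.push_back(A[r - 2]);                      // M
    out.push_back(B[r - 1]);                      // N
    return out;
}

For example, gemmOutShape({3, 3, 15, 10}, {3, 1, 10, 20}) yields {3, 3, 15, 20}, matching the broadcasting case above, while a K mismatch such as {20, 15} x {10, 25} throws, mirroring the CanInfer(false) cases.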
diff --git a/inference-engine/tests/unit/shape_infer/built_in_shape_infer_general_test.hpp b/inference-engine/tests/unit/shape_infer/built_in_shape_infer_general_test.hpp
index ca02f5afd..5eac622be 100644
--- a/inference-engine/tests/unit/shape_infer/built_in_shape_infer_general_test.hpp
+++ b/inference-engine/tests/unit/shape_infer/built_in_shape_infer_general_test.hpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -8,56 +7,34 @@
#include <gtest/gtest.h>
#include <inference_engine/shape_infer/built-in/ie_built_in_holder.hpp>
#include <utility>
-#include <inference_engine/v2_format_parser.h>
+#include <inference_engine/ie_format_parser.h>
#include <xml_helper.hpp>
#include <xml_net_builder.hpp>
-
-#define PRETTY_PARAM(name, type) \
- class name \
- { \
- public: \
- typedef type param_type; \
- explicit name ( param_type arg = param_type ()) : val_(arg) {} \
- operator param_type () const {return val_;} \
- private: \
- param_type val_; \
- }; \
- static inline void PrintTo(name param, ::std::ostream* os) \
- { \
- *os << #name ": " << ::testing::PrintToString((name::param_type)(param)); \
- }
-
-struct MapStrStr {
- std::map<std::string, std::string> data{};
-
- explicit MapStrStr(std::map<std::string, std::string> _data) : data(std::move(_data)) {}
-
- MapStrStr() {}
-};
+#include <single_layer_common.hpp>
class BaseTestCreator {
protected:
std::string _type;
public:
- explicit BaseTestCreator(const std::string& type) : _type(type) {}
+ explicit BaseTestCreator(const std::string &type) : _type(type) {}
- virtual InferenceEngine::CNNLayerPtr create(const std::string& type) = 0;
+ virtual InferenceEngine::CNNLayerPtr create(const std::string &type) = 0;
- virtual bool shouldCreate(const std::string& type) = 0;
+ virtual bool shouldCreate(const std::string &type) = 0;
};
template<class LT>
class LayerTestCreator : public BaseTestCreator {
public:
- explicit LayerTestCreator(const std::string& type) : BaseTestCreator(type) {}
+ explicit LayerTestCreator(const std::string &type) : BaseTestCreator(type) {}
- InferenceEngine::CNNLayerPtr create(const std::string& type) override {
+ InferenceEngine::CNNLayerPtr create(const std::string &type) override {
InferenceEngine::LayerParams params;
params.type = type;
return std::make_shared<LT>(params);
}
- bool shouldCreate(const std::string& type) override {
+ bool shouldCreate(const std::string &type) override {
return type == _type;
}
};
@@ -66,12 +43,12 @@ struct param_size {
unsigned x;
unsigned y;
- friend std::ostream& operator<<(std::ostream& os, param_size const& paramSize) {
+ friend std::ostream &operator<<(std::ostream &os, param_size const &paramSize) {
os << "x=" << std::to_string(paramSize.x) << ", y=" << std::to_string(paramSize.y);
return os;
};
- std::string toSeparetedRow(const char* separator) {
+ std::string toSeparetedRow(const char *separator) {
std::string res = std::to_string(y) + separator + std::to_string(x);
return res;
}
@@ -123,7 +100,7 @@ protected:
holder = std::make_shared<InferenceEngine::ShapeInfer::BuiltInShapeInferHolder>();
}
- InferenceEngine::IShapeInferImpl::Ptr getShapeInferImpl(const std::string& type) {
+ InferenceEngine::IShapeInferImpl::Ptr getShapeInferImpl(const std::string &type) {
InferenceEngine::IShapeInferImpl::Ptr impl;
sts = holder->getShapeInferImpl(impl, type.c_str(), &resp);
if (sts != InferenceEngine::StatusCode::OK) THROW_IE_EXCEPTION << resp.msg;
@@ -139,7 +116,7 @@ protected:
template<class T>
class BuiltInShapeInferTestWithParam : public BuiltInShapeInferCommon,
public testing::WithParamInterface<T> {
- const std::vector<std::shared_ptr<BaseTestCreator>>& getCreators() const {
+ const std::vector<std::shared_ptr<BaseTestCreator>> &getCreators() const {
// there should be unique_ptr but it cant be used with initializer lists
static std::vector<std::shared_ptr<BaseTestCreator> > creators = {
std::make_shared<LayerTestCreator<InferenceEngine::PowerLayer>>("Power"),
@@ -166,20 +143,23 @@ class BuiltInShapeInferTestWithParam : public BuiltInShapeInferCommon,
std::make_shared<LayerTestCreator<InferenceEngine::ReshapeLayer>>("Reshape"),
std::make_shared<LayerTestCreator<InferenceEngine::TileLayer>>("Tile"),
std::make_shared<LayerTestCreator<InferenceEngine::BatchNormalizationLayer>>("BatchNormalization"),
+ std::make_shared<LayerTestCreator<InferenceEngine::GemmLayer>>("Gemm"),
+ std::make_shared<LayerTestCreator<InferenceEngine::PadLayer>>("Pad"),
+ std::make_shared<LayerTestCreator<InferenceEngine::GatherLayer>>("Gather")
};
return creators;
}
protected:
InferenceEngine::DataPtr
- getNotEmptyData(std::string const& name = "", const InferenceEngine::SizeVector& dims = {}) {
+ getNotEmptyData(std::string const &name = "", const InferenceEngine::SizeVector &dims = {}) {
InferenceEngine::TensorDesc desc(InferenceEngine::Precision::UNSPECIFIED, dims,
InferenceEngine::TensorDesc::getLayoutByDims(dims));
return std::make_shared<InferenceEngine::Data>(name, desc);
}
- InferenceEngine::CNNLayer::Ptr createLayer(const std::string& type) const {
- for (auto& creator : getCreators()) {
+ InferenceEngine::CNNLayer::Ptr createLayer(const std::string &type) const {
+ for (auto &creator : getCreators()) {
if (!creator->shouldCreate(type))
continue;
return creator->create(type);
@@ -188,63 +168,35 @@ protected:
return genericCreator.create(type);
}
- void initLayer(const InferenceEngine::CNNLayerPtr& layer, const testing::InOutData& inOutData) {
- for (const auto& in:inOutData.inDims) {
+ void initLayer(const InferenceEngine::CNNLayerPtr &layer, const testing::InOutData &inOutData) {
+ for (const auto &in:inOutData.inDims) {
auto data = getNotEmptyData("", in);
_savedData.push_back(data);
layer->insData.push_back(data);
}
- for (const auto& out:inOutData.outDims) {
+ for (const auto &out:inOutData.outDims) {
layer->outData.push_back(getNotEmptyData("", out));
}
}
- static testing::InOutData getFakeData(const testing::InOutData& inOutShapes) {
+ static testing::InOutData getFakeData(const testing::InOutData &inOutShapes) {
testing::InOutData initial = inOutShapes;
- for (auto& dims : initial.inDims) {
+ for (auto &dims : initial.inDims) {
std::fill(dims.begin(), dims.end(), 1);
}
- for (auto& dims : initial.outDims) {
+ for (auto &dims : initial.outDims) {
std::fill(dims.begin(), dims.end(), 1);
}
return initial;
}
- static InferenceEngine::details::CNNNetworkImplPtr buildSingleLayerNetwork(
- const std::string& layerType,
- const testing::InOutData& inOutShapes,
- std::map<std::string, std::string>* params,
- const std::string& layerDataName,
- int ir_version = 3) {
- testing::XMLHelper xmlHelper(new InferenceEngine::details::V2FormatParser(ir_version));
- std::string precision = InferenceEngine::Precision(InferenceEngine::Precision::FP32).name();
- auto netBuilder = testing::V2NetBuilder::buildNetworkWithOneInput("Mock", inOutShapes.inDims[0], precision);
- size_t inputsNumber = inOutShapes.inDims.size();
- for (int i = 1; i < inputsNumber; i++) {
- netBuilder.addInputLayer(precision, inOutShapes.inDims[i]);
- }
- netBuilder.addLayer(layerType, precision, params, inOutShapes, 0, 0, layerDataName);
- std::string testContent;
- if (inputsNumber > 1) {
- auto edgeBuilder = netBuilder.havingEdges();
- for (size_t i = 0; i < inputsNumber; i++) {
- edgeBuilder.connect(i, inputsNumber);
- }
- testContent = edgeBuilder.finish();
- } else {
- testContent = netBuilder.finish();
- }
- xmlHelper.loadContent(testContent);
- return xmlHelper.parseWithReturningNetwork();
- }
-
static InferenceEngine::ICNNNetwork::InputShapes
- setInputShapes(const InferenceEngine::ICNNNetwork& cnnNetwork,
- const std::vector<InferenceEngine::SizeVector>& shapesToSet) {
+ setInputShapes(const InferenceEngine::ICNNNetwork &cnnNetwork,
+ const std::vector<InferenceEngine::SizeVector> &shapesToSet) {
InferenceEngine::ICNNNetwork::InputShapes inputShapes;
InferenceEngine::InputsDataMap inputs;
cnnNetwork.getInputsInfo(inputs);
- for (const auto& pair : inputs) {
+ for (const auto &pair : inputs) {
auto info = pair.second;
if (info) {
auto data = info->getInputData();
@@ -254,14 +206,14 @@ protected:
}
}
int i = 0;
- for (auto& pair : inputShapes) {
+ for (auto &pair : inputShapes) {
pair.second = shapesToSet[i++];
}
return inputShapes;
}
- static void checkNetworkInOut(const InferenceEngine::ICNNNetwork& network,
- const testing::InOutData& inOutData) {
+ static void checkNetworkInOut(const InferenceEngine::ICNNNetwork &network,
+ const testing::InOutData &inOutData) {
InferenceEngine::InputsDataMap inputsDataMap;
InferenceEngine::OutputsDataMap outputsDataMap;
network.getInputsInfo(inputsDataMap);
@@ -276,6 +228,16 @@ protected:
}
}
+ template<int Version = 3>
+ static InferenceEngine::details::CNNNetworkImplPtr
+ buildSingleLayerNetwork(const std::string &layerType,
+ const testing::InOutData &inOutShapes,
+ std::map<std::string, std::string> *params,
+ const std::string &layerDataName = "data") {
+ auto *parser = new InferenceEngine::details::FormatParser(Version);
+ return buildSingleLayerNetworkCommon<Version>(parser, layerType, inOutShapes, params, layerDataName);
+ }
+
protected:
std::vector<InferenceEngine::SizeVector> outShapes;
std::map<std::string, std::string> params;
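
The net effect of this header change: the IR version moves from a runtime argument of buildSingleLayerNetwork() into a template parameter, with the XML-building body shared via buildSingleLayerNetworkCommon() from single_layer_common.hpp. Call sites now state the parser version statically, as the updated tests above do:

// Fragment (fixture members as in the tests above): the version is a
// compile-time argument, and layerDataName defaults to "data".
auto netV2 = buildSingleLayerNetwork<2>(type, inOutShapes, &layerParams, "pooling_data");
auto netV3 = buildSingleLayerNetwork<3>(type, inOutShapes, &layerParams);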
diff --git a/inference-engine/tests/unit/shape_infer/built_in_shape_infer_pool_test.cpp b/inference-engine/tests/unit/shape_infer/built_in_shape_infer_pool_test.cpp
index 8ea27c3da..487ff8469 100644
--- a/inference-engine/tests/unit/shape_infer/built_in_shape_infer_pool_test.cpp
+++ b/inference-engine/tests/unit/shape_infer/built_in_shape_infer_pool_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -7,7 +6,7 @@
#include <inference_engine/shape_infer/built-in/ie_built_in_holder.hpp>
#include <xml_net_builder.hpp>
#include <inference_engine/cnn_network_impl.hpp>
-#include <inference_engine/v2_format_parser.h>
+#include <inference_engine/ie_format_parser.h>
#include <xml_helper.hpp>
#include <inference_engine/shape_infer/ie_reshaper.hpp>
#include "built_in_shape_infer_general_test.hpp"
@@ -78,7 +77,6 @@ protected:
};
TEST_P(BuiltInShapeInferPoolImplTest, body) {
- InferenceEngine::details::BaseCreator::version_ = 2;
auto impl = getShapeInferImpl(type);
ASSERT_NE(nullptr, impl);
ASSERT_NO_THROW(sts = impl->inferShapes(inOutShapes.inDims, getMapParams(), blobs, outShapes, &resp));
@@ -88,7 +86,7 @@ TEST_P(BuiltInShapeInferPoolImplTest, body) {
TEST_P(BuiltInShapeInferPoolImplTest, reshaper) {
auto layerParams = getMapParams();
- auto cnnNetworkImplPtr = buildSingleLayerNetwork(type, inOutShapes, &layerParams, "pooling_data", 2);
+ auto cnnNetworkImplPtr = buildSingleLayerNetwork<2>(type, inOutShapes, &layerParams, "pooling_data");
auto reshaper = std::make_shared<Reshaper>(*cnnNetworkImplPtr);
auto inputShapes = setInputShapes(*cnnNetworkImplPtr, newInOutShapes.inDims);
reshaper->run(inputShapes);
@@ -97,7 +95,7 @@ TEST_P(BuiltInShapeInferPoolImplTest, reshaper) {
TEST_P(BuiltInShapeInferPoolImplTest, batch) {
auto layerParams = getMapParams();
- auto cnnNetworkImplPtr = buildSingleLayerNetwork(type, inOutShapes, &layerParams, "pooling_data", 2);
+ auto cnnNetworkImplPtr = buildSingleLayerNetwork<2>(type, inOutShapes, &layerParams, "pooling_data");
auto reshaper = std::make_shared<Reshaper>(*cnnNetworkImplPtr);
sts = cnnNetworkImplPtr->setBatchSize(BATCH, &resp);
ASSERT_EQ((int)OK, sts) << resp.msg;
@@ -106,7 +104,6 @@ TEST_P(BuiltInShapeInferPoolImplTest, batch) {
}
TEST_P(BuiltInShapeInferPoolImplTest, body_IRv3) {
- InferenceEngine::details::BaseCreator::version_ = 3;
auto impl = getShapeInferImpl(type);
ASSERT_NE(nullptr, impl);
ASSERT_NO_THROW(sts = impl->inferShapes(inOutShapes.inDims, getMapParams_IRv3(), blobs, outShapes, &resp));
@@ -116,7 +113,7 @@ TEST_P(BuiltInShapeInferPoolImplTest, body_IRv3) {
TEST_P(BuiltInShapeInferPoolImplTest, reshaper_IRv3) {
auto layerParams = getMapParams_IRv3();
- auto cnnNetworkImplPtr = buildSingleLayerNetwork(type, inOutShapes, &layerParams, "pooling_data");
+ auto cnnNetworkImplPtr = buildSingleLayerNetwork<3>(type, inOutShapes, &layerParams, "pooling_data");
auto reshaper = std::make_shared<Reshaper>(*cnnNetworkImplPtr);
auto inputShapes = setInputShapes(*cnnNetworkImplPtr, newInOutShapes.inDims);
reshaper->run(inputShapes);
@@ -125,7 +122,7 @@ TEST_P(BuiltInShapeInferPoolImplTest, reshaper_IRv3) {
TEST_P(BuiltInShapeInferPoolImplTest, batch_IRv3) {
auto layerParams = getMapParams_IRv3();
- auto cnnNetworkImplPtr = buildSingleLayerNetwork(type, inOutShapes, &layerParams, "pooling_data");
+ auto cnnNetworkImplPtr = buildSingleLayerNetwork<3>(type, inOutShapes, &layerParams, "pooling_data");
auto reshaper = std::make_shared<Reshaper>(*cnnNetworkImplPtr);
sts = cnnNetworkImplPtr->setBatchSize(BATCH, &resp);
ASSERT_EQ((int)OK, sts) << resp.msg;
diff --git a/inference-engine/tests/unit/shape_infer/cpu_ext_shape_infer_general_test.cpp b/inference-engine/tests/unit/shape_infer/cpu_ext_shape_infer_general_test.cpp
index a4aaca7b9..4551dd766 100644
--- a/inference-engine/tests/unit/shape_infer/cpu_ext_shape_infer_general_test.cpp
+++ b/inference-engine/tests/unit/shape_infer/cpu_ext_shape_infer_general_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -11,7 +10,8 @@
#include <cpp/ie_cnn_net_reader.h>
#include <test_model_path.hpp>
#include <inference_engine/debug.h>
-#include <extension/ext_list.hpp>
+#include <ie_extension.h>
+#include <tests_common.hpp>
#include "built_in_shape_infer_general_test.hpp"
using namespace InferenceEngine;
@@ -20,9 +20,12 @@ using namespace ShapeInfer;
class CPUExtShapeInferTests : public BuiltInShapeInferImplTest {
protected:
+ InferenceEngine::ShapeInferExtension shapeInferExt;
+ CPUExtShapeInferTests() : shapeInferExt(TestsCommon::make_so_name("cpu_extension")) {}
+
void SetUp() override {
BuiltInShapeInferImplTest::SetUp();
- holder = std::make_shared<InferenceEngine::Extensions::Cpu::CpuExtensions>();
+ holder = std::shared_ptr<IShapeInferExtension>(&shapeInferExt, [](IShapeInferExtension*){});
}
};
@@ -40,7 +43,7 @@ TEST_P(CPUExtShapeInferTests, impl) {
}
TEST_P(CPUExtShapeInferTests, reshaper) {
- auto cnnNetworkImplPtr = buildSingleLayerNetwork(type, inOutShapes, &layerParams.data, layerDataName);
+ auto cnnNetworkImplPtr = buildSingleLayerNetwork<3>(type, inOutShapes, &layerParams.data, layerDataName);
auto reshaper = std::make_shared<Reshaper>(*cnnNetworkImplPtr);
auto inputShapes = setInputShapes(*cnnNetworkImplPtr.get(), newInOutShapes.inDims);
reshaper->AddExtension(holder);
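
The SetUp() change above leans on a standard idiom: the extension object is a fixture member (destroyed with the fixture), and the shared_ptr handed to the holder gets a no-op deleter so it never tries to delete the member itself. The general form, sketched with a stand-in Widget type:

#include <memory>

struct Widget {};

void takesShared(const std::shared_ptr<Widget>&) { /* API that wants shared ownership */ }

int main() {
    Widget member;                                               // owned by this scope
    std::shared_ptr<Widget> nonOwning(&member, [](Widget*) {});  // deleter does nothing
    takesShared(nonOwning);  // safe only while 'member' outlives every copy
    return 0;
}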
diff --git a/inference-engine/tests/unit/shape_infer/input_controller_test.cpp b/inference-engine/tests/unit/shape_infer/input_controller_test.cpp
index 3aaeffceb..c6fc3756b 100644
--- a/inference-engine/tests/unit/shape_infer/input_controller_test.cpp
+++ b/inference-engine/tests/unit/shape_infer/input_controller_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/shape_infer/input_reshape_launcher_test.cpp b/inference-engine/tests/unit/shape_infer/input_reshape_launcher_test.cpp
index b0543e33d..7d99fcbb9 100644
--- a/inference-engine/tests/unit/shape_infer/input_reshape_launcher_test.cpp
+++ b/inference-engine/tests/unit/shape_infer/input_reshape_launcher_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/shape_infer/output_controller_test.cpp b/inference-engine/tests/unit/shape_infer/output_controller_test.cpp
index f69838c78..8083875e9 100644
--- a/inference-engine/tests/unit/shape_infer/output_controller_test.cpp
+++ b/inference-engine/tests/unit/shape_infer/output_controller_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/shape_infer/reshape_launcher_test.cpp b/inference-engine/tests/unit/shape_infer/reshape_launcher_test.cpp
index 2802dcf95..372d3f43c 100644
--- a/inference-engine/tests/unit/shape_infer/reshape_launcher_test.cpp
+++ b/inference-engine/tests/unit/shape_infer/reshape_launcher_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/shape_infer/reshaper_test.cpp b/inference-engine/tests/unit/shape_infer/reshaper_test.cpp
index 277a990d5..86364ea73 100644
--- a/inference-engine/tests/unit/shape_infer/reshaper_test.cpp
+++ b/inference-engine/tests/unit/shape_infer/reshaper_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -11,6 +10,7 @@
#include <mock_icnn_network.hpp>
#include <../graph_tools/graph_test_base.hpp>
#include <shape_infer/mock_reshaper_launcher.hpp>
+#include <shape_infer/ie_reshaper.hpp>
using namespace InferenceEngine;
using namespace InferenceEngine::details;
@@ -95,7 +95,7 @@ public:
ResponseDesc resp;
static const std::string TEST_NAME;
MockIShapeInferImpl::Ptr impl;
- Reshaper::Ptr reshaper;
+ ReshaperPtr reshaper;
};
const std::string ReshaperTest::TEST_NAME = "TEST_NAME";
diff --git a/inference-engine/tests/unit/stress_tests/stress_tests.cpp b/inference-engine/tests/unit/stress_tests/stress_tests.cpp
index 06341923f..5bb764f40 100644
--- a/inference-engine/tests/unit/stress_tests/stress_tests.cpp
+++ b/inference-engine/tests/unit/stress_tests/stress_tests.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
diff --git a/inference-engine/tests/unit/topology_verification_tests/v2_topology_verification_test.cpp b/inference-engine/tests/unit/topology_verification_tests/v2_topology_verification_test.cpp
index da84d72dd..34ff736ea 100644
--- a/inference-engine/tests/unit/topology_verification_tests/v2_topology_verification_test.cpp
+++ b/inference-engine/tests/unit/topology_verification_tests/v2_topology_verification_test.cpp
@@ -1,5 +1,4 @@
// Copyright (C) 2018 Intel Corporation
-//
// SPDX-License-Identifier: Apache-2.0
//
@@ -9,7 +8,7 @@
#include "xml_net_builder.hpp"
#include "xml_helper.hpp"
#include "pugixml.hpp"
-#include "inference_engine/v2_format_parser.h"
+#include "inference_engine/ie_format_parser.h"
#include <fstream>
#include <stdio.h>
#include "details/ie_exception.hpp"
@@ -22,7 +21,7 @@ class V2TopologyVerificationTests : public ::testing::Test {
protected:
virtual void TearDown() {}
virtual void SetUp() {
- xmlHelper.reset(new XMLHelper(new details::V2FormatParser(2)));
+ xmlHelper.reset(new XMLHelper(new details::FormatParser(2)));
}
public:
unique_ptr<CNNNetwork> cnnNetwork;