Diffstat (limited to 'inference-engine/thirdparty/clDNN/tests/test_cases/convolution_grad_input_gpu_test.cpp')
-rw-r--r--  inference-engine/thirdparty/clDNN/tests/test_cases/convolution_grad_input_gpu_test.cpp | 70
1 file changed, 70 insertions(+), 0 deletions(-)
diff --git a/inference-engine/thirdparty/clDNN/tests/test_cases/convolution_grad_input_gpu_test.cpp b/inference-engine/thirdparty/clDNN/tests/test_cases/convolution_grad_input_gpu_test.cpp
index 3c86b4c7a..a3cbc0a75 100644
--- a/inference-engine/thirdparty/clDNN/tests/test_cases/convolution_grad_input_gpu_test.cpp
+++ b/inference-engine/thirdparty/clDNN/tests/test_cases/convolution_grad_input_gpu_test.cpp
@@ -25,6 +25,7 @@
#include <api/CPP/network.hpp>
#include <api/CPP/engine.hpp>
#include "test_utils/test_utils.h"
+#include "api/CPP/eltwise.hpp"
using namespace cldnn;
using namespace tests;
@@ -136,4 +137,73 @@ TEST(convolution_grad_input_f32_fw_gpu, basic_wsiz2x2_in2x2x1x2_bfyx_stride2_pad
{
EXPECT_FLOAT_EQ(expected_output_vec[i], output_ptr[i]);
}
+}
+
+TEST(convolution_grad_input_f32_fw_gpu, basic_wsiz2x2_in2x2x1x2_bfyx_stride2_fusion) {
+ // Filter : 2x2
+ // Input : 2x2x1x2
+ // Output : 2x2x1x2
+ // Stride : 2x2
+ //
+ // Input:
+ // 8 0.5 1 3
+ // 6 9 2 4
+ //
+ // Filter
+ // -2 2
+ // 7 -0.5
+ //
+ // Output:
+ // -4 3.5 -0.5 21
+ // 12 -18 4 -9
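+ //
+ // Sketch of how the Output values above are obtained: convolution_grad_input
+ // computes the data gradient, i.e. a transposed convolution. With a 2x2
+ // filter, stride 2x2 and input offset -1 (pad 1), each output cell is covered
+ // by exactly one input cell and one filter weight. For batch 0:
+ //   out(0,0) = 8   * -0.5 = -4      out(0,1) = 0.5 * 7  = 3.5
+ //   out(1,0) = 6   *  2   = 12      out(1,1) = 9   * -2 = -18
+ // Batch 1 follows the same pattern with inputs 1, 3, 2, 4.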
+
+ engine engine;
+
+ auto input = memory::allocate(engine, { data_types::f32, format::bfyx,{ 2, 1, 2, 2 } });
+ auto weights = memory::allocate(engine, { data_types::f32, format::bfyx,{ 1, 1, 2, 2 } });
+ auto scale_in = memory::allocate(engine, { data_types::f32, format::bfyx,{ 1, 1, 1, 1 } });
+ auto elt_data = memory::allocate(engine, { data_types::f32, format::bfyx,{ 2, 2, 1, 2 } });
+
+ set_values(input, { 8.f, 0.5f, 6.f, 9.f, 1.f, 3.f, 2.f, 4.f });
+ set_values(weights, { -2.f, 2.f, 7.f, -0.5f });
+ set_values(scale_in, { 1.0f });
+ set_values(elt_data, { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f });
+
+ topology topology(
+ input_layout("input", input.get_layout()),
+ data("weights", weights),
+ data("scale_in", scale_in),
+ data("elt_data", elt_data),
+ convolution_grad_input("conv", "input", { "weights" }, { 1, 1, 2, 2 }, { 0, 0, -1, -1 }),
+ eltwise("elt", "conv", "elt_data", eltwise_mode::sum),
+ scale("scale", "elt", "scale_in")
+ );
+
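+ // With optimize_data enabled, the eltwise "elt" is expected to be fused into
+ // the convolution: the checks below assert it no longer appears in the
+ // network's primitive lists, while "scale" remains as the sole output.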
+ build_options options;
+ options.set_option(build_option::optimize_data(true));
+
+ network network(engine, topology, options);
+ network.set_input_data("input", input);
+
+ auto outputs = network.execute();
+ auto primitives = network.get_all_primitive_ids();
+ auto exec_prim = network.get_executed_primitive_ids();
+ EXPECT_EQ(outputs.size(), size_t(1));
+ EXPECT_EQ(outputs.begin()->first, "scale");
+ EXPECT_TRUE(std::find(primitives.begin(), primitives.end(), "elt") == primitives.end());
+ EXPECT_TRUE(std::find(exec_prim.begin(), exec_prim.end(), "elt") == exec_prim.end());
+
+ auto output_prim = outputs.begin()->second.get_memory();
+
+ auto output_ptr = output_prim.pointer<float>();
+
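+ // Expected values = the conv output listed above (in bfyx order:
+ // -4, 3.5, 12, -18, -0.5, 21, 4, -9) plus elt_data (1..8), times scale 1.0.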
+ std::vector<float> expected_output_vec = {
+ -3.f, 5.5f, 15.f, -14.f,
+ 4.5f, 27.f, 11.f, 0.f
+ };
+
+ for (unsigned int i = 0; i < expected_output_vec.size(); i++)
+ {
+ EXPECT_FLOAT_EQ(expected_output_vec[i], output_ptr[i]);
+ }
+}
\ No newline at end of file