summary refs log tree commit diff
path: root/packaging/0003-backends-test-Add-ReduceSum-test-cases.patch
diff options
context:
space:
mode:
Diffstat (limited to 'packaging/0003-backends-test-Add-ReduceSum-test-cases.patch')
-rw-r--r--  packaging/0003-backends-test-Add-ReduceSum-test-cases.patch  399
1 files changed, 399 insertions, 0 deletions
diff --git a/packaging/0003-backends-test-Add-ReduceSum-test-cases.patch b/packaging/0003-backends-test-Add-ReduceSum-test-cases.patch
new file mode 100644
index 000000000..989be5b57
--- /dev/null
+++ b/packaging/0003-backends-test-Add-ReduceSum-test-cases.patch
@@ -0,0 +1,399 @@
+From 7a6c7409021a64749b8792ea069d81463c5ee98c Mon Sep 17 00:00:00 2001
+From: Inki Dae <inki.dae@samsung.com>
+Date: Mon, 7 Sep 2020 20:17:38 +0900
+Subject: [PATCH 03/10] backends/test: Add ReduceSum test cases
+
+Change-Id: Ic6d02e0e51908958cd1499f0d0f22146763574ee
+Signed-off-by: Inki Dae <inki.dae@samsung.com>
+---
+ src/backends/backendsCommon/test/CMakeLists.txt | 2 +
+ src/backends/backendsCommon/test/LayerTests.hpp | 1 +
+ .../test/layerTests/ReduceSumTestImpl.cpp | 293 +++++++++++++++++++++
+ .../test/layerTests/ReduceSumTestImpl.hpp | 33 +++
+ src/backends/reference/test/RefLayerTests.cpp | 7 +
+ 5 files changed, 336 insertions(+)
+ create mode 100644 src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp
+ create mode 100644 src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.hpp
+
+diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
+index dd96d36..951a46d 100644
+--- a/src/backends/backendsCommon/test/CMakeLists.txt
++++ b/src/backends/backendsCommon/test/CMakeLists.txt
+@@ -127,6 +127,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources
+ layerTests/PreluTestImpl.hpp
+ layerTests/QuantizeTestImpl.cpp
+ layerTests/QuantizeTestImpl.hpp
++ layerTests/ReduceSumTestImpl.cpp
++ layerTests/ReduceSumTestImpl.hpp
+ layerTests/ReshapeTestImpl.cpp
+ layerTests/ReshapeTestImpl.hpp
+ layerTests/ResizeTestImpl.cpp
+diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
+index 247ed12..25f411f 100644
+--- a/src/backends/backendsCommon/test/LayerTests.hpp
++++ b/src/backends/backendsCommon/test/LayerTests.hpp
+@@ -44,6 +44,7 @@
+ #include <backendsCommon/test/layerTests/Pooling2dTestImpl.hpp>
+ #include <backendsCommon/test/layerTests/PreluTestImpl.hpp>
+ #include <backendsCommon/test/layerTests/QuantizeTestImpl.hpp>
++#include <backendsCommon/test/layerTests/ReduceSumTestImpl.hpp>
+ #include <backendsCommon/test/layerTests/ReshapeTestImpl.hpp>
+ #include <backendsCommon/test/layerTests/ResizeTestImpl.hpp>
+ #include <backendsCommon/test/layerTests/RsqrtTestImpl.hpp>
+diff --git a/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp
+new file mode 100644
+index 0000000..4d698df
+--- /dev/null
++++ b/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp
+@@ -0,0 +1,293 @@
++//
++// Copyright © 2019 Arm Ltd. All rights reserved.
++// SPDX-License-Identifier: MIT
++//
++
++#include "ReduceSumTestImpl.hpp"
++
++#include <backendsCommon/test/DataTypeUtils.hpp>
++#include <backendsCommon/test/TensorCopyUtils.hpp>
++#include <backendsCommon/test/WorkloadTestUtils.hpp>
++
++#include <test/TensorHelpers.hpp>
++
++namespace
++{
++
++template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
++LayerTestResult<float, 4> ReduceSumTestCommon(
++ armnn::IWorkloadFactory& workloadFactory,
++ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
++ const armnn::TensorInfo inputTensorInfo,
++ const armnn::TensorInfo outputTensorInfo,
++ const std::vector<float>& inputData,
++ const std::vector<float>& outputData,
++ int axis = 3)
++{
++ IgnoreUnused(memoryManager);
++ auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));
++
++ LayerTestResult<float, 4> result(outputTensorInfo);
++ result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
++
++ std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
++ std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
++
++ armnn::ReduceSumQueueDescriptor descriptor;
++ unsigned int updated_idx = static_cast<uint32_t>(axis);
++ if (axis < 0) {
++ updated_idx = static_cast<uint32_t>(static_cast<int32_t>(inputTensorInfo.GetNumDimensions()) + axis);
++ }
++
++ descriptor.m_Parameters.m_Axis = updated_idx;
++ descriptor.m_Parameters.m_DataLayout = armnn::DataLayout::NCHW;
++ armnn::WorkloadInfo info;
++
++ AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
++ AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
++
++ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateReduceSum(descriptor, info);
++
++ inputHandle->Allocate();
++ outputHandle->Allocate();
++
++ CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin());
++
++ workload->Execute();
++
++ CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
++
++ return result;
++}
++
++} // namespace
++
++template<armnn::DataType ArmnnType, typename T>
++LayerTestResult<float, 4> ReduceSumSimpleTest(
++ armnn::IWorkloadFactory& workloadFactory,
++ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
++{
++ const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
++ const armnn::TensorShape outputShape{ 1, 1, 1, 1};
++
++ armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
++
++ if (armnn::IsQuantizedType<T>())
++ {
++ inputTensorInfo.SetQuantizationScale(1.0f);
++ inputTensorInfo.SetQuantizationOffset(0);
++ }
++
++ armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
++
++ std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
++ std::vector<float> outputValues({ 34.0f });
++
++ return ReduceSumTestCommon<ArmnnType>(workloadFactory, memoryManager,
++ inputTensorInfo, outputTensorInfo,
++ inputValues, outputValues, -1);
++}
++
++template<armnn::DataType ArmnnType, typename T>
++LayerTestResult<float, 4> ReduceSumMultiChannel_1Test(
++ armnn::IWorkloadFactory& workloadFactory,
++ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
++{
++ const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
++ const armnn::TensorShape outputShape{ 1, 1, 2, 4};
++
++ armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
++
++ if (armnn::IsQuantizedType<T>())
++ {
++ inputTensorInfo.SetQuantizationScale(1.0f);
++ inputTensorInfo.SetQuantizationOffset(0);
++ }
++
++ armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
++
++ std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
++ 5.0f, 6.0f, 7.0f, 8.0f,
++
++ 10.0f, 20.0f, 30.0f, 40.0f,
++ 50.0f, 60.0f, 70.0f, 80.0f,
++
++ 100.0f, 200.0f, 300.0f, 400.0f,
++ 500.0f, 600.0f, 700.0f, 800.0f });
++ std::vector<float> outputValues({ 111.0f, 222.0f, 333.0f, 444.0f,
++ 555.0f, 666.0f, 777.0f, 888.0f });
++
++ return ReduceSumTestCommon<ArmnnType>(workloadFactory, memoryManager,
++ inputTensorInfo, outputTensorInfo,
++ inputValues, outputValues, 1);
++}
++
++template<armnn::DataType ArmnnType, typename T>
++LayerTestResult<float, 4> ReduceSumMultiChannel_2Test(
++ armnn::IWorkloadFactory& workloadFactory,
++ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
++{
++ const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
++ const armnn::TensorShape outputShape{ 1, 1, 1, 4};
++
++ armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
++
++ if (armnn::IsQuantizedType<T>())
++ {
++ inputTensorInfo.SetQuantizationScale(1.0f);
++ inputTensorInfo.SetQuantizationOffset(0);
++ }
++
++ armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
++
++ std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
++ 5.0f, 6.0f, 7.0f, 8.0f,
++
++ 10.0f, 20.0f, 30.0f, 40.0f,
++ 50.0f, 60.0f, 70.0f, 80.0f,
++
++ 100.0f, 200.0f, 300.0f, 400.0f,
++ 500.0f, 600.0f, 700.0f, 800.0f });
++ std::vector<float> outputValues({ 666.0f, 888.0f, 1110.0f, 1332.0f });
++
++ return ReduceSumTestCommon<ArmnnType>(workloadFactory, memoryManager,
++ inputTensorInfo, outputTensorInfo,
++ inputValues, outputValues, 2);
++}
++
++template<armnn::DataType ArmnnType, typename T>
++LayerTestResult<float, 4> ReduceSumMultiBatchAndChannelTest(
++ armnn::IWorkloadFactory& workloadFactory,
++ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
++{
++ const armnn::TensorShape inputShape{ 3, 2, 3, 4 };
++ const armnn::TensorShape outputShape{ 3, 1, 1, 4};
++
++ armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
++
++ if (armnn::IsQuantizedType<T>())
++ {
++ inputTensorInfo.SetQuantizationScale(1.0f);
++ inputTensorInfo.SetQuantizationOffset(0);
++ }
++
++ armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
++
++ std::vector<float> inputValues( {7, 8, 6, 1,
++ 1, 1, 8, 7,
++ 3, 7, 7, 7,
++
++ 6, 8, 4, 7,
++ 3, 8, 7, 3,
++ 5, 8, 8, 8,
++
++
++ 7, 8, 2, 7,
++ 3, 8, 5, 6,
++ 8, 4, 2, 7,
++
++ 1, 6, 7, 2,
++ 8, 3, 3, 1,
++ 7, 6, 2, 6,
++
++
++ 5, 3, 4, 8,
++ 7, 8, 2, 4,
++ 6, 6, 2, 8,
++
++ 2, 2, 7, 2,
++ 5, 3, 6, 3,
++ 6, 1, 8, 8});
++ std::vector<float> outputValues({ 25.0f, 40.0f, 40.0f, 33.0f,
++ 34.0f, 35.0f, 21.0f, 29.0f,
++ 31.0f, 23.0f, 29.0f, 33.0f});
++
++ return ReduceSumTestCommon<ArmnnType>(workloadFactory, memoryManager,
++ inputTensorInfo, outputTensorInfo,
++ inputValues, outputValues, 2);
++}
++
++template<armnn::DataType ArmnnType, typename T>
++LayerTestResult<float, 4> ReduceSumMultiBatchAndChannel_2Test(
++ armnn::IWorkloadFactory& workloadFactory,
++ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
++{
++ const armnn::TensorShape inputShape{ 3, 2, 3, 4 };
++ const armnn::TensorShape outputShape{ 3, 2, 3, 1};
++
++ armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
++
++ if (armnn::IsQuantizedType<T>())
++ {
++ inputTensorInfo.SetQuantizationScale(1.0f);
++ inputTensorInfo.SetQuantizationOffset(0);
++ }
++
++ armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
++
++ std::vector<float> inputValues( {7, 8, 6, 1,
++ 1, 1, 8, 7,
++ 3, 7, 7, 7,
++
++ 6, 8, 4, 7,
++ 3, 8, 7, 3,
++ 5, 8, 8, 8,
++
++
++ 7, 8, 2, 7,
++ 3, 8, 5, 6,
++ 8, 4, 2, 7,
++
++ 1, 6, 7, 2,
++ 8, 3, 3, 1,
++ 7, 6, 2, 6,
++
++
++ 5, 3, 4, 8,
++ 7, 8, 2, 4,
++ 6, 6, 2, 8,
++
++ 2, 2, 7, 2,
++ 5, 3, 6, 3,
++ 6, 1, 8, 8});
++ std::vector<float> outputValues({ 22.0f, 17.0f, 24.0f,
++ 25.0f, 21.0f, 29.0f,
++
++ 24.0f, 22.0f, 21.0f,
++ 16.0f, 15.0f, 21.0f,
++
++ 20.0f, 21.0f, 22.0f,
++ 13.0f, 17.0f, 23.0f});
++
++ return ReduceSumTestCommon<ArmnnType>(workloadFactory, memoryManager,
++ inputTensorInfo, outputTensorInfo,
++ inputValues, outputValues, 3);
++}
++
++
++// Explicit template specializations
++
++template LayerTestResult<float, 4>
++ReduceSumSimpleTest<armnn::DataType::Float32>(
++ armnn::IWorkloadFactory& workloadFactory,
++ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
++
++template LayerTestResult<float, 4>
++ReduceSumMultiChannel_1Test<armnn::DataType::Float32>(
++ armnn::IWorkloadFactory& workloadFactory,
++ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
++
++template LayerTestResult<float, 4>
++ReduceSumMultiChannel_2Test<armnn::DataType::Float32>(
++ armnn::IWorkloadFactory& workloadFactory,
++ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
++
++template LayerTestResult<float, 4>
++ReduceSumMultiBatchAndChannelTest<armnn::DataType::Float32>(
++ armnn::IWorkloadFactory& workloadFactory,
++ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
++
++template LayerTestResult<float, 4>
++ReduceSumMultiBatchAndChannel_2Test<armnn::DataType::Float32>(
++ armnn::IWorkloadFactory& workloadFactory,
++ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+diff --git a/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.hpp
+new file mode 100644
+index 0000000..01d1a44
+--- /dev/null
++++ b/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.hpp
+@@ -0,0 +1,33 @@
++//
++// Copyright © 2019 Arm Ltd. All rights reserved.
++// SPDX-License-Identifier: MIT
++//
++
++#pragma once
++
++#include "LayerTestResult.hpp"
++
++#include <ResolveType.hpp>
++
++#include <armnn/backends/IBackendInternal.hpp>
++#include <backendsCommon/WorkloadFactory.hpp>
++
++template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
++LayerTestResult<float, 4> ReduceSumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
++ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
++
++template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
++LayerTestResult<float, 4> ReduceSumMultiChannel_1Test(armnn::IWorkloadFactory& workloadFactory,
++ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
++
++template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
++LayerTestResult<float, 4> ReduceSumMultiChannel_2Test(armnn::IWorkloadFactory& workloadFactory,
++ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
++
++template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
++LayerTestResult<float, 4> ReduceSumMultiBatchAndChannelTest(armnn::IWorkloadFactory& workloadFactory,
++ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
++
++template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
++LayerTestResult<float, 4> ReduceSumMultiBatchAndChannel_2Test(armnn::IWorkloadFactory& workloadFactory,
++ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
+index d8dab3d..9461e2a 100644
+--- a/src/backends/reference/test/RefLayerTests.cpp
++++ b/src/backends/reference/test/RefLayerTests.cpp
+@@ -1980,4 +1980,11 @@ ARMNN_AUTO_TEST_CASE(Neg3dQuantisedAsymm8, Neg3dTest<DataType::QAsymmU8>)
+ ARMNN_AUTO_TEST_CASE(Neg2dQuantisedSymm16, Neg2dTest<DataType::QSymmS16>)
+ ARMNN_AUTO_TEST_CASE(Neg3dQuantisedSymm16, Neg3dTest<DataType::QSymmS16>)
+
++// ReduceSum
++ARMNN_AUTO_TEST_CASE(ReduceSumFloat32, ReduceSumSimpleTest<DataType::Float32>)
++ARMNN_AUTO_TEST_CASE(ReduceSumMultiChannelFloat32, ReduceSumMultiChannel_1Test<DataType::Float32>)
++ARMNN_AUTO_TEST_CASE(ReduceSumMultiChannel2Float32, ReduceSumMultiChannel_2Test<DataType::Float32>)
++ARMNN_AUTO_TEST_CASE(ReduceSumMultiBatchAndChannelFloat32, ReduceSumMultiBatchAndChannelTest<DataType::Float32>)
++ARMNN_AUTO_TEST_CASE(ReduceSumMultiBatchAndChannel_2Float32, ReduceSumMultiBatchAndChannel_2Test<DataType::Float32>)
++
+ BOOST_AUTO_TEST_SUITE_END()
+--
+2.7.4
+