summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFrancis Murtagh <francis.murtagh@arm.com>2019-11-19 12:24:19 +0000
committerJim Flynn Arm <jim.flynn@arm.com>2019-11-19 14:15:52 +0000
commit52ec3463086a12282d8b833521e9e32d1055c6a6 (patch)
tree4b312b58815d2552fb152753405aedfb238616f5
parent47e863dce36d9722ece3c4ceeb59c548edbb8e4f (diff)
downloadarmnn-52ec3463086a12282d8b833521e9e32d1055c6a6.tar.gz
armnn-52ec3463086a12282d8b833521e9e32d1055c6a6.tar.bz2
armnn-52ec3463086a12282d8b833521e9e32d1055c6a6.zip
IVGCVSW-3697 Add check for ArgMinMax QAsymm8 to ClLayerSupport
* Enable Neon EndToEnd tests for ArgMinMax QAsymm8
* Enable Neon Layer tests for ArgMinMax QAsymm8

Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
Change-Id: Ifa7463ded4397cacb82fb3667006f08ecbe3cd32
-rw-r--r--src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp30
-rw-r--r--src/backends/cl/ClLayerSupport.cpp5
-rw-r--r--src/backends/neon/test/NeonEndToEndTests.cpp50
-rw-r--r--src/backends/neon/test/NeonLayerTests.cpp7
4 files changed, 88 insertions, 4 deletions
diff --git a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
index 4475fb7ab..18c9e54f2 100644
--- a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
@@ -70,7 +70,7 @@ LayerTestResult<int32_t, 3> ArgMaxSimpleTest(
armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
- if(armnn::IsQuantizedType<T>())
+ if (armnn::IsQuantizedType<T>())
{
inputTensorInfo.SetQuantizationScale(1.0f);
inputTensorInfo.SetQuantizationOffset(0);
@@ -97,7 +97,7 @@ LayerTestResult<int32_t, 3> ArgMinSimpleTest(
armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
- if(armnn::IsQuantizedType<T>())
+ if (armnn::IsQuantizedType<T>())
{
inputTensorInfo.SetQuantizationScale(1.0f);
inputTensorInfo.SetQuantizationOffset(0);
@@ -124,7 +124,7 @@ LayerTestResult<int32_t, 3> ArgMinChannelTest(
armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
- if(armnn::IsQuantizedType<T>())
+ if (armnn::IsQuantizedType<T>())
{
inputTensorInfo.SetQuantizationScale(1.0f);
inputTensorInfo.SetQuantizationOffset(0);
@@ -159,7 +159,7 @@ LayerTestResult<int32_t, 3> ArgMaxChannelTest(
armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
- if(armnn::IsQuantizedType<T>())
+ if (armnn::IsQuantizedType<T>())
{
inputTensorInfo.SetQuantizationScale(1.0f);
inputTensorInfo.SetQuantizationOffset(0);
@@ -195,6 +195,12 @@ LayerTestResult<int32_t, 3> ArgMaxHeightTest(
armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
+ if (armnn::IsQuantizedType<T>())
+ {
+ inputTensorInfo.SetQuantizationScale(1.0f);
+ inputTensorInfo.SetQuantizationOffset(0);
+ }
+
std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
5.0f, 6.0f, 7.0f, 8.0f,
@@ -224,6 +230,12 @@ LayerTestResult<int32_t, 3> ArgMinWidthTest(
armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
+ if (armnn::IsQuantizedType<T>())
+ {
+ inputTensorInfo.SetQuantizationScale(1.0f);
+ inputTensorInfo.SetQuantizationOffset(0);
+ }
+
std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
5.0f, 6.0f, 7.0f, 8.0f,
@@ -336,6 +348,11 @@ ArgMaxHeightTest<armnn::DataType::Signed32>(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
+ArgMaxHeightTest<armnn::DataType::QuantisedAsymm8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
ArgMinWidthTest<armnn::DataType::Float32>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -344,3 +361,8 @@ template LayerTestResult<int32_t, 3>
ArgMinWidthTest<armnn::DataType::Signed32>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMinWidthTest<armnn::DataType::QuantisedAsymm8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 8cbf9bd11..612af6be2 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -184,6 +184,11 @@ bool ClLayerSupport::IsArgMinMaxSupported(const TensorInfo& input,
const ArgMinMaxDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
+ if (input.GetDataType() == DataType::QuantisedAsymm8)
+ {
+ return false;
+ }
+
FORWARD_WORKLOAD_VALIDATE_FUNC(ClArgMinMaxWorkloadValidate,
reasonIfUnsupported,
input,
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index e2b25a14f..e841821b5 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -454,4 +454,54 @@ BOOST_AUTO_TEST_CASE(NeonArgMinAxis3Test)
ArgMinAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
+BOOST_AUTO_TEST_CASE(NeonArgMaxSimpleTestQuantisedAsymm8)
+{
+ ArgMaxEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonArgMinSimpleTestQuantisedAsymm8)
+{
+ ArgMinEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonArgMaxAxis0TestQuantisedAsymm8)
+{
+ ArgMaxAxis0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonArgMinAxis0TestQuantisedAsymm8)
+{
+ ArgMinAxis0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonArgMaxAxis1TestQuantisedAsymm8)
+{
+ ArgMaxAxis1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonArgMinAxis1TestQuantisedAsymm8)
+{
+ ArgMinAxis1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonArgMaxAxis2TestQuantisedAsymm8)
+{
+ ArgMaxAxis2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonArgMinAxis2TestQuantisedAsymm8)
+{
+ ArgMinAxis2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonArgMaxAxis3TestQuantisedAsymm8)
+{
+ ArgMaxAxis3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonArgMinAxis3TestQuantisedAsymm8)
+{
+ ArgMinAxis3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index cd9a55d3e..26c55365c 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -972,6 +972,13 @@ ARMNN_AUTO_TEST_CASE(ArgMaxChannel, ArgMaxChannelTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(ArgMaxHeight, ArgMaxHeightTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(ArgMinWidth, ArgMinWidthTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(ArgMinQAsymm8, ArgMinSimpleTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(ArgMaxQAsymm8, ArgMaxSimpleTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(ArgMinChannelQAsymm8, ArgMinChannelTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(ArgMaxChannelQAsymm8, ArgMaxChannelTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(ArgMaxHeightQAsymm8, ArgMaxHeightTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(ArgMinWidthQAsymm8, ArgMinWidthTest<DataType::QuantisedAsymm8>)
+
#if defined(ARMNNREF_ENABLED)
// The ARMNN_COMPARE_REF_AUTO_TEST_CASE and the ARMNN_COMPARE_REF_FIXTURE_TEST_CASE test units are not available