 runtimes/neurun/src/backend/acl_cl/Convert.cc          |  6 ++++--
 runtimes/neurun/src/backend/acl_cl/Convert.h           |  3 ++-
 runtimes/neurun/src/backend/acl_cl/StageGenerator.cc   |  3 +++
 runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc    | 13 +++++++++++--
 runtimes/neurun/src/backend/acl_cl/TensorBuilder.h     |  3 +++
 runtimes/neurun/src/backend/acl_cl/operand/CLTensor.cc |  5 ++---
 runtimes/neurun/src/backend/acl_cl/operand/CLTensor.h  |  3 +--
 tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun       |  1 -
 tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun.cpu   |  1 -
 9 files changed, 26 insertions(+), 12 deletions(-)
diff --git a/runtimes/neurun/src/backend/acl_cl/Convert.cc b/runtimes/neurun/src/backend/acl_cl/Convert.cc
index ed0a089c4..f457fa4e1 100644
--- a/runtimes/neurun/src/backend/acl_cl/Convert.cc
+++ b/runtimes/neurun/src/backend/acl_cl/Convert.cc
@@ -76,9 +76,11 @@ namespace acl_cl
}
::arm_compute::TensorInfo asTensorInfo(const ::neurun::model::operand::Shape &shape,
- const ::neurun::model::operand::TypeInfo &typeInfo)
+ const ::neurun::model::operand::TypeInfo &typeInfo,
+ bool apply_dim_correction)
{
- return ::arm_compute::TensorInfo(asTensorShape(shape), 1, asDataType(typeInfo.type()),
+ return ::arm_compute::TensorInfo(asTensorShape(shape, apply_dim_correction), 1,
+ asDataType(typeInfo.type()),
asQuantizationInfo(typeInfo.scale(), typeInfo.offset()));
}
diff --git a/runtimes/neurun/src/backend/acl_cl/Convert.h b/runtimes/neurun/src/backend/acl_cl/Convert.h
index 1a233fb87..6d83dbb74 100644
--- a/runtimes/neurun/src/backend/acl_cl/Convert.h
+++ b/runtimes/neurun/src/backend/acl_cl/Convert.h
@@ -38,7 +38,8 @@ namespace acl_cl
bool apply_dim_correction = true);
::arm_compute::DataType asDataType(const ::neurun::model::operand::DataType &type);
::arm_compute::TensorInfo asTensorInfo(const ::neurun::model::operand::Shape &shape,
- const ::neurun::model::operand::TypeInfo &typeInfo);
+ const ::neurun::model::operand::TypeInfo &typeInfo,
+ bool apply_dim_correction = true);
} // namespace acl_cl
} // namespace backend
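
Note: "dimension correction" here refers to arm_compute's behavior of trimming trailing dimensions of size 1 while a shape is being built, so an operand declared as {10, 1, 1} can end up reported as rank 1. A minimal standalone sketch of the semantics this new flag toggles, assuming arm_compute's public TensorShape API (this example is illustration only, not part of the change):

#include <arm_compute/core/TensorShape.h>
#include <iostream>

int main()
{
  // With correction (the ACL default), trailing dimensions of size 1
  // are collapsed as the shape is built, lowering the reported rank.
  arm_compute::TensorShape corrected;
  corrected.set(0, 10, /*apply_dim_correction=*/true);
  corrected.set(1, 1, true);
  corrected.set(2, 1, true);

  // With correction disabled, the rank stays exactly as written.
  arm_compute::TensorShape exact;
  exact.set(0, 10, /*apply_dim_correction=*/false);
  exact.set(1, 1, false);
  exact.set(2, 1, false);

  std::cout << corrected.num_dimensions() << std::endl; // 1
  std::cout << exact.num_dimensions() << std::endl;     // 3
  return 0;
}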
diff --git a/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc b/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
index 043bf6b16..33e9aab17 100644
--- a/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
+++ b/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
@@ -607,6 +607,9 @@ void StageGenerator::visit(const model::operation::ReshapeNode &node)
auto tensors = _tensor_builder;
+ tensors->dimCorrection(input_index, false);
+ tensors->dimCorrection(output_index, false);
+
returnStage([tensors, param](IExecutionBuilder &builder) {
auto output_alloc = tensors->at(param.output_index).get();
auto input_alloc = tensors->at(param.input_index).get();
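
Note: Reshape is defined by the exact ranks the model declares, so collapsing trailing 1-sized dimensions on its operands would plausibly make the tensors report different shapes than the generated NNAPI tests expect; hence both operands opt out above. A standalone sketch of the configuration a Reshape stage effectively receives after this change, assuming an OpenCL-capable device (the shapes are illustrative, not taken from the change):

#include <arm_compute/core/TensorInfo.h>
#include <arm_compute/runtime/CL/CLScheduler.h>
#include <arm_compute/runtime/CL/CLTensor.h>
#include <arm_compute/runtime/CL/functions/CLReshapeLayer.h>

int main()
{
  using namespace arm_compute;
  CLScheduler::get().default_init();

  // Input {4} reshaped to a model-declared {2, 2, 1, 1}: with correction
  // disabled the output keeps rank 4 instead of collapsing to {2, 2}.
  TensorShape in_shape, out_shape;
  in_shape.set(0, 4, /*apply_dim_correction=*/false);
  out_shape.set(0, 2, false);
  out_shape.set(1, 2, false);
  out_shape.set(2, 1, false);
  out_shape.set(3, 1, false);

  CLTensor input, output;
  input.allocator()->init(TensorInfo(in_shape, 1, DataType::F32));
  output.allocator()->init(TensorInfo(out_shape, 1, DataType::F32));

  CLReshapeLayer reshape;
  reshape.configure(&input, &output); // element counts must match (4 == 4)

  input.allocator()->allocate();
  output.allocator()->allocate();
  reshape.run();
  CLScheduler::get().sync();
  return 0;
}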
diff --git a/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc b/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc
index b5c038200..92b5c4b4c 100644
--- a/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc
+++ b/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc
@@ -42,6 +42,7 @@ void TensorBuilder::registerTensorInfo(const model::operand::Index &ind,
assert(_tensors.size() == 0);
_tensor_info_map.insert({ind, info});
+ _apply_dim_correction_map.insert({ind, true});
}
void TensorBuilder::registerSubTensorInfo(const model::operand::Index &ind,
@@ -50,6 +51,7 @@ void TensorBuilder::registerSubTensorInfo(const model::operand::Index &ind,
assert(_tensors.size() == 0);
_subtensor_info_map.insert({ind, info});
+ _apply_dim_correction_map.insert({ind, true});
}
void TensorBuilder::notifyFirstUse(const model::operand::Index &)
@@ -75,7 +77,9 @@ void TensorBuilder::prepare(void)
{
auto ind = entry.first;
const auto &info = entry.second;
- auto tensor = std::make_shared<::neurun::backend::acl_cl::operand::CLTensor>(info);
+ const auto &tensor_info =
+ asTensorInfo(info.shape(), info.typeInfo(), _apply_dim_correction_map[ind]);
+ auto tensor = std::make_shared<::neurun::backend::acl_cl::operand::CLTensor>(tensor_info);
_tensors[ind] = tensor;
}
@@ -134,7 +138,7 @@ void TensorBuilder::prepare(void)
assert(info.type().offset() == parent_tensor->info()->quantization_info().offset);
assert(info.type().scale() == parent_tensor->info()->quantization_info().scale);
assert(asDataType(info.type().type()) == parent_tensor->info()->data_type());
- auto shape = asTensorShape(info.shape());
+ auto shape = asTensorShape(info.shape(), _apply_dim_correction_map[current]);
// Only support axis: 3 (channel)
::arm_compute::Coordinates coordinates;
@@ -241,6 +245,11 @@ bool TensorBuilder::isSubTensorOf(const model::operand::Index &parent,
return true;
}
+void TensorBuilder::dimCorrection(const model::operand::Index &index, bool apply_dim_correction)
+{
+ _apply_dim_correction_map[index] = apply_dim_correction;
+}
+
} // namespace acl_cl
} // namespace backend
} // namespace neurun
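
Note: the bookkeeping above follows a record-then-consume pattern: every operand defaults to corrected shapes at registration, stage generators may flip individual operands before materialization, and prepare() reads the final decision. A stand-in sketch of that flow (TensorBuilderSketch is hypothetical, with Index reduced to int and the ACL types elided so it compiles on its own):

#include <cstdio>
#include <unordered_map>

class TensorBuilderSketch
{
public:
  // Registration defaults every operand to corrected shapes.
  void registerTensorInfo(int ind) { _apply_dim_correction_map.insert({ind, true}); }
  // A stage generator may opt an operand out before tensors exist.
  void dimCorrection(int ind, bool apply) { _apply_dim_correction_map[ind] = apply; }
  // Materialization consults the final per-operand decision.
  void prepare()
  {
    for (const auto &entry : _apply_dim_correction_map)
      std::printf("operand %d -> apply_dim_correction=%s\n", entry.first,
                  entry.second ? "true" : "false");
  }

private:
  std::unordered_map<int, bool> _apply_dim_correction_map;
};

int main()
{
  TensorBuilderSketch builder;
  builder.registerTensorInfo(0); // stays corrected
  builder.registerTensorInfo(1);
  builder.dimCorrection(1, false); // e.g. a Reshape operand
  builder.prepare();
  return 0;
}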
diff --git a/runtimes/neurun/src/backend/acl_cl/TensorBuilder.h b/runtimes/neurun/src/backend/acl_cl/TensorBuilder.h
index 64d81721a..00b9857d6 100644
--- a/runtimes/neurun/src/backend/acl_cl/TensorBuilder.h
+++ b/runtimes/neurun/src/backend/acl_cl/TensorBuilder.h
@@ -73,9 +73,12 @@ public:
*/
bool isSubTensorOf(const model::operand::Index &parent, const model::operand::Index &child);
+ void dimCorrection(const model::operand::Index &index, bool apply_dim_correction);
+
private:
std::unordered_map<model::operand::Index, compiler::TensorInfo> _tensor_info_map;
std::unordered_map<model::operand::Index, compiler::SubTensorInfo> _subtensor_info_map;
+ std::unordered_map<model::operand::Index, bool> _apply_dim_correction_map;
std::unordered_map<model::operand::Index,
std::shared_ptr<::neurun::backend::acl_cl::operand::CLTensor>>
_tensors;
diff --git a/runtimes/neurun/src/backend/acl_cl/operand/CLTensor.cc b/runtimes/neurun/src/backend/acl_cl/operand/CLTensor.cc
index a76a3c6cf..89111ab65 100644
--- a/runtimes/neurun/src/backend/acl_cl/operand/CLTensor.cc
+++ b/runtimes/neurun/src/backend/acl_cl/operand/CLTensor.cc
@@ -30,11 +30,10 @@ namespace acl_cl
namespace operand
{
-CLTensor::CLTensor(const compiler::TensorInfo &info)
+CLTensor::CLTensor(const arm_compute::TensorInfo &info)
: _cl_tensor(std::make_shared<arm_compute::CLTensor>())
{
- auto acl_cl_info = asTensorInfo(info.shape(), info.typeInfo());
- allocator()->init(acl_cl_info);
+ allocator()->init(info);
}
arm_compute::CLTensor *CLTensor::handle() const { return _cl_tensor.get(); }
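
Note: with this change the wrapper no longer converts shapes itself; the caller passes a fully resolved ::arm_compute::TensorInfo and the constructor only forwards it to the allocator. The underlying ACL idiom, sketched standalone (assuming an OpenCL runtime is available):

#include <arm_compute/core/TensorInfo.h>
#include <arm_compute/runtime/CL/CLScheduler.h>
#include <arm_compute/runtime/CL/CLTensor.h>

int main()
{
  using namespace arm_compute;
  CLScheduler::get().default_init();

  // The dim-correction decision is baked into the TensorInfo up front...
  TensorInfo info(TensorShape(2U, 2U), 1, DataType::F32);

  // ...and the tensor only initializes (and later allocates) from it,
  // which is all the simplified CLTensor constructor above does.
  CLTensor tensor;
  tensor.allocator()->init(info);
  tensor.allocator()->allocate();
  return 0;
}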
diff --git a/runtimes/neurun/src/backend/acl_cl/operand/CLTensor.h b/runtimes/neurun/src/backend/acl_cl/operand/CLTensor.h
index 1df60742b..17be2345a 100644
--- a/runtimes/neurun/src/backend/acl_cl/operand/CLTensor.h
+++ b/runtimes/neurun/src/backend/acl_cl/operand/CLTensor.h
@@ -22,7 +22,6 @@
#include <arm_compute/runtime/CL/CLScheduler.h>
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
#include "ICLTensor.h"
-#include "compiler/TensorInfo.h"
namespace neurun
{
@@ -39,7 +38,7 @@ public:
CLTensor() = delete;
public:
- CLTensor(const compiler::TensorInfo &info);
+ CLTensor(const arm_compute::TensorInfo &info);
public:
arm_compute::CLTensor *handle() const override;
diff --git a/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun b/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun
index f96db347c..63f057ba1 100644
--- a/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun
+++ b/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun
@@ -72,7 +72,6 @@ GeneratedTests.reduce_sum_ex*
GeneratedTests.topk_v2*
# Unhandled exception
GeneratedTests.fully_connected*
-GeneratedTests.reshape*
# Unexpected result
GeneratedTests.softmax*
GeneratedTests.split*
diff --git a/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun.cpu b/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun.cpu
index f833d54a3..c2b4e026d 100644
--- a/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun.cpu
+++ b/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun.cpu
@@ -96,7 +96,6 @@ GeneratedTests.reduce_sum_ex*
GeneratedTests.topk_v2*
# Unhandled exception
GeneratedTests.fully_connected*
-GeneratedTests.reshape*
# Unexpected result
GeneratedTests.avg_pool_quant8_5
GeneratedTests.conv_quant8_2