summary | refs | log | tree | commit | diff
path: root/runtimes/neurun/src/kernel/acl_cl/ConcatLayer.cc
diff options
context:
space:
mode:
Diffstat (limited to 'runtimes/neurun/src/kernel/acl_cl/ConcatLayer.cc')
-rw-r--r--  runtimes/neurun/src/kernel/acl_cl/ConcatLayer.cc | 27
1 files changed, 14 insertions, 13 deletions
diff --git a/runtimes/neurun/src/kernel/acl_cl/ConcatLayer.cc b/runtimes/neurun/src/kernel/acl_cl/ConcatLayer.cc
index b75ac90f0..3844317ab 100644
--- a/runtimes/neurun/src/kernel/acl_cl/ConcatLayer.cc
+++ b/runtimes/neurun/src/kernel/acl_cl/ConcatLayer.cc
@@ -18,23 +18,23 @@
#include <arm_compute/runtime/CL/CLScheduler.h>
-#include "backend/acl_cl/kernel/View.h"
-#include "logging.h"
+#include "util/feature/nchw/View.h"
+#include "util/logging.h"
namespace
{
-bool matchSizeExceptAxis(const ::arm_compute::ICLTensor *t1, const ::arm_compute::ICLTensor *t2,
- uint32_t axis)
+bool matchSizeExceptAxis(const ::neurun::backend::acl_cl::operand::ICLTensor *t1,
+ const ::neurun::backend::acl_cl::operand::ICLTensor *t2, uint32_t axis)
{
- assert(t1->info()->num_dimensions() <= 4);
- assert(t2->info()->num_dimensions() <= 4);
+ assert(t1->num_dimensions() <= 4);
+ assert(t2->num_dimensions() <= 4);
for (uint32_t i = 0; i < 4; i++)
{
if (axis == i)
continue;
- if (t1->info()->dimension(i) != t2->info()->dimension(i))
+ if (t1->dimension(i) != t2->dimension(i))
return false;
}
return true;
@@ -66,10 +66,10 @@ bool ConcatLayer::concatenationFloat32()
for (auto input : _input_allocs)
{
assert(matchSizeExceptAxis(_output_alloc, input, _axis));
- axis_sum += input->info()->dimension(_axis);
+ axis_sum += input->dimension(_axis);
}
- assert(_output_alloc->info()->dimension(_axis) == axis_sum);
+ assert(_output_alloc->dimension(_axis) == axis_sum);
}
VERBOSE(Concat_RUN) << "START Concat" << std::endl;
@@ -81,12 +81,12 @@ bool ConcatLayer::concatenationFloat32()
auto &queue = ::arm_compute::CLScheduler::get().queue();
_output_alloc->map(queue);
- ::internal::arm_compute::kernel::View<float> output_view{_output_alloc};
+ util::feature::nchw::View<float> output_view{_output_alloc};
for (auto input : _input_allocs)
{
input->map(queue);
- const ::internal::arm_compute::kernel::View<float> input_reader{input};
+ const util::feature::nchw::View<float> input_reader{input};
for (uint32_t n = 0; n < input_reader.shape().N; n++)
{
@@ -124,8 +124,9 @@ bool ConcatLayer::concatenationFloat32()
return true;
}
-void ConcatLayer::configure(const std::vector<::arm_compute::ICLTensor *> &input_allocs,
- int32_t axis, ::arm_compute::ICLTensor *output_alloc)
+void ConcatLayer::configure(
+ const std::vector<::neurun::backend::acl_cl::operand::ICLTensor *> &input_allocs, int32_t axis,
+ ::neurun::backend::acl_cl::operand::ICLTensor *output_alloc)
{
_input_allocs = input_allocs;
_output_alloc = output_alloc;