diff options
Diffstat (limited to 'runtimes/neurun/backend/acl_neon/kernel/ConcatLayer.cc')
-rw-r--r-- | runtimes/neurun/backend/acl_neon/kernel/ConcatLayer.cc | 151 |
1 file changed, 151 insertions, 0 deletions
diff --git a/runtimes/neurun/backend/acl_neon/kernel/ConcatLayer.cc b/runtimes/neurun/backend/acl_neon/kernel/ConcatLayer.cc new file mode 100644 index 000000000..f2d88fa91 --- /dev/null +++ b/runtimes/neurun/backend/acl_neon/kernel/ConcatLayer.cc @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ConcatLayer.h" + +#include "util/logging.h" + +namespace +{ + +inline bool matchSizeExceptAxis(const ::neurun::backend::acl_neon::operand::INETensor *t1, + const ::neurun::backend::acl_neon::operand::INETensor *t2, + uint32_t axis) +{ + assert(t1->num_dimensions() <= 4); + assert(t2->num_dimensions() <= 4); + + for (uint32_t i = 0; i < 4; i++) + { + if (axis == i) + continue; + if (t1->info()->dimension(i) != t2->info()->dimension(i)) + return false; + } + return true; +} + +} // namespace {anonymous} + +namespace neurun +{ +namespace backend +{ +namespace acl_neon +{ +namespace kernel +{ + +ConcatLayer::ConcatLayer() + : _input_allocs(), _output_alloc(nullptr), _axis(0), _input_type(arm_compute::DataType::F32) +{ + // DO NOTHING +} + +template <typename T> bool ConcatLayer::concatenate() +{ + // Input and output size check + { + // NOTE Support only tensor with dimension 4 or less + + uint32_t axis_sum = 0; + + for (auto input : _input_allocs) + { + assert(_output_alloc->layout() == input->layout()); + assert(matchSizeExceptAxis(_output_alloc, input, 
_axis)); + axis_sum += input->info()->dimension(_axis); + } + + assert(_output_alloc->info()->dimension(_axis) == axis_sum); + } + + VERBOSE(Concat_RUN) << "START Concat" << std::endl; + + // Perform operation + { + uint32_t axis_offset = 0; + + for (auto input : _input_allocs) + { + for (uint32_t i = 0; i < input->info()->dimension(0); i++) + { + for (uint32_t j = 0; j < input->info()->dimension(1); j++) + { + for (uint32_t k = 0; k < input->info()->dimension(2); k++) + { + for (uint32_t l = 0; l < input->info()->dimension(3); l++) + { + uint32_t io = (_axis == 0) ? axis_offset : 0; + uint32_t jo = (_axis == 1) ? axis_offset : 0; + uint32_t ko = (_axis == 2) ? axis_offset : 0; + uint32_t lo = (_axis == 3) ? axis_offset : 0; + T value = *reinterpret_cast<T *>(input->handle()->ptr_to_element({i, j, k, l})); + *reinterpret_cast<T *>(_output_alloc->handle()->ptr_to_element( + {i + io, j + jo, k + ko, l + lo})) = value; + } + } + } + } + if (_axis == 0) + axis_offset += input->info()->dimension(0); + if (_axis == 1) + axis_offset += input->info()->dimension(1); + if (_axis == 2) + axis_offset += input->info()->dimension(2); + if (_axis == 3) + axis_offset += input->info()->dimension(3); + } + } + + VERBOSE(Concat_RUN) << "End Concat" << std::endl; + + return true; +} + +void ConcatLayer::configure( + const std::vector<::neurun::backend::acl_neon::operand::INETensor *> &input_allocs, + int32_t axis, ::neurun::backend::acl_neon::operand::INETensor *output_alloc) +{ + _input_allocs = input_allocs; + _output_alloc = output_alloc; + + assert(axis < 4); + + // TODO Handle when axis is negative + assert(axis >= 0); + + _axis = axis; + + _input_type = input_allocs[0]->data_type(); +} + +void ConcatLayer::run() +{ + if (_input_type == arm_compute::DataType::F32) + { + concatenate<float>(); + } + else if (_input_type == arm_compute::DataType::QASYMM8) + { + concatenate<uint8_t>(); + } +} + +} // namespace kernel +} // namespace acl_neon +} // namespace backend +} // namespace 
neurun |