diff options
Diffstat (limited to 'runtimes/pure_arm_compute/src/internal/layers/SimplePadLayer.cc')
-rw-r--r-- | runtimes/pure_arm_compute/src/internal/layers/SimplePadLayer.cc | 172 |
1 file changed, 0 insertions, 172 deletions
diff --git a/runtimes/pure_arm_compute/src/internal/layers/SimplePadLayer.cc b/runtimes/pure_arm_compute/src/internal/layers/SimplePadLayer.cc deleted file mode 100644 index 64236603f..000000000 --- a/runtimes/pure_arm_compute/src/internal/layers/SimplePadLayer.cc +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "internal/layers/SimplePadLayer.h" -#include <arm_compute/runtime/CL/CLScheduler.h> - -namespace -{ -bool validate_arg(const ::arm_compute::ITensor *input, const ::arm_compute::ITensor *output, - const ::arm_compute::ITensor *padding_size, - const ::arm_compute::Coordinates &axises) -{ - const int input_batch = input->info()->tensor_shape()[axises[0]]; - const int input_height = input->info()->tensor_shape()[axises[1]]; - const int input_width = input->info()->tensor_shape()[axises[2]]; - const int input_depth = input->info()->tensor_shape()[axises[3]]; - - const int output_batch = output->info()->tensor_shape()[axises[0]]; - const int output_height = output->info()->tensor_shape()[axises[1]]; - const int output_width = output->info()->tensor_shape()[axises[2]]; - const int output_depth = output->info()->tensor_shape()[axises[3]]; - - auto pad_batch_up = *reinterpret_cast<const int32_t *>(padding_size->ptr_to_element({0, 0})); - auto pad_batch_down = *reinterpret_cast<const int32_t *>(padding_size->ptr_to_element({1, 0})); - 
auto pad_height_top = *reinterpret_cast<const int32_t *>(padding_size->ptr_to_element({0, 1})); - auto pad_height_bottom = *reinterpret_cast<const int32_t *>(padding_size->ptr_to_element({1, 1})); - auto pad_width_left = *reinterpret_cast<const int32_t *>(padding_size->ptr_to_element({0, 2})); - auto pad_width_right = *reinterpret_cast<const int32_t *>(padding_size->ptr_to_element({1, 2})); - auto pad_depth_front = *reinterpret_cast<const int32_t *>(padding_size->ptr_to_element({0, 3})); - auto pad_depth_back = *reinterpret_cast<const int32_t *>(padding_size->ptr_to_element({1, 3})); - - const int padded_batch = input_batch + pad_batch_up + pad_batch_down; - const int padded_height = input_height + pad_height_top + pad_height_bottom; - const int padded_width = input_width + pad_width_left + pad_width_right; - const int padded_depth = input_depth + pad_depth_front + pad_depth_back; - - return (padded_batch == output_batch) && (padded_height == output_height) && - (padded_width == output_width) && (padded_depth == output_depth); -} -} // namespace - -void SimplePadLayer::configure(::arm_compute::ITensor *input, ::arm_compute::ITensor *output, - ::arm_compute::ITensor *padding_size, - const ::arm_compute::Coordinates &axises) -{ - - const auto rank = axises.num_dimensions(); - assert(rank == 4); - assert(input != nullptr && output != nullptr && padding_size != nullptr); - - for (int i = 0; i < rank; ++i) - { - assert(axises[i] >= 0); - assert(axises[i] < rank); - } - - _input = input; - _output = output; - _padding_size = padding_size; - _axises = axises; -} - -template <typename T> -inline void ApplyPadding(const ::arm_compute::ITensor *input_data, - const ::arm_compute::TensorShape &input_shape, - const ::arm_compute::ITensor *padding_size, - ::arm_compute::ITensor *output_data, - const ::arm_compute::TensorShape &output_shape, - const ::arm_compute::Coordinates &axises, T zero_value) -{ - - assert(validate_arg(input_data, output_data, padding_size, axises) && - 
"Padded Input shape does not match to output shape"); - - const int input_batch = input_shape[axises[0]]; - const int input_height = input_shape[axises[1]]; - const int input_width = input_shape[axises[2]]; - const int input_depth = input_shape[axises[3]]; - - const int output_batch = output_shape[axises[0]]; - const int output_height = output_shape[axises[1]]; - const int output_width = output_shape[axises[2]]; - const int output_depth = output_shape[axises[3]]; - - // Padding size for Up, Top, Left and Front are required. - auto pad_batch_up = *reinterpret_cast<const int32_t *>(padding_size->ptr_to_element({0, 0})); - auto pad_height_top = *reinterpret_cast<const int32_t *>(padding_size->ptr_to_element({0, 1})); - auto pad_width_left = *reinterpret_cast<const int32_t *>(padding_size->ptr_to_element({0, 2})); - auto pad_depth_front = *reinterpret_cast<const int32_t *>(padding_size->ptr_to_element({0, 3})); - - for (int out_b = 0; out_b < output_batch; ++out_b) - { - for (int out_h = 0; out_h < output_height; ++out_h) - { - for (int out_w = 0; out_w < output_width; ++out_w) - { - for (int out_d = 0; out_d < output_depth; ++out_d) - { - auto output_id = asARMComputeCoordinates( - ::arm_compute::Coordinates{out_b, out_h, out_w, out_d}, axises); - - if (out_b < pad_batch_up || out_b >= (input_batch + pad_batch_up) || - out_h < pad_height_top || out_h >= (input_height + pad_height_top) || - out_w < pad_width_left || out_w >= (input_width + pad_width_left) || - out_d < pad_depth_front || out_d >= (input_depth + pad_depth_front)) - { - *reinterpret_cast<T *>(output_data->ptr_to_element(output_id)) = zero_value; - } - else - { - auto input_id = asARMComputeCoordinates( - ::arm_compute::Coordinates{out_b - pad_batch_up, out_h - pad_height_top, - out_w - pad_width_left, out_d - pad_depth_front}, - axises); - *reinterpret_cast<T *>(output_data->ptr_to_element(output_id)) = - *reinterpret_cast<T *>(input_data->ptr_to_element(input_id)); - } - } - } - } - } -} -void 
SimplePadLayer::run() -{ - if (::internal::arm_compute::isGpuMode()) - { - auto &q = ::arm_compute::CLScheduler::get().queue(); - - CAST_CL(_input)->map(q); - CAST_CL(_output)->map(q); - CAST_CL(_padding_size)->map(q); - } - - switch (_input->info()->data_type()) - { - case ::arm_compute::DataType::U8: - case ::arm_compute::DataType::QASYMM8: - ApplyPadding<uint8_t>(_input, _input->info()->tensor_shape(), _padding_size, _output, - _output->info()->tensor_shape(), _axises, - _input->info()->quantization_info().offset); - break; - case ::arm_compute::DataType::F32: - ApplyPadding<float>(_input, _input->info()->tensor_shape(), _padding_size, _output, - _output->info()->tensor_shape(), _axises, 0.0f); - break; - default: - ARM_COMPUTE_ERROR("DataType not supported"); - break; - } - - if (::internal::arm_compute::isGpuMode()) - { - auto &q = ::arm_compute::CLScheduler::get().queue(); - - CAST_CL(_input)->unmap(q); - CAST_CL(_output)->unmap(q); - CAST_CL(_padding_size)->unmap(q); - } -} |