| author | Michele Di Giorgio <michele.digiorgio@arm.com> | 2021-02-01 17:09:32 +0000 |
|---|---|---|
| committer | Georgios Pinitas <georgios.pinitas@arm.com> | 2021-02-03 18:40:52 +0000 |
| commit | e1314665fcfd2a32d6117a8fc16f67a83db3bb05 (patch) | |
| tree | 63c72e418093bbd57a7e07e8791a9af4faa8ca8b | |
| parent | be9f9f9139b759d314f4f2a6d2ee747079666504 (diff) | |
Make CL Pooling kernels and functions state-less
Resolves COMPMID-4000
Change-Id: I64878f93c033b4928fdefbb964c37c67fdecfaab
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4971
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
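
Note: this patch ports the OpenCL pooling implementation to the state-less operator model: the new `opencl::ClPooling` operator and `ClPoolingKernel` keep no tensor pointers, are configured from `ITensorInfo` metadata only, and receive the actual tensors at execution time through an `ITensorPack`. The sketch below mirrors the calling convention used by the new `CLPoolingLayer::run()` in this diff; the free function and tensor names are illustrative and not part of the patch.

```cpp
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/experimental/Types.h"
#include "src/runtime/gpu/cl/operators/ClPooling.h"

// The operator owns no tensor state: each invocation is handed the tensors it
// should operate on through an ITensorPack, keyed by TensorType slot ids.
void run_pooling(arm_compute::opencl::ClPooling &op,
                 const arm_compute::ICLTensor   *src,
                 arm_compute::ICLTensor         *dst,
                 arm_compute::ICLTensor         *indices /* may be nullptr */)
{
    arm_compute::ITensorPack pack;
    pack.add_tensor(arm_compute::TensorType::ACL_SRC, src);       // pooling input
    pack.add_tensor(arm_compute::TensorType::ACL_DST_0, dst);     // pooled output
    pack.add_tensor(arm_compute::TensorType::ACL_DST_1, indices); // optional max-pooling indices
    op.run(pack); // enqueues the border-handling and pooling kernels
}
```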
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | Android.bp | 3 |
| -rw-r--r-- | arm_compute/runtime/CL/functions/CLPoolingLayer.h | 34 |
| -rw-r--r-- | docs/00_introduction.dox | 6 |
| -rw-r--r-- | src/core/CL/CLKernels.h | 1 |
| -rw-r--r-- | src/core/CL/kernels/CLMaxUnpoolingLayerKernel.h | 6 |
| -rw-r--r-- | src/core/CL/kernels/CLPoolingLayerKernel.h | 96 |
| -rw-r--r-- | src/core/gpu/cl/kernels/ClPoolingKernel.cpp (renamed from src/core/CL/kernels/CLPoolingLayerKernel.cpp) | 244 |
| -rw-r--r-- | src/core/gpu/cl/kernels/ClPoolingKernel.h | 79 |
| -rw-r--r-- | src/runtime/CL/functions/CLPoolingLayer.cpp | 83 |
| -rw-r--r-- | src/runtime/CL/tuners/BifrostTuner.cpp | 12 |
| -rw-r--r-- | src/runtime/gpu/cl/operators/ClPooling.cpp | 101 |
| -rw-r--r-- | src/runtime/gpu/cl/operators/ClPooling.h | 75 |
12 files changed, 448 insertions, 292 deletions
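
Note: the kernel side of the change follows the same pattern: `ClPoolingKernel` drops its `_input`/`_output`/`_indices` members, and `run_op()` resolves the tensors from the pack on every call, as shown in the diff below. A condensed sketch of that lookup follows; the function name is illustrative and the window slicing and enqueue logic around it are elided.

```cpp
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/ITensorPack.h"
#include "support/Cast.h"

// Condensed sketch of the per-run tensor resolution inside the state-less kernel.
void resolve_pooling_tensors(arm_compute::ITensorPack &tensors)
{
    using namespace arm_compute;
    const auto src     = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
    auto       dst     = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST_0));
    auto       indices = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST_1)); // may be nullptr
    // ... set kernel arguments from src/dst/indices and enqueue, as in ClPoolingKernel::run_op() ...
    (void)src; (void)dst; (void)indices;
}
```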
diff --git a/Android.bp b/Android.bp index f9761e235..bc5ae34bb 100644 --- a/Android.bp +++ b/Android.bp @@ -160,7 +160,6 @@ cc_library_static { "src/core/CL/kernels/CLPadLayerKernel.cpp", "src/core/CL/kernels/CLPermuteKernel.cpp", "src/core/CL/kernels/CLPixelWiseMultiplicationKernel.cpp", - "src/core/CL/kernels/CLPoolingLayerKernel.cpp", "src/core/CL/kernels/CLPriorBoxLayerKernel.cpp", "src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.cpp", "src/core/CL/kernels/CLQuantizationLayerKernel.cpp", @@ -443,6 +442,7 @@ cc_library_static { "src/core/gpu/cl/kernels/ClElementwiseUnaryKernel.cpp", "src/core/gpu/cl/kernels/ClFloorKernel.cpp", "src/core/gpu/cl/kernels/ClHeightConcatenateKernel.cpp", + "src/core/gpu/cl/kernels/ClPoolingKernel.cpp", "src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.cpp", "src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.cpp", "src/core/gpu/cl/kernels/ClWidthConcatenateKernel.cpp", @@ -805,6 +805,7 @@ cc_library_static { "src/runtime/gpu/cl/operators/ClElementwiseUnary.cpp", "src/runtime/gpu/cl/operators/ClFloor.cpp", "src/runtime/gpu/cl/operators/ClLogicalNot.cpp", + "src/runtime/gpu/cl/operators/ClPooling.cpp", "src/runtime/gpu/cl/operators/ClSub.cpp", "utils/CommonGraphOptions.cpp", "utils/GraphUtils.cpp", diff --git a/arm_compute/runtime/CL/functions/CLPoolingLayer.h b/arm_compute/runtime/CL/functions/CLPoolingLayer.h index ef1f426c2..ec1c4eb76 100644 --- a/arm_compute/runtime/CL/functions/CLPoolingLayer.h +++ b/arm_compute/runtime/CL/functions/CLPoolingLayer.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020 Arm Limited. + * Copyright (c) 2017-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -24,25 +24,34 @@ #ifndef ARM_COMPUTE_CLPOOLINGLAYER_H #define ARM_COMPUTE_CLPOOLINGLAYER_H -#include "arm_compute/runtime/CL/ICLSimpleFunction.h" +#include "arm_compute/runtime/IFunction.h" -#include "arm_compute/core/Error.h" #include "arm_compute/core/Types.h" +#include <memory> + namespace arm_compute { class CLCompileContext; class ICLTensor; class ITensorInfo; -/** Basic function to simulate a pooling layer with the specified pooling operation. This function calls the following OpenCL kernels: - * - * -# @ref CLFillBorderKernel (executed if padding size is different from zero) - * -# @ref CLPoolingLayerKernel - */ -class CLPoolingLayer : public ICLSimpleFunction +/** Basic function to run @ref opencl::ClPooling */ +class CLPoolingLayer : public IFunction { public: + /** Default Constructor */ + CLPoolingLayer(); + /** Default Destructor */ + ~CLPoolingLayer(); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLPoolingLayer(const CLPoolingLayer &) = delete; + /** Default move constructor */ + CLPoolingLayer(CLPoolingLayer &&) = default; + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLPoolingLayer &operator=(const CLPoolingLayer &) = delete; + /** Default move assignment operator */ + CLPoolingLayer &operator=(CLPoolingLayer &&) = default; /** Set the input and output tensors. * * @param[in,out] input Source tensor. (Written to only when padding != 0) Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. 
@@ -70,6 +79,13 @@ public: * @return a status */ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info, const ITensorInfo *indices = nullptr); + + // Inherited methods overridden: + void run() override; + +private: + struct Impl; + std::unique_ptr<Impl> _impl; }; } // namespace arm_compute #endif /* ARM_COMPUTE_CLPOOLINGLAYER_H */ diff --git a/docs/00_introduction.dox b/docs/00_introduction.dox index 6a4d03b52..40b7f9efe 100644 --- a/docs/00_introduction.dox +++ b/docs/00_introduction.dox @@ -168,7 +168,7 @@ v20.11 Public major release - CLBatchConcatenateLayerKernel - CLElementwiseOperationKernel - @ref CLBatchNormalizationLayerKernel - - @ref CLPoolingLayerKernel + - CLPoolingLayerKernel - @ref CLWinogradInputTransformKernel - @ref CLGEMMLowpMatrixMultiplyNativeKernel - @ref CLGEMMLowpMatrixAReductionKernel @@ -660,7 +660,7 @@ v19.11 Public major release - @ref NEDepthwiseConvolutionLayer - Added FP16 mixed-precision support for: - @ref CLGEMMMatrixMultiplyReshapedKernel - - @ref CLPoolingLayerKernel + - CLPoolingLayerKernel - Added FP32 and FP16 ELU activation for: - @ref CLActivationLayer - @ref NEActivationLayer @@ -1293,7 +1293,7 @@ v17.03 Sources preview v17.02.1 Sources preview - New OpenCL kernels / functions: - CLLogits1DMaxKernel, CLLogits1DShiftExpSumKernel, @ref CLLogits1DNormKernel / @ref CLSoftmaxLayer - - @ref CLPoolingLayerKernel / @ref CLPoolingLayer + - CLPoolingLayerKernel / @ref CLPoolingLayer - @ref CLIm2ColKernel, @ref CLCol2ImKernel, CLConvolutionLayerWeightsReshapeKernel / @ref CLConvolutionLayer - @ref CLRemapKernel / @ref CLRemap - @ref CLGaussianPyramidHorKernel, @ref CLGaussianPyramidVertKernel / @ref CLGaussianPyramid, @ref CLGaussianPyramidHalf, @ref CLGaussianPyramidOrb diff --git a/src/core/CL/CLKernels.h b/src/core/CL/CLKernels.h index d31c87688..ac051684a 100644 --- a/src/core/CL/CLKernels.h +++ b/src/core/CL/CLKernels.h @@ -107,7 +107,6 @@ #include "src/core/CL/kernels/CLPadLayerKernel.h" #include "src/core/CL/kernels/CLPermuteKernel.h" #include "src/core/CL/kernels/CLPixelWiseMultiplicationKernel.h" -#include "src/core/CL/kernels/CLPoolingLayerKernel.h" #include "src/core/CL/kernels/CLPriorBoxLayerKernel.h" #include "src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.h" #include "src/core/CL/kernels/CLQuantizationLayerKernel.h" diff --git a/src/core/CL/kernels/CLMaxUnpoolingLayerKernel.h b/src/core/CL/kernels/CLMaxUnpoolingLayerKernel.h index 86267ec0f..cc96cf1a1 100644 --- a/src/core/CL/kernels/CLMaxUnpoolingLayerKernel.h +++ b/src/core/CL/kernels/CLMaxUnpoolingLayerKernel.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020 Arm Limited. + * Copyright (c) 2020-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -53,7 +53,7 @@ public: * @param[in] compile_context The compile context to be used. * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. * @param[in] indices Tensor containing the offset to store the input elements in the output tensor. - * @ref CLPoolingLayerKernel with indices should precede this function in order to + * @ref opencl::ClPooling with indices should precede this function in order to * properly reconstruct the output tensor. * The tensor shape of this tensor has to be equal to the input tensor shape. Data type supported: U32. * @param[out] output Destination tensor. Data types supported: Same as @p input. @@ -65,7 +65,7 @@ public: * @param[in] input Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. 
* @param[in] output Destination tensor info. Data types supported: Same as @p input. * @param[in] indices TensorInfo associated to the tensor containing the offset to store the input elements in the output tensor. - * @ref CLPoolingLayerKernel with indices should precede this function in order to + * @ref opencl::ClPooling with indices should precede this function in order to * properly reconstruct the output tensor. * The tensor shape of this tensor has to be equal to the input tensor shape. Data type supported: U32. * @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo. diff --git a/src/core/CL/kernels/CLPoolingLayerKernel.h b/src/core/CL/kernels/CLPoolingLayerKernel.h deleted file mode 100644 index d88402a79..000000000 --- a/src/core/CL/kernels/CLPoolingLayerKernel.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2017-2020 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef ARM_COMPUTE_CLPOOLINGLAYERKERNEL_H -#define ARM_COMPUTE_CLPOOLINGLAYERKERNEL_H - -#include "src/core/CL/ICLKernel.h" - -#include "arm_compute/core/Error.h" - -namespace arm_compute -{ -class ICLTensor; - -/** Interface for the pooling layer kernel */ -class CLPoolingLayerKernel : public ICLKernel -{ -public: - /** Default constructor */ - CLPoolingLayerKernel(); - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CLPoolingLayerKernel(const CLPoolingLayerKernel &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CLPoolingLayerKernel &operator=(const CLPoolingLayerKernel &) = delete; - /** Allow instances of this class to be moved */ - CLPoolingLayerKernel(CLPoolingLayerKernel &&) = default; - /** Allow instances of this class to be moved */ - CLPoolingLayerKernel &operator=(CLPoolingLayerKernel &&) = default; - /** Default destructor */ - ~CLPoolingLayerKernel() = default; - - /** Set the input and output tensors. - * - * - * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. - * @param[out] output Destination tensor. Data types supported: Same as @p input. - * @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo. - * @param[out] indices (optional) The indices of the maximal values. Data type supported: U32. 
- */ - void configure(const ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info, ICLTensor *indices = nullptr); - /** Set the input and output tensors. - * - * - * @param[in] compile_context The compile context to be used. - * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. - * @param[out] output Destination tensor. Data types supported: Same as @p input. - * @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo. - * @param[out] indices (optional) The indices of the maximal values. Data type supported: U32. - */ - void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info, ICLTensor *indices = nullptr); - /** Static function to check if given info will lead to a valid configuration of @ref CLPoolingLayerKernel - * - * @param[in] input Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. - * @param[in] output Destination tensor info. Data types supported: Same as @p input. - * @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo. - * @param[in] indices (optional) The indices of the maximal values. Data type supported: U32. - * - * @return a status - */ - static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info, const ITensorInfo *indices = nullptr); - - // Inherited methods overridden: - void run(const Window &window, cl::CommandQueue &queue) override; - BorderSize border_size() const override; - -public: - const ICLTensor *_input; - ICLTensor *_output; - ICLTensor *_indices; - PoolingLayerInfo _pool_info; - DataLayout _data_layout; - BorderSize _border_size; - unsigned int _num_elems_processed_per_iteration; -}; -} // namespace arm_compute -#endif /*ARM_COMPUTE_CLPOOLINGLAYERKERNEL_H */ diff --git a/src/core/CL/kernels/CLPoolingLayerKernel.cpp b/src/core/gpu/cl/kernels/ClPoolingKernel.cpp index 79843cd29..567fec2a3 100644 --- a/src/core/CL/kernels/CLPoolingLayerKernel.cpp +++ b/src/core/gpu/cl/kernels/ClPoolingKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020 Arm Limited. + * Copyright (c) 2017-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,86 +21,84 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#include "src/core/CL/kernels/CLPoolingLayerKernel.h" +#include "src/core/gpu/cl/kernels/ClPoolingKernel.h" #include "arm_compute/core/CL/CLHelpers.h" #include "arm_compute/core/CL/CLKernelLibrary.h" #include "arm_compute/core/CL/ICLTensor.h" -#include "arm_compute/core/CL/OpenCL.h" -#include "arm_compute/core/Helpers.h" #include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/Utils.h" +#include "arm_compute/core/Validate.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" -#include "src/core/AccessWindowStatic.h" #include "src/core/CL/CLValidate.h" -#include "src/core/CL/ICLKernel.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" +#include "support/Cast.h" #include "support/StringSupport.h" -#include <set> -#include <string> -#include <tuple> - namespace arm_compute { +namespace opencl +{ +namespace kernels +{ using namespace arm_compute::misc::shape_calculator; namespace { // Internal window config info -using CLPoolingConfig = std::pair<unsigned int, BorderSize>; //num_elems_processed_per_iteration, border_size +using ClPoolingConfig = std::pair<unsigned int, BorderSize>; //num_elems_processed_per_iteration, border_size -void auto_init(const ITensorInfo *input, ITensorInfo *output, ITensorInfo *indices, PoolingLayerInfo pool_info) +void auto_init(const ITensorInfo *src, ITensorInfo *dst, ITensorInfo *indices, PoolingLayerInfo pool_info) { - TensorShape out_shape = compute_pool_shape(*input, pool_info); - auto_init_if_empty(*output, input->clone()->set_tensor_shape(out_shape)); + TensorShape out_shape = compute_pool_shape(*src, pool_info); + auto_init_if_empty(*dst, src->clone()->set_tensor_shape(out_shape)); if(indices) { - auto_init_if_empty(*indices, input->clone()->set_tensor_shape(out_shape).set_data_type(DataType::U32)); + auto_init_if_empty(*indices, src->clone()->set_tensor_shape(out_shape).set_data_type(DataType::U32)); } } -Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info, const ITensorInfo *indices) +Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &pool_info, const ITensorInfo *indices) { - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); - ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_MSG((is_data_type_quantized_asymmetric(input->data_type()) && pool_info.pool_type == PoolingType::L2), + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_MSG((is_data_type_quantized_asymmetric(src->data_type()) && pool_info.pool_type == PoolingType::L2), "Unsupported combination of parameters!"); // Check indices if(indices) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_MSG(pool_info.pool_type != PoolingType::MAX, "Pooling indices only supported for MAX pooling method"); ARM_COMPUTE_RETURN_ERROR_ON_MSG((pool_info.pool_size != Size2D(2, 2)), "Pooling indices only supported for pool size 2x2"); if(indices->total_size() != 0) { - TensorInfo 
idx_info(TensorInfo(compute_pool_shape(*input, pool_info), 1, DataType::U32)); + TensorInfo idx_info(TensorInfo(compute_pool_shape(*src, pool_info), 1, DataType::U32)); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(indices, &idx_info); } } - // Checks performed when output is configured - if(output->total_size() != 0) + // Checks performed when dst is configured + if(dst->total_size() != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output); - TensorInfo out_info(TensorInfo(compute_pool_shape(*input, pool_info), 1, output->data_type())); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &out_info); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(src, dst); + TensorInfo out_info(TensorInfo(compute_pool_shape(*src, pool_info), 1, dst->data_type())); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &out_info); } return Status{}; } -std::tuple<Status, Window, CLPoolingConfig> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const PoolingLayerInfo &pool_info, ITensorInfo *indices = nullptr) +std::tuple<Status, Window, ClPoolingConfig> validate_and_configure_window(ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &pool_info, ITensorInfo *indices = nullptr) { - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst); // Get data layout - const DataLayout data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? input->data_layout() : pool_info.data_layout; + const DataLayout data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : pool_info.data_layout; const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); @@ -108,8 +106,8 @@ std::tuple<Status, Window, CLPoolingConfig> validate_and_configure_window(ITenso int pool_stride_y = 0; unsigned int pooled_w = 0; unsigned int pooled_h = 0; - int pool_size_x = pool_info.is_global_pooling ? input->dimension(idx_width) : pool_info.pool_size.width; - int pool_size_y = pool_info.is_global_pooling ? input->dimension(idx_height) : pool_info.pool_size.height; + int pool_size_x = pool_info.is_global_pooling ? src->dimension(idx_width) : pool_info.pool_size.width; + int pool_size_y = pool_info.is_global_pooling ? 
src->dimension(idx_height) : pool_info.pool_size.height; const PadStrideInfo pad_stride_info = pool_info.pad_stride_info; std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride(); const int pool_pad_right = pad_stride_info.pad_right(); @@ -118,14 +116,14 @@ std::tuple<Status, Window, CLPoolingConfig> validate_and_configure_window(ITenso const int pool_pad_bottom = pad_stride_info.pad_bottom(); BorderSize border_size = BorderSize(); - auto_init(input, output, indices, pool_info); - pooled_w = output->tensor_shape()[idx_width]; - pooled_h = output->tensor_shape()[idx_height]; + auto_init(src, dst, indices, pool_info); + pooled_w = dst->tensor_shape()[idx_width]; + pooled_h = dst->tensor_shape()[idx_height]; - const DataType data_type = input->data_type(); + const DataType data_type = src->data_type(); - const int input_width = input->dimension(idx_width); - const int input_height = input->dimension(idx_height); + const int src_width = src->dimension(idx_width); + const int src_height = src->dimension(idx_height); unsigned int num_elems_processed_per_iteration = 0; bool window_changed = false; @@ -146,46 +144,46 @@ std::tuple<Status, Window, CLPoolingConfig> validate_and_configure_window(ITenso const int num_iterations_x = (pooled_w + num_elems_processed_per_iteration - 1) / num_elems_processed_per_iteration; // Upper limit for the number of right/bottom border elements that are accessed - const int upper_bound_w = ((num_iterations_x - 1) * num_elems_processed_per_iteration * pool_stride_x - pool_pad_left + num_elems_read_per_iteration) - input_width; - const int upper_bound_h = ((pooled_h - 1) * pool_stride_y - pool_pad_top + pool_size_y) - input_height; + const int upper_bound_w = ((num_iterations_x - 1) * num_elems_processed_per_iteration * pool_stride_x - pool_pad_left + num_elems_read_per_iteration) - src_width; + const int upper_bound_h = ((pooled_h - 1) * pool_stride_y - pool_pad_top + pool_size_y) - src_height; border_size.right = std::max(upper_bound_w, pool_pad_right); border_size.bottom = std::max(upper_bound_h, pool_pad_bottom); - win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration)); + win = calculate_max_window(*dst, Steps(num_elems_processed_per_iteration)); - AccessWindowRectangle input_access(input, -pool_pad_left, -pool_pad_top, num_elems_read_per_iteration, pool_size_y, - pool_stride_x, pool_stride_y); - AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration); + AccessWindowRectangle src_access(src, -pool_pad_left, -pool_pad_top, num_elems_read_per_iteration, pool_size_y, + pool_stride_x, pool_stride_y); + AccessWindowHorizontal dst_access(dst, 0, num_elems_processed_per_iteration); // Update indices window if(indices) { AccessWindowHorizontal indices_access(indices, 0, num_elems_processed_per_iteration); - window_changed = update_window_and_padding(win, input_access, output_access, indices_access); + window_changed = update_window_and_padding(win, src_access, dst_access, indices_access); indices_access.set_valid_region(win, ValidRegion(Coordinates(), indices->tensor_shape())); } else { - window_changed = update_window_and_padding(win, input_access, output_access); + window_changed = update_window_and_padding(win, src_access, dst_access); } - output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape())); + dst_access.set_valid_region(win, ValidRegion(Coordinates(), dst->tensor_shape())); break; } case DataLayout::NHWC: { // Initialize border size border_size = BorderSize(); - 
num_elems_processed_per_iteration = adjust_vec_size(4, output->dimension(0)); - win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration)); + num_elems_processed_per_iteration = adjust_vec_size(4, dst->dimension(0)); + win = calculate_max_window(*dst, Steps(num_elems_processed_per_iteration)); if(indices != nullptr) { indices->set_valid_region(ValidRegion(Coordinates(), indices->tensor_shape())); } - output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape())); + dst->set_valid_region(ValidRegion(Coordinates(), dst->tensor_shape())); break; } default: @@ -193,37 +191,29 @@ std::tuple<Status, Window, CLPoolingConfig> validate_and_configure_window(ITenso } Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{}; - return std::make_tuple(err, win, CLPoolingConfig(num_elems_processed_per_iteration, border_size)); + return std::make_tuple(err, win, ClPoolingConfig(num_elems_processed_per_iteration, border_size)); } } // namespace -CLPoolingLayerKernel::CLPoolingLayerKernel() - : _input(nullptr), _output(nullptr), _indices(nullptr), _pool_info(), _data_layout(DataLayout::UNKNOWN), _border_size(0), _num_elems_processed_per_iteration(1) +ClPoolingKernel::ClPoolingKernel() + : _pool_info(), _data_layout(DataLayout::UNKNOWN), _border_size(0), _num_elems_processed_per_iteration(1) { } -BorderSize CLPoolingLayerKernel::border_size() const +BorderSize ClPoolingKernel::border_size() const { return _border_size; } -void CLPoolingLayerKernel::configure(const ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info, ICLTensor *indices) +void ClPoolingKernel::configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &pool_info, ITensorInfo *indices) { - configure(CLKernelLibrary::get().get_compile_context(), input, output, pool_info, indices); -} + ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst); -void CLPoolingLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info, ICLTensor *indices) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - - auto padding_info = get_padding_info({ input, output, indices }); + auto padding_info = get_padding_info({ src, dst, indices }); // Set instance variables - _input = input; - _output = output; _pool_info = pool_info; - _data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? input->info()->data_layout() : pool_info.data_layout; - _indices = indices; + _data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : pool_info.data_layout; int pool_stride_x = 0; int pool_stride_y = 0; const PoolingType pool_type = pool_info.pool_type; @@ -231,8 +221,8 @@ void CLPoolingLayerKernel::configure(const CLCompileContext &compile_context, co const int idx_height = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT); const int idx_channel = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::CHANNEL); const int idx_batch_size = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::BATCHES); - const int pool_size_x = pool_info.is_global_pooling ? input->info()->dimension(idx_width) : pool_info.pool_size.width; - const int pool_size_y = pool_info.is_global_pooling ? input->info()->dimension(idx_height) : pool_info.pool_size.height; + const int pool_size_x = pool_info.is_global_pooling ? 
src->dimension(idx_width) : pool_info.pool_size.width; + const int pool_size_y = pool_info.is_global_pooling ? src->dimension(idx_height) : pool_info.pool_size.height; const PadStrideInfo pad_stride_info = pool_info.pad_stride_info; const bool exclude_padding = pool_info.exclude_padding; std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride(); @@ -241,36 +231,36 @@ void CLPoolingLayerKernel::configure(const CLCompileContext &compile_context, co // Set build options CLBuildOptions build_opts; - const DataType data_type = input->info()->data_type(); + const DataType data_type = src->data_type(); // Configure kernel window - auto win_config = validate_and_configure_window(input->info(), output->info(), pool_info, (indices ? indices->info() : nullptr)); + auto win_config = validate_and_configure_window(src, dst, pool_info, indices); ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config)); ICLKernel::configure_internal(std::get<1>(win_config)); - CLPoolingConfig pooling_config = std::get<2>(win_config); + ClPoolingConfig pooling_config = std::get<2>(win_config); _num_elems_processed_per_iteration = pooling_config.first; _border_size = pooling_config.second; build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(_num_elems_processed_per_iteration)); // Tensor paddings are used to calculate the indicies for MAX pooling - if(pool_info.pool_size == Size2D(2, 2) && pool_type == PoolingType::MAX && _indices && is_data_type_float(data_type)) + if(pool_info.pool_size == Size2D(2, 2) && pool_type == PoolingType::MAX && indices && is_data_type_float(data_type)) { - build_opts.add_option("-DPAD_TENSOR_LEFT=" + support::cpp11::to_string(input->info()->padding().left)); - build_opts.add_option("-DPAD_TENSOR_RIGHT=" + support::cpp11::to_string(input->info()->padding().right)); - build_opts.add_option("-DPAD_TENSOR_TOP=" + support::cpp11::to_string(input->info()->padding().top)); - build_opts.add_option("-DPAD_TENSOR_BOTTOM=" + support::cpp11::to_string(input->info()->padding().bottom)); - build_opts.add_option("-DTENSOR_CHANNEL=" + support::cpp11::to_string(input->info()->dimension(idx_channel))); - build_opts.add_option("-DTENSOR_WIDTH=" + support::cpp11::to_string(input->info()->dimension(idx_width))); - build_opts.add_option("-DTENSOR_HEIGHT=" + support::cpp11::to_string(input->info()->dimension(idx_height))); + build_opts.add_option("-DPAD_TENSOR_LEFT=" + support::cpp11::to_string(src->padding().left)); + build_opts.add_option("-DPAD_TENSOR_RIGHT=" + support::cpp11::to_string(src->padding().right)); + build_opts.add_option("-DPAD_TENSOR_TOP=" + support::cpp11::to_string(src->padding().top)); + build_opts.add_option("-DPAD_TENSOR_BOTTOM=" + support::cpp11::to_string(src->padding().bottom)); + build_opts.add_option("-DTENSOR_CHANNEL=" + support::cpp11::to_string(src->dimension(idx_channel))); + build_opts.add_option("-DTENSOR_WIDTH=" + support::cpp11::to_string(src->dimension(idx_width))); + build_opts.add_option("-DTENSOR_HEIGHT=" + support::cpp11::to_string(src->dimension(idx_height))); } - if(is_data_type_quantized_asymmetric(data_type) && input->info()->quantization_info() != output->info()->quantization_info()) + if(is_data_type_quantized_asymmetric(data_type) && src->quantization_info() != dst->quantization_info()) { - const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform(); - const UniformQuantizationInfo oq_info = output->info()->quantization_info().uniform(); + const UniformQuantizationInfo iq_info = src->quantization_info().uniform(); + const 
UniformQuantizationInfo oq_info = dst->quantization_info().uniform(); build_opts.add_option("-DOFFSET_IN1=" + float_to_string_with_full_precision(iq_info.offset)); build_opts.add_option("-DOFFSET_OUT=" + float_to_string_with_full_precision(oq_info.offset)); @@ -278,10 +268,10 @@ void CLPoolingLayerKernel::configure(const CLCompileContext &compile_context, co build_opts.add_option("-DSCALE_OUT=" + float_to_string_with_full_precision(oq_info.scale)); } - // Check output dimensions - auto_init(input->info(), output->info(), indices ? indices->info() : nullptr, pool_info); + // Check dst dimensions + auto_init(src, dst, indices, pool_info); - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), pool_info, (indices) ? indices->info() : nullptr)); + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, pool_info, indices)); build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type)); build_opts.add_option("-DPOOL_" + string_from_pooling_type(pool_type)); @@ -312,8 +302,8 @@ void CLPoolingLayerKernel::configure(const CLCompileContext &compile_context, co build_opts.add_option("-DINITIAL_VALUE=0"); } - build_opts.add_option("-DMAX_WIDTH=" + support::cpp11::to_string(input->info()->dimension(idx_width) + (exclude_padding ? 0 : pool_pad_left))); - build_opts.add_option("-DMAX_HEIGHT=" + support::cpp11::to_string(input->info()->dimension(idx_height) + (exclude_padding ? 0 : pool_pad_top))); + build_opts.add_option("-DMAX_WIDTH=" + support::cpp11::to_string(src->dimension(idx_width) + (exclude_padding ? 0 : pool_pad_left))); + build_opts.add_option("-DMAX_HEIGHT=" + support::cpp11::to_string(src->dimension(idx_height) + (exclude_padding ? 0 : pool_pad_top))); // Create kernel switch(_data_layout) @@ -334,14 +324,14 @@ void CLPoolingLayerKernel::configure(const CLCompileContext &compile_context, co if((pool_size_x == 3) && (pool_size_y == 3) && !is_data_type_quantized_asymmetric(data_type)) { // Check if we have pool3x3 with stride_x less equal than 3. In these cases, run an optimized OpenCL kernel where - // each thread computes 4 output elements + // each thread computes 4 dst elements const bool is_pool3x3_stride_le3 = (pool_size_x == 3) && (pool_size_y == 3) && (pool_stride_x <= 3); std::string kernel_name = ((is_pool3x3_stride_le3) ? 
"pooling_layer_optimized_" : "pooling_layer_") + support::cpp11::to_string(pool_size_x); _kernel = create_kernel(compile_context, kernel_name, build_opts.options()); } - else if(pool_info.pool_size == Size2D(2, 2) && pool_type == PoolingType::MAX && _indices && is_data_type_float(data_type)) + else if(pool_info.pool_size == Size2D(2, 2) && pool_type == PoolingType::MAX && indices && is_data_type_float(data_type)) { // For max pooling with pool2x2, store indicies which will be used in max unpooling if(data_type == DataType::F32) @@ -368,8 +358,8 @@ void CLPoolingLayerKernel::configure(const CLCompileContext &compile_context, co const auto use_fp_mixed_precision = (data_type == DataType::F16) && pool_info.fp_mixed_precision && pool_type != PoolingType::MAX; // Wider accumulation is required to avoid accuracy loss - // Case 1: Floating point mixed precision (fp16 input data and fp32 accumulation) - // Cast 2: Quantized (int8/uint8 input data and int32 accumulation ) + // Case 1: Floating point mixed precision (fp16 src data and fp32 accumulation) + // Cast 2: Quantized (int8/uint8 src data and int32 accumulation ) DataType acc_data_type = data_type; if(use_fp_mixed_precision) @@ -384,15 +374,15 @@ void CLPoolingLayerKernel::configure(const CLCompileContext &compile_context, co build_opts.add_option("-DACC_DATA_TYPE=" + get_cl_type_from_data_type(acc_data_type)); build_opts.add_option_if(use_fp_mixed_precision, "-DFP_MIXED_PRECISION"); build_opts.add_option_if(exclude_padding, "-DEXCLUDE_PADDING"); - build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(input->info()->dimension(idx_width))); - build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(input->info()->dimension(idx_height))); - build_opts.add_option("-DDST_HEIGHT=" + support::cpp11::to_string(output->info()->dimension(idx_height))); - build_opts.add_option("-DDST_CHANNELS=" + support::cpp11::to_string(output->info()->dimension(idx_channel))); - build_opts.add_option("-DDST_BATCH_SIZE=" + support::cpp11::to_string(output->info()->dimension(idx_batch_size))); - build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(input->info()->dimension(0) % _num_elems_processed_per_iteration)); + build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(src->dimension(idx_width))); + build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(src->dimension(idx_height))); + build_opts.add_option("-DDST_HEIGHT=" + support::cpp11::to_string(dst->dimension(idx_height))); + build_opts.add_option("-DDST_CHANNELS=" + support::cpp11::to_string(dst->dimension(idx_channel))); + build_opts.add_option("-DDST_BATCH_SIZE=" + support::cpp11::to_string(dst->dimension(idx_batch_size))); + build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(src->dimension(0) % _num_elems_processed_per_iteration)); if(pool_info.pool_size == Size2D(2, 2) && is_data_type_float(data_type)) { - build_opts.add_option_if(_indices != nullptr && pool_type == PoolingType::MAX, "-DEXTRACT_MAX_INDEX"); + build_opts.add_option_if(indices != nullptr && pool_type == PoolingType::MAX, "-DEXTRACT_MAX_INDEX"); std::string kernel_name = "pooling_layer_2x2_nhwc"; _kernel = create_kernel(compile_context, kernel_name, build_opts.options()); @@ -414,34 +404,38 @@ void CLPoolingLayerKernel::configure(const CLCompileContext &compile_context, co _config_id += "_"; _config_id += lower_string(string_from_data_layout(_data_layout)); _config_id += "_"; - _config_id += support::cpp11::to_string(output->info()->dimension(idx_width)); + 
_config_id += support::cpp11::to_string(dst->dimension(idx_width)); _config_id += "_"; - _config_id += support::cpp11::to_string(output->info()->dimension(idx_height)); + _config_id += support::cpp11::to_string(dst->dimension(idx_height)); _config_id += "_"; - _config_id += support::cpp11::to_string(output->info()->dimension(idx_channel)); + _config_id += support::cpp11::to_string(dst->dimension(idx_channel)); _config_id += "_"; - _config_id += lower_string(string_from_data_layout(input->info()->data_layout())); + _config_id += lower_string(string_from_data_layout(src->data_layout())); - ARM_COMPUTE_ERROR_ON(input->info()->data_layout() == DataLayout::NHWC && has_padding_changed(padding_info)); + ARM_COMPUTE_ERROR_ON(src->data_layout() == DataLayout::NHWC && has_padding_changed(padding_info)); } -Status CLPoolingLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info, const ITensorInfo *indices) +Status ClPoolingKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &pool_info, const ITensorInfo *indices) { - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, pool_info, indices)); - ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(input->clone().get(), output->clone().get(), pool_info))); + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, pool_info, indices)); + ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(src->clone().get(), dst->clone().get(), pool_info))); return Status{}; } -void CLPoolingLayerKernel::run(const Window &window, cl::CommandQueue &queue) +void ClPoolingKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) { ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window); + ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window); unsigned int pool_stride_x = 0; unsigned int pool_stride_y = 0; std::tie(pool_stride_x, pool_stride_y) = _pool_info.pad_stride_info.stride(); + const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC)); + auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST_0)); + auto indices = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST_1)); + // Collapse window Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ); @@ -452,7 +446,7 @@ void CLPoolingLayerKernel::run(const Window &window, cl::CommandQueue &queue) Window slice = window_collapsed.first_slice_window_3D(); do { - // Upsample input by pool size + // Upsample src by pool size Window in_slice(slice); in_slice.set(Window::DimX, Window::Dimension(in_slice.x().start() - _pool_info.pad_stride_info.pad_left(), (in_slice.x().end() - _pool_info.pad_stride_info.pad_left()) * pool_stride_x, @@ -461,13 +455,13 @@ void CLPoolingLayerKernel::run(const Window &window, cl::CommandQueue &queue) (in_slice.y().end() - _pool_info.pad_stride_info.pad_top()) * pool_stride_y, pool_stride_y)); - // Set inputs + // Set srcs unsigned int idx = 0; - add_3D_tensor_argument(idx, _input, in_slice); - add_3D_tensor_argument(idx, _output, slice); - if(_indices && is_data_type_float(_input->info()->data_type()) && (_pool_info.pool_size == Size2D(2, 2))) + add_3D_tensor_argument(idx, src, in_slice); + add_3D_tensor_argument(idx, dst, slice); + if(indices && is_data_type_float(src->info()->data_type()) && 
(_pool_info.pool_size == Size2D(2, 2))) { - add_3D_tensor_argument(idx, _indices, slice); + add_3D_tensor_argument(idx, indices, slice); } enqueue(queue, *this, slice, lws_hint()); } @@ -476,23 +470,23 @@ void CLPoolingLayerKernel::run(const Window &window, cl::CommandQueue &queue) } case DataLayout::NHWC: { - const size_t batch_size = _output->info()->tensor_shape().total_size_upper(3); + const size_t batch_size = dst->info()->tensor_shape().total_size_upper(3); Window slice = window_collapsed.first_slice_window_4D(); Window in_slice = window_collapsed.first_slice_window_4D(); - in_slice.set(Window::DimX, Window::Dimension(0, _input->info()->dimension(0), _num_elems_processed_per_iteration)); - in_slice.set(Window::DimY, Window::Dimension(0, _input->info()->dimension(1), pool_stride_x)); - in_slice.set(Window::DimZ, Window::Dimension(0, _input->info()->dimension(2), pool_stride_y)); + in_slice.set(Window::DimX, Window::Dimension(0, src->info()->dimension(0), _num_elems_processed_per_iteration)); + in_slice.set(Window::DimY, Window::Dimension(0, src->info()->dimension(1), pool_stride_x)); + in_slice.set(Window::DimZ, Window::Dimension(0, src->info()->dimension(2), pool_stride_y)); in_slice.set(3, Window::Dimension(0, batch_size, 1)); do { - // Set inputs + // Set srcs unsigned int idx = 0; - add_4D_tensor_argument(idx, _input, in_slice); - add_4D_tensor_argument(idx, _output, slice); - if(_indices && is_data_type_float(_input->info()->data_type()) && (_pool_info.pool_type == PoolingType::MAX) && (_pool_info.pool_size == Size2D(2, 2))) + add_4D_tensor_argument(idx, src, in_slice); + add_4D_tensor_argument(idx, dst, slice); + if(indices && is_data_type_float(src->info()->data_type()) && (_pool_info.pool_type == PoolingType::MAX) && (_pool_info.pool_size == Size2D(2, 2))) { - add_4D_tensor_argument(idx, _indices, slice); + add_4D_tensor_argument(idx, indices, slice); } enqueue(queue, *this, slice, lws_hint()); } @@ -503,4 +497,6 @@ void CLPoolingLayerKernel::run(const Window &window, cl::CommandQueue &queue) ARM_COMPUTE_ERROR("Not implemented"); } } +} // namespace kernels +} // namespace opencl } // namespace arm_compute diff --git a/src/core/gpu/cl/kernels/ClPoolingKernel.h b/src/core/gpu/cl/kernels/ClPoolingKernel.h new file mode 100644 index 000000000..c1ce859e2 --- /dev/null +++ b/src/core/gpu/cl/kernels/ClPoolingKernel.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2017-2021 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ARM_COMPUTE_CL_POOLING_KERNEL_H +#define ARM_COMPUTE_CL_POOLING_KERNEL_H + +#include "src/core/common/Macros.h" +#include "src/core/gpu/cl/ClCompileContext.h" +#include "src/core/gpu/cl/IClKernel.h" + +namespace arm_compute +{ +namespace opencl +{ +namespace kernels +{ +/** Interface for the pooling layer kernel */ +class ClPoolingKernel : public IClKernel +{ +public: + /** Default constructor */ + ClPoolingKernel(); + ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClPoolingKernel); + + /** Configure kernel for a given list of arguments + * + * + * @param[in] compile_context The compile context to be used. + * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. + * @param[out] dst Destination tensor info. Data types supported: same as @p src. + * @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo. + * @param[out] indices (optional) The indices of the maximal values. Data type supported: U32. + */ + void configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &pool_info, ITensorInfo *indices = nullptr); + /** Static function to check if given info will lead to a valid configuration of @ref ClPoolingKernel + * + * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. + * @param[in] dst Destination tensor info. Data types supported: same as @p src. + * @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo. + * @param[in] indices (optional) The indices of the maximal values. Data type supported: U32. + * + * @return a status + */ + static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &pool_info, const ITensorInfo *indices = nullptr); + + // Inherited methods overridden: + void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override; + BorderSize border_size() const override; + +public: + PoolingLayerInfo _pool_info; + DataLayout _data_layout; + BorderSize _border_size; + unsigned int _num_elems_processed_per_iteration; +}; +} // namespace kernels +} // namespace opencl +} // namespace arm_compute +#endif /*ARM_COMPUTE_CL_POOLING_KERNEL_H */ diff --git a/src/runtime/CL/functions/CLPoolingLayer.cpp b/src/runtime/CL/functions/CLPoolingLayer.cpp index f3a2dbdd5..fbaec1d2d 100644 --- a/src/runtime/CL/functions/CLPoolingLayer.cpp +++ b/src/runtime/CL/functions/CLPoolingLayer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020 Arm Limited. + * Copyright (c) 2017-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -23,13 +23,27 @@ */ #include "arm_compute/runtime/CL/functions/CLPoolingLayer.h" +#include "arm_compute/core/CL/CLKernelLibrary.h" #include "arm_compute/core/CL/ICLTensor.h" -#include "arm_compute/runtime/CL/CLScheduler.h" -#include "src/core/CL/kernels/CLFillBorderKernel.h" -#include "src/core/CL/kernels/CLPoolingLayerKernel.h" +#include "src/core/CL/ICLKernel.h" +#include "src/runtime/gpu/cl/operators/ClPooling.h" namespace arm_compute { +struct CLPoolingLayer::Impl +{ + const ICLTensor *src{ nullptr }; + ICLTensor *dst{ nullptr }; + ICLTensor *indices{ nullptr }; + std::unique_ptr<opencl::ClPooling> op{ nullptr }; +}; + +CLPoolingLayer::CLPoolingLayer() + : _impl(std::make_unique<Impl>()) +{ +} +CLPoolingLayer::~CLPoolingLayer() = default; + void CLPoolingLayer::configure(ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info, ICLTensor *indices) { configure(CLKernelLibrary::get().get_compile_context(), input, output, pool_info, indices); @@ -37,56 +51,25 @@ void CLPoolingLayer::configure(ICLTensor *input, ICLTensor *output, const Poolin void CLPoolingLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info, ICLTensor *indices) { - ARM_COMPUTE_ERROR_ON_NULLPTR(input); - // Configure pooling kernel - auto k = std::make_unique<CLPoolingLayerKernel>(); - k->set_target(CLScheduler::get().target()); - k->configure(compile_context, input, output, pool_info, indices); - _kernel = std::move(k); - - const DataType data_type = input->info()->data_type(); + _impl->src = input; + _impl->dst = output; + _impl->indices = indices; - // Configure border depending on operation required (quantize border in case of asymmetric data_type) - BorderMode border_mode{}; - PixelValue pixel_value(0.f); - if(is_data_type_quantized_asymmetric(data_type) && !pool_info.exclude_padding) - { - pixel_value = PixelValue(0, data_type, input->info()->quantization_info()); - } - - // Data layout - const auto data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? input->info()->data_layout() : pool_info.data_layout; - - switch(data_layout) - { - case DataLayout::NCHW: - border_mode = (PoolingType::MAX == pool_info.pool_type) ? BorderMode::REPLICATE : BorderMode::CONSTANT; - break; - case DataLayout::NHWC: - border_mode = BorderMode::CONSTANT; - if(PoolingType::MAX == pool_info.pool_type) - { - if(is_data_type_quantized(data_type)) - { - std::tie(pixel_value, std::ignore) = get_min_max(data_type); - } - else - { - pixel_value = PixelValue(std::numeric_limits<float>::lowest()); - } - } - break; - default: - ARM_COMPUTE_ERROR("Data layout not supported"); - } - _border_handler->configure(compile_context, input, _kernel->border_size(), border_mode, pixel_value); - - // Tune kernels - CLScheduler::get().tune_kernel_static(*_kernel); + _impl->op = std::make_unique<opencl::ClPooling>(); + _impl->op->configure(compile_context, input->info(), output->info(), pool_info, (indices) ? 
indices->info() : nullptr); } Status CLPoolingLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info, const ITensorInfo *indices) { - return CLPoolingLayerKernel::validate(input, output, pool_info, indices); + return opencl::ClPooling::validate(input, output, pool_info, indices); +} + +void CLPoolingLayer::run() +{ + ITensorPack pack; + pack.add_tensor(TensorType::ACL_SRC, _impl->src); + pack.add_tensor(TensorType::ACL_DST_0, _impl->dst); + pack.add_tensor(TensorType::ACL_DST_1, _impl->indices); + _impl->op->run(pack); } } // namespace arm_compute diff --git a/src/runtime/CL/tuners/BifrostTuner.cpp b/src/runtime/CL/tuners/BifrostTuner.cpp index 8badd57b9..7a06de6d1 100644 --- a/src/runtime/CL/tuners/BifrostTuner.cpp +++ b/src/runtime/CL/tuners/BifrostTuner.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Arm Limited. + * Copyright (c) 2018-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -27,6 +27,8 @@ #include "src/core/CL/CLKernels.h" #include "support/Cast.h" +#include "src/core/gpu/cl/kernels/ClPoolingKernel.h" + namespace arm_compute { namespace tuners @@ -208,7 +210,7 @@ void tune_gemm_kernel(CLGEMMMatrixMultiplyKernel &k) k.set_lws_hint(lws_hint); } -void tune_pooling_kernel(CLPoolingLayerKernel &k) +void tune_pooling_kernel(opencl::kernels::ClPoolingKernel &k) { cl::NDRange lws_hint = k.lws_hint(); const GPUTarget gpu_target = k.get_target(); @@ -217,7 +219,7 @@ void tune_pooling_kernel(CLPoolingLayerKernel &k) // On Bifrost, this works for up to 35x35xC filters, for which the pooling_layer_3_optimized // kernel is launched with gws=(9, 33, C). In any case, the hint will be ignored if it is // invalid (e.g. exceeds the maximum workgroup size that the kernel can be launched with). - if(k._input->info()->data_layout() == DataLayout::NCHW) + if(k._pool_info.data_layout == DataLayout::NCHW) { if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G76, @@ -279,9 +281,9 @@ void BifrostTuner::tune_kernel_static(ICLKernel &kernel) { tune_gemm_kernel(*utils::cast::polymorphic_downcast<CLGEMMMatrixMultiplyKernel *>(&kernel)); } - else if(dynamic_cast<CLPoolingLayerKernel *>(&kernel) != nullptr) + else if(dynamic_cast<opencl::kernels::ClPoolingKernel *>(&kernel) != nullptr) { - tune_pooling_kernel(*utils::cast::polymorphic_downcast<CLPoolingLayerKernel *>(&kernel)); + tune_pooling_kernel(*utils::cast::polymorphic_downcast<opencl::kernels::ClPoolingKernel *>(&kernel)); } else if(dynamic_cast<CLScaleKernel *>(&kernel) != nullptr) { diff --git a/src/runtime/gpu/cl/operators/ClPooling.cpp b/src/runtime/gpu/cl/operators/ClPooling.cpp new file mode 100644 index 000000000..8610eb984 --- /dev/null +++ b/src/runtime/gpu/cl/operators/ClPooling.cpp @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2021 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "src/runtime/gpu/cl/operators/ClPooling.h" + +#include "arm_compute/runtime/CL/CLScheduler.h" + +#include "src/core/CL/kernels/CLFillBorderKernel.h" +#include "src/core/gpu/cl/ClCompileContext.h" +#include "src/core/gpu/cl/kernels/ClPoolingKernel.h" + +namespace arm_compute +{ +namespace opencl +{ +void ClPooling::configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &info, ITensorInfo *indices) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(src); + // Configure pooling kernel + auto k = std::make_unique<kernels::ClPoolingKernel>(); + k->set_target(CLScheduler::get().target()); + k->configure(compile_context, src, dst, info, indices); + _pooling = std::move(k); + + const DataType data_type = src->data_type(); + + // Configure border depending on operation required (quantize border in case of asymmetric data_type) + BorderMode border_mode{}; + PixelValue pixel_value(0.f); + if(is_data_type_quantized_asymmetric(data_type) && !info.exclude_padding) + { + pixel_value = PixelValue(0, data_type, src->quantization_info()); + } + + // Data layout + const auto data_layout = info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : info.data_layout; + + switch(data_layout) + { + case DataLayout::NCHW: + border_mode = (PoolingType::MAX == info.pool_type) ? BorderMode::REPLICATE : BorderMode::CONSTANT; + break; + case DataLayout::NHWC: + border_mode = BorderMode::CONSTANT; + if(PoolingType::MAX == info.pool_type) + { + if(is_data_type_quantized(data_type)) + { + std::tie(pixel_value, std::ignore) = get_min_max(data_type); + } + else + { + pixel_value = PixelValue(std::numeric_limits<float>::lowest()); + } + } + break; + default: + ARM_COMPUTE_ERROR("Data layout not supported"); + } + auto b = std::make_unique<CLFillBorderKernel>(); + b->configure(compile_context, src, _pooling->border_size(), border_mode, pixel_value); + _border_handler = std::move(b); + + // Tune kernels + CLScheduler::get().tune_kernel_static(*_pooling); +} + +Status ClPooling::validate(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &info, const ITensorInfo *indices) +{ + return kernels::ClPoolingKernel::validate(src, dst, info, indices); +} + +void ClPooling::run(ITensorPack &tensors) +{ + ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided"); + + CLScheduler::get().enqueue_op(*_border_handler.get(), tensors, false); + CLScheduler::get().enqueue_op(*_pooling.get(), tensors, false); +} +} // namespace opencl +} // namespace arm_compute diff --git a/src/runtime/gpu/cl/operators/ClPooling.h b/src/runtime/gpu/cl/operators/ClPooling.h new file mode 100644 index 000000000..99de6d0dc --- /dev/null +++ b/src/runtime/gpu/cl/operators/ClPooling.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2021 Arm Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ARM_COMPUTE_CL_POOLING_H +#define ARM_COMPUTE_CL_POOLING_H + +#include "src/core/gpu/cl/ClCompileContext.h" +#include "src/runtime/gpu/cl/IClOperator.h" + +#include <memory> + +namespace arm_compute +{ +namespace opencl +{ +/** Basic function to simulate a pooling layer with the specified pooling operation. This function calls the following OpenCL kernels: + * + * -# @ref CLFillBorderKernel (executed if padding size is different from zero) + * -# @ref opencl::ClPooling + */ +class ClPooling : public IClOperator +{ +public: + /** Constructor */ + ClPooling() = default; + /** Configure operator for a given list of arguments + * + * @param[in] compile_context The compile context to be used. + * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. + * @param[out] dst Destination tensor info. Data type supported: same as @p src + * @param[in] info Pooling layer parameters. + * @param[out] indices (optional) The indices info of the maximal values. Data type supported: U32. + */ + void configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &info, ITensorInfo *indices = nullptr); + /** Static function to check if given info will lead to a valid configuration of @ref ClPooling + * + * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. + * @param[out] dst Destination tensor info. Data type supported: same as @p src + * @param[in] info Pooling layer parameters. + * @param[out] indices (optional) The indices info of the maximal values. Data type supported: U32. + * + * @return a status + */ + static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &info, const ITensorInfo *indices = nullptr); + + // Inherited method overridden + void run(ITensorPack &tensors) override; + +private: + std::unique_ptr<ICLKernel> _pooling{ nullptr }; + std::unique_ptr<ICLKernel> _border_handler{ nullptr }; +}; +} // namespace opencl +} // namespace arm_compute +#endif /* ARM_COMPUTE_CL_POOLING_H */ |
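
Note: the public `CLPoolingLayer` interface is unchanged by this patch, so existing user code keeps working; only the implementation is routed through the state-less operator. A minimal usage sketch under that assumption follows; the shapes, pooling parameters and the FP32/NHWC setup are illustrative only.

```cpp
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"

int main()
{
    using namespace arm_compute;
    CLScheduler::get().default_init(); // pick a default OpenCL device/context

    // Illustrative FP32 NHWC input of shape (C=16, W=32, H=32).
    CLTensor   src, dst;
    TensorInfo src_info(TensorShape(16U, 32U, 32U), 1, DataType::F32);
    src_info.set_data_layout(DataLayout::NHWC);
    src.allocator()->init(src_info);

    // 2x2 max pooling with stride 2; dst info is auto-initialised by configure().
    CLPoolingLayer pool;
    pool.configure(&src, &dst, PoolingLayerInfo(PoolingType::MAX, Size2D(2, 2), DataLayout::NHWC, PadStrideInfo(2, 2, 0, 0)));

    src.allocator()->allocate();
    dst.allocator()->allocate();

    pool.run();                // internally builds an ITensorPack and runs opencl::ClPooling
    CLScheduler::get().sync(); // wait for the queued kernels to finish
    return 0;
}
```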