author     Chunseok Lee <chunseok.lee@samsung.com>   2020-09-05 21:49:46 +0900
committer  Chunseok Lee <chunseok.lee@samsung.com>   2020-09-05 21:49:46 +0900
commit     74476a2d0296bdad70a2f7f90bc7419a8b05bffd (patch)
tree       3f991636c1e9423d38eb16a384c20b569b0d678e /runtime/onert/backend/cpu/ops/ConvolutionLayer.cc
parent     042b262b3633b6c0f577aed6cb4b980ad0c1dcf3 (diff)
Diffstat (limited to 'runtime/onert/backend/cpu/ops/ConvolutionLayer.cc')
-rw-r--r--  runtime/onert/backend/cpu/ops/ConvolutionLayer.cc | 21
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/runtime/onert/backend/cpu/ops/ConvolutionLayer.cc b/runtime/onert/backend/cpu/ops/ConvolutionLayer.cc
index 2d5bbef1e..c057267d3 100644
--- a/runtime/onert/backend/cpu/ops/ConvolutionLayer.cc
+++ b/runtime/onert/backend/cpu/ops/ConvolutionLayer.cc
@@ -31,7 +31,8 @@ namespace ops
ConvolutionLayer::ConvolutionLayer()
: _input(nullptr), _kernel(nullptr), _bias(nullptr), _output(nullptr),
_paddingType(ir::PaddingType::EXPLICIT), _paddingLeft(0), _paddingTop(0), _paddingRight(0),
- _paddingBottom(0), _strideWidth(0), _strideHeight(0), _activation(ir::Activation::NONE),
+ _paddingBottom(0), _strideWidth(0), _strideHeight(0), _dilationWidthFactor(1),
+ _dilationHeightFactor(1), _activation(ir::Activation::NONE),
_conv_kernel(new nnfw::cker::Conv()), _prepare(false)
{
// DO NOTHING
@@ -50,8 +51,8 @@ void ConvolutionLayer::convFloat32()
op_params.padding_values.height = _paddingTop;
op_params.stride_width = _strideWidth;
op_params.stride_height = _strideHeight;
- op_params.dilation_width_factor = 1;
- op_params.dilation_height_factor = 1;
+ op_params.dilation_width_factor = _dilationWidthFactor;
+ op_params.dilation_height_factor = _dilationHeightFactor;
op_params.float_activation_min = output_activation_min;
op_params.float_activation_max = output_activation_max;
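
Note on what these factors change: a dilation factor d spreads the kernel taps d elements apart, so a k-wide kernel covers an effective extent of (k - 1) * d + 1 input elements. A minimal standalone sketch of the resulting output-size arithmetic (illustrative helpers only, not code from this commit):

    #include <cstdint>

    // Effective kernel extent once dilation spreads the taps apart.
    inline int32_t effectiveKernelSize(int32_t kernel, int32_t dilation)
    {
      return (kernel - 1) * dilation + 1;
    }

    // Output dimension for one axis with explicit padding, using the
    // usual convolution arithmetic (hypothetical helper, not part of onert).
    inline int32_t convOutputSize(int32_t in, int32_t kernel, int32_t stride,
                                  int32_t dilation, int32_t pad_front,
                                  int32_t pad_back)
    {
      return (in + pad_front + pad_back -
              effectiveKernelSize(kernel, dilation)) / stride + 1;
    }

For example, a 3-wide kernel with dilation 2 covers 5 input elements, so a 10-element input with no padding and stride 1 yields 6 outputs instead of the 8 a dense kernel would produce.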
@@ -78,8 +79,8 @@ void ConvolutionLayer::convQuant8()
nnfw::cker::ConvParams op_params;
op_params.stride_width = _strideWidth;
op_params.stride_height = _strideHeight;
- op_params.dilation_width_factor = 1;
- op_params.dilation_height_factor = 1;
+ op_params.dilation_width_factor = _dilationWidthFactor;
+ op_params.dilation_height_factor = _dilationHeightFactor;
op_params.padding_type = getPaddingType(_paddingType);
op_params.padding_values.width = _paddingLeft;
op_params.padding_values.height = _paddingTop;
@@ -104,6 +105,8 @@ void ConvolutionLayer::configure(const IPortableTensor *input, const IPortableTe
const uint32_t paddingLeft, const uint32_t paddingRight,
const uint32_t paddingTop, const uint32_t paddingBottom,
const uint32_t strideWidth, const uint32_t strideHeight,
+ const uint32_t dilationWidthFactor,
+ const uint32_t dilationHeightFactor,
const ir::Activation activation, IPortableTensor *output)
{
_input = input;
@@ -116,6 +119,8 @@ void ConvolutionLayer::configure(const IPortableTensor *input, const IPortableTe
_paddingBottom = paddingBottom;
_strideWidth = strideWidth;
_strideHeight = strideHeight;
+ _dilationWidthFactor = dilationWidthFactor;
+ _dilationHeightFactor = dilationHeightFactor;
_activation = activation;
_output = output;
}
@@ -145,7 +150,8 @@ void ConvolutionLayer::run()
param_padding.param.bottom = _paddingBottom;
const auto padding =
- ir::calculatePadding(param_padding, ifm_shape, ofm_shape, stride, ker_width, ker_height);
+ ir::calculatePadding(param_padding, ifm_shape, ofm_shape, stride, ker_width, ker_height,
+ _dilationWidthFactor, _dilationHeightFactor);
_paddingLeft = padding.left;
_paddingRight = padding.right;
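
The extra arguments matter because SAME-style padding is derived from the effective kernel extent, not the raw one. A sketch of the usual arithmetic for one axis (an assumed shape of the computation; the actual body of ir::calculatePadding may differ):

    #include <algorithm>
    #include <cstdint>

    struct Padding1D { int32_t front; int32_t back; };

    // SAME padding: pad just enough that out = ceil(in / stride),
    // accounting for the dilated kernel extent.
    Padding1D samePadding(int32_t in, int32_t kernel, int32_t stride,
                          int32_t dilation)
    {
      const int32_t effective = (kernel - 1) * dilation + 1;
      const int32_t out = (in + stride - 1) / stride; // ceil division
      const int32_t total = std::max(0, (out - 1) * stride + effective - in);
      return {total / 2, total - total / 2};
    }

With dilation left out of this calculation, a dilated kernel would receive the padding of a dense one and the output shape would come out wrong, which is why the factors are threaded through here.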
@@ -176,7 +182,8 @@ void ConvolutionLayer::prepare()
{
bool is_transposed = false;
kernel.prepare(getTensorShape(_kernel), reinterpret_cast<const float *>(_kernel->buffer()),
- getPaddingType(_paddingType), is_transposed);
+ getPaddingType(_paddingType), is_transposed, _dilationWidthFactor,
+ _dilationHeightFactor);
// Decrease reference of _kernel(weights) only when _kernel is constant
if (is_transposed)
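
Passing the dilation factors into kernel.prepare() lets the prepared-kernel fast path opt out when dilation is in play, since a pre-transposed kernel layout typically assumes dense (dilation == 1) taps. A plausible gating pattern (hypothetical sketch, not the actual nnfw::cker code):

    // Hypothetical: gate a cached/transposed kernel layout on dilation
    // (names are illustrative, not from cker).
    void prepareKernel(bool &use_transposed_cache,
                       int32_t dilation_w, int32_t dilation_h)
    {
      if (dilation_w != 1 || dilation_h != 1)
      {
        // Dilated taps invalidate the dense fast-path layout.
        use_transposed_cache = false;
        return;
      }
      use_transposed_cache = true;
      // ... transpose and cache the kernel here ...
    }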