diff options
author | Chunseok Lee <chunseok.lee@samsung.com> | 2020-03-05 15:10:09 +0900 |
---|---|---|
committer | Chunseok Lee <chunseok.lee@samsung.com> | 2020-03-05 15:22:53 +0900 |
commit | d91a039e0eda6fd70dcd22672b8ce1817c1ca50e (patch) | |
tree | 62668ec548cf31fadbbf4e99522999ad13434a25 /runtimes/neurun/backend/cpu/kernel/ConvolutionLayer.cc | |
parent | bd11b24234d7d43dfe05a81c520aa01ffad06e42 (diff) | |
download | nnfw-d91a039e0eda6fd70dcd22672b8ce1817c1ca50e.tar.gz nnfw-d91a039e0eda6fd70dcd22672b8ce1817c1ca50e.tar.bz2 nnfw-d91a039e0eda6fd70dcd22672b8ce1817c1ca50e.zip |
catch up to tizen_5.5 and remove unnecessary dir
- update to tizen_5.5
- remove dirs
Diffstat (limited to 'runtimes/neurun/backend/cpu/kernel/ConvolutionLayer.cc')
-rw-r--r-- | runtimes/neurun/backend/cpu/kernel/ConvolutionLayer.cc | 139 |
1 file changed, 139 insertions, 0 deletions
diff --git a/runtimes/neurun/backend/cpu/kernel/ConvolutionLayer.cc b/runtimes/neurun/backend/cpu/kernel/ConvolutionLayer.cc new file mode 100644 index 000000000..efeabbbae --- /dev/null +++ b/runtimes/neurun/backend/cpu/kernel/ConvolutionLayer.cc @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ConvolutionLayer.h" + +#include <cker/operation/Conv.h> + +#include "OperationUtils.h" + +namespace neurun +{ +namespace backend +{ +namespace cpu +{ +namespace kernel +{ +ConvolutionLayer::ConvolutionLayer() + : _inputData(), _kernelData(), _outputData(), _biasData(), _inputShape(), _kernelShape(), + _outputShape(), _biasShape(), _paddingLeft(0), _paddingTop(0), _paddingRight(0), + _paddingBottom(0), _strideWidth(0), _strideHeight(0), _activation(model::Activation::NONE), + _inputType(OperandType::FLOAT32) +{ + // DO NOTHING +} + +void ConvolutionLayer::convFloat32() +{ + float output_activation_min, output_activation_max; + CalculateActivationRangeFloat(_activation, &output_activation_min, &output_activation_max); + + nnfw::cker::ConvParams op_params; + op_params.padding_values.width = _paddingLeft; + op_params.padding_values.height = _paddingTop; + op_params.stride_width = _strideWidth; + op_params.stride_height = _strideHeight; + op_params.dilation_width_factor = 1; + op_params.dilation_height_factor = 1; + op_params.float_activation_min = 
output_activation_min; + op_params.float_activation_max = output_activation_max; + + nnfw::cker::Conv(op_params, convertShapeToCkerShape(_inputShape), _inputData.f, + convertShapeToCkerShape(_kernelShape), _kernelData.f, + convertShapeToCkerShape(_biasShape), _biasData.f, + convertShapeToCkerShape(_outputShape), _outputData.f); +} + +void ConvolutionLayer::convQuant8() +{ + int32_t output_activation_min = 0; + int32_t output_activation_max = 0; + CalculateActivationRangeUint8(_activation, _outputShape, &output_activation_min, + &output_activation_max); + + float real_multiplier = 0.0; + int32_t output_multiplier = 0; + int32_t output_shift = 0; + GetQuantizedConvolutionMultiplier(_inputShape, _kernelShape, _biasShape, _outputShape, + &real_multiplier); + QuantizeMultiplier(real_multiplier, &output_multiplier, &output_shift); + + nnfw::cker::ConvParams op_params; + op_params.stride_width = _strideWidth; + op_params.stride_height = _strideHeight; + op_params.dilation_width_factor = 1; + op_params.dilation_height_factor = 1; + op_params.padding_values.width = _paddingLeft; + op_params.padding_values.height = _paddingTop; + op_params.input_offset = -_inputShape.offset; + op_params.weights_offset = -_kernelShape.offset; + op_params.output_offset = _outputShape.offset; + op_params.output_multiplier = output_multiplier; + op_params.output_shift = output_shift; + op_params.quantized_activation_min = output_activation_min; + op_params.quantized_activation_max = output_activation_max; + + nnfw::cker::Conv(op_params, convertShapeToCkerShape(_inputShape), _inputData.u8, + convertShapeToCkerShape(_kernelShape), _kernelData.u8, + convertShapeToCkerShape(_biasShape), _biasData.i32, + convertShapeToCkerShape(_outputShape), _outputData.u8); +} + +void ConvolutionLayer::configure(uint8_t *inputData, const Shape inputShape, uint8_t *kernelData, + const Shape kernelShape, uint8_t *biasData, const Shape biasShape, + const uint32_t paddingLeft, const uint32_t paddingRight, + const 
uint32_t paddingTop, const uint32_t paddingBottom, + const uint32_t strideWidth, const uint32_t strideHeight, + const model::Activation activation, uint8_t *outputData, + const Shape outputShape) +{ + _inputData.u8 = inputData; + _inputShape = inputShape; + _inputType = inputShape.type; + _kernelData.u8 = kernelData; + _kernelShape = kernelShape; + _biasData.u8 = biasData; + _biasShape = biasShape; + _paddingLeft = paddingLeft; + _paddingRight = paddingRight; + _paddingTop = paddingTop; + _paddingBottom = paddingBottom; + _strideWidth = strideWidth; + _strideHeight = strideHeight; + _activation = activation; + _outputData.u8 = outputData; + _outputShape = outputShape; +} + +void ConvolutionLayer::run() +{ + if (_inputType == OperandType::FLOAT32) + { + convFloat32(); + } + else if (_inputType == OperandType::QUANT8_ASYMM) + { + convQuant8(); + } +} + +#undef ANDROID_NN_CONV_PARAMETERS + +} // namespace kernel +} // namespace cpu +} // namespace backend +} // namespace neurun |