diff options
Diffstat (limited to 'runtime/neurun/backend/cpu/kernel/OperationUtils.h')
-rw-r--r-- | runtime/neurun/backend/cpu/kernel/OperationUtils.h | 152 |
1 file changed, 152 insertions, 0 deletions
diff --git a/runtime/neurun/backend/cpu/kernel/OperationUtils.h b/runtime/neurun/backend/cpu/kernel/OperationUtils.h new file mode 100644 index 000000000..b9e8c8974 --- /dev/null +++ b/runtime/neurun/backend/cpu/kernel/OperationUtils.h @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __NNFW_SUPPORT_NNAPI_OPERATION_UTILS_H__ +#define __NNFW_SUPPORT_NNAPI_OPERATION_UTILS_H__ + +#include <iostream> +#include <limits> +#include <vector> + +#include <cker/Shape.h> + +#include "ir/Operand.h" +#include "ir/DataType.h" +#include <ir/InternalType.h> + +using OperandType = neurun::ir::DataType; + +namespace neurun +{ +namespace backend +{ +namespace cpu +{ +namespace kernel +{ + +struct TensorDescriptor +{ + OperandType type; + std::vector<uint32_t> dimensions; + float scale; + int32_t offset; +}; + +union DataPtr { + uint8_t *u8; + int8_t *i8; + int32_t *i32; + float *f; + void *v; +}; + +uint32_t getNumberOfDimensions(const TensorDescriptor &descr); + +uint32_t getNumberOfElements(const TensorDescriptor &descr); + +uint32_t getSizeOfDimension(const TensorDescriptor &descr, uint32_t dimensionIdx); + +inline nnfw::cker::Shape convertToExtendedCkerShape(const TensorDescriptor &descr) +{ + std::vector<int32_t> raw_shape; + raw_shape.resize(4); + + uint32_t src = 4 - descr.dimensions.size(); + for (uint32_t i = 0; i < 4; ++i) + { + if (i < src) + { + 
raw_shape[i] = 1; + } + else + { + raw_shape[i] = descr.dimensions[i - src]; + } + } + + return nnfw::cker::GetShape(raw_shape); +} + +inline nnfw::cker::Shape convertTensorDescriptorToCkerShape(const TensorDescriptor &descr) +{ + std::vector<int32_t> raw_shape; + raw_shape.resize(4); + + for (uint32_t i = 0; i < 4; ++i) + { + if (i >= descr.dimensions.size()) + { + raw_shape[i] = 1; + } + else + { + raw_shape[i] = descr.dimensions[i]; + } + } + + return nnfw::cker::GetShape(raw_shape); +} + +inline int32_t getAxis(uint32_t rank, int32_t axis, ir::Layout frontend_layout) +{ + auto ret = axis; + + if (axis < 0) + { + ret += rank; + } + + // NCHW -> NHWC + if (frontend_layout == ir::Layout::NCHW) + { + int32_t permutation[4] = {0, 3, 1, 2}; + ret = permutation[ret]; + } + + return ret; +} + +void QuantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift); + +void GetQuantizedConvolutionMultiplier(const TensorDescriptor &inputDescr, + const TensorDescriptor &filterDescr, + const TensorDescriptor &biasDescr, + const TensorDescriptor &outputDescr, float *multiplier); + +void QuantizeMultiplierGreaterThanOne(double double_multiplier, int32_t *quantized_multiplier, + int *left_shift); + +void CalculateActivationRangeFloat(ir::Activation activation, float *activation_min, + float *activation_max); + +void CalculateActivationRangeUint8(ir::Activation activation, const TensorDescriptor &outputDescr, + int32_t *act_min, int32_t *act_max); + +bool HaveSameShapes(const TensorDescriptor *input1, const TensorDescriptor *input2); + +int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift); + +TensorDescriptor getTensorDescriptor(const ir::Operand &o, ir::Layout frontend_layout); + +uint32_t sizeOfData(OperandType type, const std::vector<uint32_t> &dimensions); + +} // namespace kernel +} // namespace cpu +} // namespace backend +} // namespace neurun + +#endif // __NNFW_SUPPORT_NNAPI_OPERATION_UTILS_H__ |