Diffstat (limited to 'runtime/onert/backend/cpu/ops')
96 files changed, 0 insertions, 8741 deletions
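Every file removed by this change follows the same CPU-backend kernel shape: a class derived from onert::exec::IFunction whose configure() stores IPortableTensor operands and parameters, and whose run() dispatches on data_type() to an nnfw::cker routine. The sketch below mirrors that pattern, including the comparator selection used in ArgMinMaxLayer.cc, but uses simplified stand-in types (plain vectors, a local IFunction interface) instead of the real onert/IPortableTensor API so it compiles on its own; it is an illustrative sketch, not code from the repository.

```cpp
// Minimal, self-contained sketch of the configure()/run() kernel pattern shared by the
// deleted layers (cf. ArgMinMaxLayer). IFunction and the vector operands here are
// simplified stand-ins for onert's exec::IFunction and backend::IPortableTensor.
#include <cstdint>
#include <functional>
#include <iostream>
#include <stdexcept>
#include <vector>

struct IFunction // stand-in for onert::exec::IFunction
{
  virtual ~IFunction() = default;
  virtual void run() = 0;
};

class ArgMaxLayer : public IFunction
{
public:
  // configure() only captures operands; all real work happens in run(), as in the CPU backend.
  void configure(const std::vector<float> *input, std::vector<int32_t> *output, bool is_arg_max)
  {
    _input = input;
    _output = output;
    _is_arg_max = is_arg_max;
  }

  void run() override
  {
    if (_input == nullptr || _output == nullptr || _input->empty())
      throw std::runtime_error{"ArgMaxLayer: not configured"};
    // Same idea as GetComparefunction<T>() in ArgMinMaxLayer.cc: pick the comparator once.
    std::function<bool(float, float)> cmp =
      _is_arg_max ? std::function<bool(float, float)>(std::greater<float>())
                  : std::function<bool(float, float)>(std::less<float>());
    int32_t best = 0;
    for (int32_t i = 1; i < static_cast<int32_t>(_input->size()); ++i)
      if (cmp((*_input)[i], (*_input)[best]))
        best = i;
    _output->assign(1, best); // scalar result: index of the max (or min) element
  }

private:
  const std::vector<float> *_input = nullptr;
  std::vector<int32_t> *_output = nullptr;
  bool _is_arg_max = true;
};

int main()
{
  std::vector<float> in{0.5f, 2.0f, 1.5f};
  std::vector<int32_t> out;
  ArgMaxLayer layer;
  layer.configure(&in, &out, /*is_arg_max=*/true);
  layer.run();
  std::cout << out[0] << "\n"; // prints 1
}
```

The real layers differ mainly in that configure() receives IPortableTensor pointers (input, axis, output) plus operation parameters, and run() switches on ir::DataType to select the templated nnfw::cker kernel, throwing std::runtime_error for unsupported types, as the diff below shows.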
diff --git a/runtime/onert/backend/cpu/ops/ArgMinMaxLayer.cc b/runtime/onert/backend/cpu/ops/ArgMinMaxLayer.cc deleted file mode 100644 index 2fd284c91..000000000 --- a/runtime/onert/backend/cpu/ops/ArgMinMaxLayer.cc +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "ArgMinMaxLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/ArgMinMax.h> -#include <assert.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ -namespace -{ -template <typename T> std::function<bool(T, T)> GetComparefunction(bool is_arg_max) -{ - if (is_arg_max) - { - return std::greater<T>(); - } - else - { - return std::less<T>(); - } -} -} - -void ArgMinMaxLayer::configure(const IPortableTensor *input, IPortableTensor *output, - const IPortableTensor *axis, bool is_arg_max) -{ - _input = input; - _output = output; - _axis = axis; - _is_arg_max = is_arg_max; -} - -void ArgMinMaxLayer::run() -{ - if (_axis->total_size() != sizeof(int32_t)) - { - throw std::runtime_error("ArgMinMax: wrong shape of axis"); - } - auto axis = *reinterpret_cast<const int32_t *>(_axis->buffer()); - if (axis < 0) - { - axis += _input->num_dimensions(); - } -#define TF_LITE_ARG_MIN_MAX(input_type, axis_type, output_type) \ - ArgMinMax(getTensorShape(_input), reinterpret_cast<const input_type *>(_input->buffer()), \ - getTensorShape(_output), reinterpret_cast<output_type *>(_output->buffer()), axis, \ - GetComparefunction<input_type>(_is_arg_max)); - if (_output->data_type() == ir::DataType::INT32) - { - switch (_input->data_type()) - { - case ir::DataType::FLOAT32: - TF_LITE_ARG_MIN_MAX(float, int32_t, int32_t); - break; - case ir::DataType::QUANT_UINT8_ASYMM: - case ir::DataType::UINT8: - TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int32_t); - break; - case ir::DataType::INT32: - TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int32_t); - break; - default: - throw std::runtime_error("ArgMinMax: unsupported data type"); - } - } - else if (_output->data_type() == ir::DataType::INT64) - { - switch (_input->data_type()) - { - case ir::DataType::FLOAT32: - TF_LITE_ARG_MIN_MAX(float, int32_t, int64_t); - break; - case ir::DataType::QUANT_UINT8_ASYMM: - case ir::DataType::UINT8: - TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int64_t); - break; - case ir::DataType::INT32: - TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int64_t); - break; - default: - throw std::runtime_error("ArgMinMax: unsupported data type"); - } - } - else - { - throw std::runtime_error("ArgMinMax: unsupported data type"); - } - -#undef TF_LITE_ARG_MIN_MAX -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/ArgMinMaxLayer.h b/runtime/onert/backend/cpu/ops/ArgMinMaxLayer.h deleted file mode 100644 index 4c864cb98..000000000 --- a/runtime/onert/backend/cpu/ops/ArgMinMaxLayer.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. 
All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_ARGMINMAXLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_ARGMINMAXLAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class ArgMinMaxLayer : public ::onert::exec::IFunction -{ -public: - ArgMinMaxLayer() : _input(nullptr), _output(nullptr), _axis(nullptr), _is_arg_max(true) {} - -public: - void configure(const IPortableTensor *indices, IPortableTensor *output, - const IPortableTensor *axis, bool is_arg_max); - - void run() override; - -private: - const IPortableTensor *_input; - IPortableTensor *_output; - const IPortableTensor *_axis; - bool _is_arg_max; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_ARGMINMAXLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/BatchMatMulLayer.cc b/runtime/onert/backend/cpu/ops/BatchMatMulLayer.cc deleted file mode 100644 index 7ef023788..000000000 --- a/runtime/onert/backend/cpu/ops/BatchMatMulLayer.cc +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "BatchMatMulLayer.h" - -#include <cker/operation/BatchMatMul.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -BatchMatMulLayer::BatchMatMulLayer() - : _lhs(nullptr), _rhs(nullptr), _output(nullptr), _adj_x(false), _adj_y(false), - _kernel(new nnfw::cker::BatchMatMul()) -{ - // DO NOTHING -} - -BatchMatMulLayer::~BatchMatMulLayer() = default; - -void BatchMatMulLayer::batchMatMulFloat32() -{ - nnfw::cker::BatchMatMul &batchmatmul_kernel = *_kernel; - nnfw::cker::Shape lhs_shape = getTensorShape(_lhs); - nnfw::cker::Shape rhs_shape = getTensorShape(_rhs); - nnfw::cker::Shape output_shape = getTensorShape(_output); - - // TODO implement for constant input - - batchmatmul_kernel.prepare(lhs_shape, rhs_shape, _adj_x, _adj_y); - batchmatmul_kernel(lhs_shape, reinterpret_cast<const float *>(_lhs->buffer()), rhs_shape, - reinterpret_cast<const float *>(_rhs->buffer()), _adj_x, _adj_y, output_shape, - reinterpret_cast<float *>(_output->buffer())); -} - -void BatchMatMulLayer::configure(const IPortableTensor *lhs, const IPortableTensor *rhs, bool adj_x, - bool adj_y, IPortableTensor *output) -{ - assert(lhs != nullptr); - assert(rhs != nullptr); - assert(output != nullptr); - - _lhs = lhs; - _rhs = rhs; - _adj_x = adj_x; - _adj_y = adj_y; - _output = output; -} - -void BatchMatMulLayer::run() -{ - if (_lhs->data_type() == OperandType::FLOAT32) - { - batchMatMulFloat32(); - } - else - { - throw std::runtime_error{"BatchMatMul: unsupported data type"}; - } -} - -#undef AVGPOOLING_PARAMETERS - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/BatchMatMulLayer.h b/runtime/onert/backend/cpu/ops/BatchMatMulLayer.h deleted file mode 100644 index 6770e218b..000000000 --- a/runtime/onert/backend/cpu/ops/BatchMatMulLayer.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef __ONERT_BACKEND_CPU_OPS_BATCH_MATMUL_LAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_BATCH_MATMUL_LAYER_H__ - -#include <backend/IPortableTensor.h> -#include "OperationUtils.h" - -#include <exec/IFunction.h> - -namespace nnfw -{ -namespace cker -{ -class BatchMatMul; -} -} // namespace nnfw - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class BatchMatMulLayer : public ::onert::exec::IFunction -{ -public: - BatchMatMulLayer(); - ~BatchMatMulLayer(); - -public: - void batchMatMulFloat32(); - - void configure(const IPortableTensor *lhs, const IPortableTensor *rhs, bool adj_x, bool adj_y, - IPortableTensor *output); - - void run() override; - -private: - const IPortableTensor *_lhs; - const IPortableTensor *_rhs; - IPortableTensor *_output; - - bool _adj_x; - bool _adj_y; - - std::unique_ptr<nnfw::cker::BatchMatMul> _kernel; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_BATCH_MATMUL_LAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.cc b/runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.cc deleted file mode 100644 index f2f10eb9d..000000000 --- a/runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.cc +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "BatchToSpaceNDLayer.h" - -#include <cker/operation/BatchToSpaceND.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -BatchToSpaceNDLayer::BatchToSpaceNDLayer() - : _input(nullptr), _output(nullptr), _block_shape(nullptr), _crops(nullptr) -{ - // DO NOTHING -} - -template <typename T> void BatchToSpaceNDLayer::batchToSpaceNDGeneric() -{ - const int32_t NNapiCrops[]{0, 0, 0, 0}; - const int32_t *_crops_buffer; - - if (_crops == nullptr) - { - _crops_buffer = NNapiCrops; - } - else - { - _crops_buffer = reinterpret_cast<const int32_t *>(_crops->buffer()); - } - nnfw::cker::BatchToSpaceND<T>( - getTensorShape(_input), reinterpret_cast<const T *>(_input->buffer()), - reinterpret_cast<const int32_t *>(_block_shape->buffer()), _crops_buffer, - getTensorShape(_output), reinterpret_cast<T *>(_output->buffer())); -} - -void BatchToSpaceNDLayer::configure(const IPortableTensor *input, IPortableTensor *output, - IPortableTensor *block_shape, IPortableTensor *crops) -{ - _output = output; - _input = input; - _block_shape = block_shape; - _crops = crops; -} - -void BatchToSpaceNDLayer::run() -{ - if (_output->data_type() == OperandType::FLOAT32) - { - batchToSpaceNDGeneric<float>(); - } - else if (_output->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - batchToSpaceNDGeneric<uint8_t>(); - } - else - { - throw std::runtime_error{"NYI"}; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.h b/runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.h deleted file mode 100644 index 6e25b241b..000000000 --- a/runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef __ONERT_BACKEND_CPU_OPS_BATCHTOSPACEND_LAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_BATCHTOSPACEND_LAYER_H__ - -#include <backend/IPortableTensor.h> -#include "OperationUtils.h" - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class BatchToSpaceNDLayer : public ::onert::exec::IFunction -{ -public: - BatchToSpaceNDLayer(); - -public: - template <typename T> void batchToSpaceNDGeneric(); - - void configure(const IPortableTensor *input, IPortableTensor *output, - IPortableTensor *block_shape, IPortableTensor *crops); - - void run() override; - -private: - const IPortableTensor *_input; - IPortableTensor *_output; - IPortableTensor *_block_shape; - IPortableTensor *_crops; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_BATCHTOSPACEND_LAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/BinaryArithmeticLayer.cc b/runtime/onert/backend/cpu/ops/BinaryArithmeticLayer.cc deleted file mode 100644 index 8e51daad5..000000000 --- a/runtime/onert/backend/cpu/ops/BinaryArithmeticLayer.cc +++ /dev/null @@ -1,226 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "BinaryArithmeticLayer.h" - -#include <cker/operation/BinaryArithmeticOps.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -namespace -{ - -template <nnfw::cker::BinaryArithmeticOpType arithmetic_type, typename T> -void eval(const IPortableTensor *lhs, const IPortableTensor *rhs, IPortableTensor *output, - nnfw::cker::BinaryArithmeticOpParam op_params) -{ - const auto lhs_shape = getTensorShape(lhs); - const auto rhs_shape = getTensorShape(rhs); - const bool need_broadcast = nnfw::cker::ProcessBroadcastShapes(lhs_shape, rhs_shape, &op_params); - if (need_broadcast) - { - nnfw::cker::BroadcastBinaryArithmeticOp<arithmetic_type>( - op_params, lhs_shape, reinterpret_cast<const T *>(lhs->buffer()), rhs_shape, - reinterpret_cast<const T *>(rhs->buffer()), getTensorShape(output), - reinterpret_cast<T *>(output->buffer())); - return; - } - - nnfw::cker::BinaryArithmeticOp<arithmetic_type>( - op_params, lhs_shape, reinterpret_cast<const T *>(lhs->buffer()), rhs_shape, - reinterpret_cast<const T *>(rhs->buffer()), getTensorShape(output), - reinterpret_cast<T *>(output->buffer())); -} - -template <nnfw::cker::BinaryArithmeticOpType arithmetic_type> -std::function<void(const IPortableTensor *, const IPortableTensor *, IPortableTensor *)> -generateKernelGeneric(const IPortableTensor *lhs, const ir::Activation activation, - nnfw::cker::BinaryArithmeticOpParam op_params) -{ - switch (lhs->data_type()) - { - case OperandType::FLOAT32: - { - float output_activation_min = 0, output_activation_max = 0; - CalculateActivationRange(activation, &output_activation_min, &output_activation_max); - op_params.float_activation_max = output_activation_max; - op_params.float_activation_min = output_activation_min; - return std::bind(&eval<arithmetic_type, float>, std::placeholders::_1, std::placeholders::_2, - std::placeholders::_3, op_params); - break; - } - case OperandType::INT32: - { - int32_t output_activation_min = 0, output_activation_max = 0; - CalculateActivationRange(activation, &output_activation_min, &output_activation_max); - op_params.quantized_activation_max = output_activation_max; - op_params.quantized_activation_min = output_activation_min; - return std::bind(eval<arithmetic_type, int32_t>, std::placeholders::_1, std::placeholders::_2, - std::placeholders::_3, op_params); - break; - } - default: - throw std::runtime_error{"BinaryArithmetic(generic): Unsupported data type"}; - } -} - -void setAddOrSubQuant8Params(const IPortableTensor *lhs, const IPortableTensor *rhs, - IPortableTensor *output, ir::Activation activation, - nnfw::cker::BinaryArithmeticOpParam *params) -{ - int32_t output_activation_min, output_activation_max; - CalculateActivationRangeUint8(activation, output, &output_activation_min, &output_activation_max); - nnfw::cker::BinaryArithmeticOpParam &op_params = *params; - op_params.quantized_activation_max = output_activation_max; - op_params.quantized_activation_min = output_activation_min; - // Parameters for scaled quantized computation - op_params.left_shift = 20; - // Zero-points of input and output tensors - op_params.input1_offset = -lhs->data_offset(); - op_params.input2_offset = -rhs->data_offset(); - op_params.output_offset = output->data_offset(); - assert((op_params.input1_offset >= 0) && (op_params.input1_offset <= 255)); - assert((op_params.input2_offset >= 0) && (op_params.input2_offset <= 255)); - assert((op_params.output_offset >= 0) && (op_params.output_offset <= 255)); - - // Compute normalized scale for _lhs and _rhs 
values, - // and represent in 32-bit fixed point - const double norm_max_scale = 2 * std::max(lhs->data_scale(), rhs->data_scale()); - const double real_lhs_scale = lhs->data_scale() / norm_max_scale; - const double real_rhs_scale = rhs->data_scale() / norm_max_scale; - // output scale is used to normalize final result, so we invert the scale here - const double real_output_scale = - norm_max_scale / (output->data_scale() * (1 << op_params.left_shift)); - - // Represent the scales as fixed int32_t multipliers, and int32_t shifts - QuantizeMultiplier(real_lhs_scale, &op_params.input1_multiplier, &op_params.input1_shift); - QuantizeMultiplier(real_rhs_scale, &op_params.input2_multiplier, &op_params.input2_shift); - QuantizeMultiplier(real_output_scale, &op_params.output_multiplier, &op_params.output_shift); -} - -void setMulQuant8Params(const IPortableTensor *lhs, const IPortableTensor *rhs, - IPortableTensor *output, ir::Activation activation, - nnfw::cker::BinaryArithmeticOpParam *params) -{ - int32_t output_activation_min, output_activation_max; - CalculateActivationRangeUint8(activation, output, &output_activation_min, &output_activation_max); - nnfw::cker::BinaryArithmeticOpParam &op_params = *params; - - op_params.quantized_activation_max = output_activation_max; - op_params.quantized_activation_min = output_activation_min; - op_params.input1_offset = -lhs->data_offset(); - op_params.input2_offset = -rhs->data_offset(); - op_params.output_offset = output->data_offset(); - - double real_multiplier = lhs->data_scale() * rhs->data_scale() / output->data_scale(); - QuantizeMultiplier(real_multiplier, &op_params.output_multiplier, &op_params.output_shift); -} - -} // namespace - -void BinaryArithmeticLayer::configure(const IPortableTensor *lhs, const IPortableTensor *rhs, - IPortableTensor *output, const ir::Activation activation, - const ArithmeticType arithmetic_type) -{ - assert(lhs != nullptr); - assert(rhs != nullptr); - assert(output != nullptr); - - _lhs = lhs; - _rhs = rhs; - _output = output; - - nnfw::cker::BinaryArithmeticOpParam op_params; - switch (arithmetic_type) - { - case ArithmeticType::kAdd: - if (_lhs->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - setAddOrSubQuant8Params(_lhs, _rhs, _output, activation, &op_params); - _kernel = std::bind(&eval<nnfw::cker::BinaryArithmeticOpType::ADD, uint8_t>, - std::placeholders::_1, std::placeholders::_2, std::placeholders::_3, - op_params); - } - else - { - _kernel = generateKernelGeneric<nnfw::cker::BinaryArithmeticOpType::ADD>(_lhs, activation, - op_params); - } - break; - case ArithmeticType::kSub: - if (_lhs->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - setAddOrSubQuant8Params(_lhs, _rhs, _output, activation, &op_params); - op_params.input2_multiplier *= -1; - _kernel = std::bind(&eval<nnfw::cker::BinaryArithmeticOpType::SUB, uint8_t>, - std::placeholders::_1, std::placeholders::_2, std::placeholders::_3, - op_params); - } - else - { - _kernel = generateKernelGeneric<nnfw::cker::BinaryArithmeticOpType::SUB>(_lhs, activation, - op_params); - } - break; - case ArithmeticType::kMul: - if (_lhs->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - nnfw::cker::BinaryArithmeticOpParam op_params; - setMulQuant8Params(_lhs, _rhs, _output, activation, &op_params); - _kernel = std::bind(&eval<nnfw::cker::BinaryArithmeticOpType::MUL, uint8_t>, - std::placeholders::_1, std::placeholders::_2, std::placeholders::_3, - op_params); - } - else - { - _kernel = generateKernelGeneric<nnfw::cker::BinaryArithmeticOpType::MUL>(_lhs, 
activation, - op_params); - } - break; - case ArithmeticType::kDiv: - if (_lhs->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - throw std::runtime_error{ - "BinaryArithmetic(Div): Div operation does not support quantization"}; - } - else if (_lhs->data_type() == OperandType::INT32) - { - throw std::runtime_error{"BinaryArithmetic(Div): Unsupported data type"}; - } - else - { - _kernel = generateKernelGeneric<nnfw::cker::BinaryArithmeticOpType::DIV>(_lhs, activation, - op_params); - } - break; - default: - throw std::runtime_error{"BinaryArithmetic: Unsupported BinaryArithmetic type"}; - } -} - -void BinaryArithmeticLayer::run() { _kernel(_lhs, _rhs, _output); } - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/BinaryArithmeticLayer.h b/runtime/onert/backend/cpu/ops/BinaryArithmeticLayer.h deleted file mode 100644 index d6b33ad07..000000000 --- a/runtime/onert/backend/cpu/ops/BinaryArithmeticLayer.h +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_BINARYARITHMETICLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_BINARYARITHMETICLAYER_H__ - -#include <backend/IPortableTensor.h> -#include "OperationUtils.h" - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -enum class ArithmeticType -{ - kAdd, - kSub, - kMul, - kDiv, -}; - -class BinaryArithmeticLayer : public ::onert::exec::IFunction -{ -public: - BinaryArithmeticLayer() : _lhs(nullptr), _rhs(nullptr), _output(nullptr) - { - // DO NOTHING - } - -public: - void configure(const IPortableTensor *lhs, const IPortableTensor *rhs, IPortableTensor *output, - const ir::Activation activation, const ArithmeticType arithmetic_type); - - void run() override; - -private: - const IPortableTensor *_lhs; - const IPortableTensor *_rhs; - IPortableTensor *_output; - - std::function<void(const IPortableTensor *, const IPortableTensor *, IPortableTensor *)> _kernel; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_BINARYARITHMETICLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/BroadcastToLayer.cc b/runtime/onert/backend/cpu/ops/BroadcastToLayer.cc deleted file mode 100644 index d9c1bbfc5..000000000 --- a/runtime/onert/backend/cpu/ops/BroadcastToLayer.cc +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "BroadcastToLayer.h" - -#include <cker/operation/BroadcastTo.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -BroadcastToLayer::BroadcastToLayer() : _input(nullptr), _shape(nullptr), _output(nullptr) -{ - // DO NOTHING -} - -void BroadcastToLayer::configure(const IPortableTensor *input, const IPortableTensor *shape, - IPortableTensor *output) -{ - _input = input; - _shape = shape; - _output = output; -} - -void BroadcastToLayer::run() -{ - // NOTE : It was implemented follows tf.broadcast_to operation works and - // Api Document(https://www.tensorflow.org/api_docs/python/tf/broadcast_to) - - switch (_output->data_type()) - { - // ToDo : It need to support INT8 and UINT8 also when will be applied quantization. - case OperandType::FLOAT32: - nnfw::cker::BroadcastTo<float>( - getTensorShape(_input), reinterpret_cast<float *>(_input->buffer()), - getTensorShape(_output), reinterpret_cast<float *>(_output->buffer())); - break; - case OperandType::INT32: - nnfw::cker::BroadcastTo<int32_t>( - getTensorShape(_input), reinterpret_cast<int32_t *>(_input->buffer()), - getTensorShape(_output), reinterpret_cast<int32_t *>(_output->buffer())); - break; - case OperandType::UINT32: - nnfw::cker::BroadcastTo<uint32_t>( - getTensorShape(_input), reinterpret_cast<uint32_t *>(_input->buffer()), - getTensorShape(_output), reinterpret_cast<uint32_t *>(_output->buffer())); - break; - default: - throw std::runtime_error{"BroadcastToLayer: unsupported data type"}; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/BroadcastToLayer.h b/runtime/onert/backend/cpu/ops/BroadcastToLayer.h deleted file mode 100644 index 8e8433fc9..000000000 --- a/runtime/onert/backend/cpu/ops/BroadcastToLayer.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef __ONERT_BACKEND_CPU_OPS_BROADCASTLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_BROADCASTLAYER_H__ - -#include <backend/IPortableTensor.h> -#include "OperationUtils.h" - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class BroadcastToLayer : public ::onert::exec::IFunction -{ -public: - BroadcastToLayer(); - -public: - void configure(const IPortableTensor *input, const IPortableTensor *shape, - IPortableTensor *output); - - void run() override; - -private: - const IPortableTensor *_input; - const IPortableTensor *_shape; - IPortableTensor *_output; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_BROADCASTLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/CompareLayer.cc b/runtime/onert/backend/cpu/ops/CompareLayer.cc deleted file mode 100644 index adf902aaf..000000000 --- a/runtime/onert/backend/cpu/ops/CompareLayer.cc +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "CompareLayer.h" - -#include "OperationUtils.h" - -#include <assert.h> -#include <cker/operation/Comparison.h> -using namespace nnfw::cker; -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -namespace -{ - -using OpType = onert::ir::operation::Comparison::ComparisonType; -using namespace onert::backend::cpu; - -// Assumes these enum values to be in the order like this -static_assert(static_cast<int>(OpType::Equal) == 0, "An OpType value has changed!"); -static_assert(static_cast<int>(OpType::NotEqual) == 1, "An OpType value has changed!"); -static_assert(static_cast<int>(OpType::Greater) == 2, "An OpType value has changed!"); -static_assert(static_cast<int>(OpType::GreaterEqual) == 3, "An OpType value has changed!"); -static_assert(static_cast<int>(OpType::Less) == 4, "An OpType value has changed!"); -static_assert(static_cast<int>(OpType::LessEqual) == 5, "An OpType value has changed!"); - -template <typename T> -void compareQuant8(const IPortableTensor *lhs, const IPortableTensor *rhs, IPortableTensor *output, - OpType op_type) -{ - nnfw::cker::ComparisonParams params; - params.left_shift = 8; - params.input1_offset = -lhs->data_offset(); - params.input2_offset = -rhs->data_offset(); - const double norm_max_scale = - 2 * std::max(std::abs(lhs->data_scale()), std::abs(rhs->data_scale())); - const double adjusted_lhs_scale = lhs->data_scale() / norm_max_scale; - const double adjusted_rhs_scale = rhs->data_scale() / norm_max_scale; - QuantizeMultiplierSmallerThanOneExp(adjusted_lhs_scale, ¶ms.input1_multiplier, - ¶ms.input1_shift); - QuantizeMultiplierSmallerThanOneExp(adjusted_rhs_scale, ¶ms.input2_multiplier, - ¶ms.input2_shift); - params.is_broadcast = !HaveSameShapes(lhs, rhs); - - using CompareFunction = - void (*)(ComparisonParams & params, const Shape &input1_shape, const T *input1_data, - const Shape 
&input2_shape, const T *input2_data, const Shape &output_shape, - bool *output_data); - - static const CompareFunction broadcast_fns[] = { - Broadcast4DSlowEqualWithScaling, Broadcast4DSlowNotEqualWithScaling, - Broadcast4DSlowGreaterWithScaling, Broadcast4DSlowGreaterEqualWithScaling, - Broadcast4DSlowLessWithScaling, Broadcast4DSlowLessEqualWithScaling, - }; - static const CompareFunction non_broadcast_fns[] = { - EqualWithScaling, NotEqualWithScaling, GreaterWithScaling, - GreaterEqualWithScaling, LessWithScaling, LessEqualWithScaling, - }; - - static_assert(sizeof(broadcast_fns) == sizeof(non_broadcast_fns), - "Sizes of broadcast_fns and non_broadcast_fns must match!"); - - auto index = static_cast<int>(op_type); - if (index < 0 || index >= static_cast<int>(sizeof(broadcast_fns) / sizeof(broadcast_fns[0]))) - throw std::runtime_error{"Invalid OpType for CompareLayer"}; - - CompareFunction fn = (params.is_broadcast ? broadcast_fns[index] : non_broadcast_fns[index]); - - fn(params, getExtendedTensorShape(lhs), reinterpret_cast<const T *>(lhs->buffer()), - getExtendedTensorShape(rhs), reinterpret_cast<const T *>(rhs->buffer()), - getExtendedTensorShape(output), reinterpret_cast<bool *>(output->buffer())); -} - -template <typename T> -void compareScalar(const IPortableTensor *lhs, const IPortableTensor *rhs, IPortableTensor *output, - OpType op_type) -{ - bool requires_broadcast = !HaveSameShapes(lhs, rhs); - - using CompareFunction = - void (*)(const Shape &input1_shape, const T *input1_data, const Shape &input2_shape, - const T *input2_data, const Shape &output_shape, bool *output_data); - - static const CompareFunction broadcast_fns[] = { - Broadcast4DSlowEqual, Broadcast4DSlowNotEqual, Broadcast4DSlowGreater, - Broadcast4DSlowGreaterEqual, Broadcast4DSlowLess, Broadcast4DSlowLessEqual, - }; - static const CompareFunction non_broadcast_fns[] = { - EqualNoScaling, NotEqualNoScaling, GreaterNoScaling, - GreaterEqualNoScaling, LessNoScaling, LessEqualNoScaling, - }; - - static_assert(sizeof(broadcast_fns) == sizeof(non_broadcast_fns), - "Sizes of broadcast_fns and non_broadcast_fns must match!"); - - auto index = static_cast<int>(op_type); - if (index < 0 || index >= static_cast<int>(sizeof(broadcast_fns) / sizeof(broadcast_fns[0]))) - throw std::runtime_error{"Invalid OpType for CompareLayer"}; - - CompareFunction fn = (requires_broadcast ? 
broadcast_fns[index] : non_broadcast_fns[index]); - - fn(getExtendedTensorShape(lhs), reinterpret_cast<const T *>(lhs->buffer()), - getExtendedTensorShape(rhs), reinterpret_cast<const T *>(rhs->buffer()), - getExtendedTensorShape(output), reinterpret_cast<bool *>(output->buffer())); -} - -} // namespace - -CompareLayer::CompareLayer() - : _lhs(nullptr), _rhs(nullptr), _output(nullptr), - _op_type(ir::operation::Comparison::ComparisonType::Equal) -{ - // DO NOTHING -} - -void CompareLayer::configure(const IPortableTensor *lhs, const IPortableTensor *rhs, - const OpType op_type, IPortableTensor *output) -{ - _lhs = lhs; - _rhs = rhs; - _op_type = op_type; - _output = output; -} - -void CompareLayer::run() -{ - if (_lhs->data_type() == OperandType::FLOAT32) - { - compareScalar<float>(_lhs, _rhs, _output, _op_type); - } - else if (_lhs->data_type() == OperandType::INT32) - { - compareScalar<int32_t>(_lhs, _rhs, _output, _op_type); - } - else if (_lhs->data_type() == OperandType::INT64) - { - compareScalar<int64_t>(_lhs, _rhs, _output, _op_type); - } - else if (_lhs->data_type() == OperandType::BOOL8) - { - compareScalar<uint8_t>(_lhs, _rhs, _output, _op_type); - } - else if (_lhs->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - compareQuant8<uint8_t>(_lhs, _rhs, _output, _op_type); - } - else - { - throw std::runtime_error{"Compare: unsupported data type"}; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/CompareLayer.h b/runtime/onert/backend/cpu/ops/CompareLayer.h deleted file mode 100644 index add360ef8..000000000 --- a/runtime/onert/backend/cpu/ops/CompareLayer.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_COMPARELAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_COMPARELAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> -#include <ir/operation/Comparison.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class CompareLayer : public ::onert::exec::IFunction -{ -public: - CompareLayer(); - -public: - void configure(const IPortableTensor *lhs, const IPortableTensor *rhs, - const ir::operation::Comparison::ComparisonType op_type, IPortableTensor *output); - - void run() override; - -private: - const IPortableTensor *_lhs; - const IPortableTensor *_rhs; - IPortableTensor *_output; - ir::operation::Comparison::ComparisonType _op_type; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_COMPARELAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/ConcatLayer.cc b/runtime/onert/backend/cpu/ops/ConcatLayer.cc deleted file mode 100644 index d26ed7378..000000000 --- a/runtime/onert/backend/cpu/ops/ConcatLayer.cc +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. 
All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "ConcatLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/Concatenation.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -ConcatLayer::ConcatLayer() : _inputs(), _output(nullptr), _axis(0) -{ - // DO NOTHING -} - -template <typename T> void ConcatLayer::concatenationGeneral() -{ - uint32_t num_inputs = _inputs.size(); - - nnfw::cker::ConcatenationParams op_params; - op_params.axis = _axis; - op_params.inputs_count = num_inputs; - - std::vector<nnfw::cker::Shape *> inputDimsPtr; - std::vector<nnfw::cker::Shape> inputDims; - inputDimsPtr.reserve(num_inputs); - inputDims.reserve(num_inputs); - - for (uint32_t i = 0; i < num_inputs; i++) - { - inputDims.push_back(getTensorShape(_inputs[i])); - inputDimsPtr.push_back(&inputDims[i]); - } - - std::vector<const T *> inputDataPtrs; - - for (const auto input : _inputs) - { - inputDataPtrs.emplace_back(reinterpret_cast<const T *>(input->buffer())); - } - - nnfw::cker::Concatenation<T>(op_params, inputDimsPtr.data(), inputDataPtrs.data(), - getTensorShape(_output), reinterpret_cast<T *>(_output->buffer())); -} -void ConcatLayer::concatenationQuant8() -{ - uint32_t num_inputs = _inputs.size(); - - std::vector<int32_t> input_zeropoints(num_inputs); - std::vector<float> input_scales(num_inputs); - for (uint32_t i = 0; i < num_inputs; i++) - { - input_zeropoints[i] = _inputs[i]->data_offset(); - input_scales[i] = _inputs[i]->data_scale(); - } - - nnfw::cker::ConcatenationParams op_params; - op_params.axis = _axis; - op_params.inputs_count = num_inputs; - op_params.input_zeropoint = input_zeropoints.data(); - op_params.input_scale = input_scales.data(); - op_params.output_zeropoint = _output->data_offset(); - op_params.output_scale = _output->data_scale(); - - std::vector<nnfw::cker::Shape *> inputDimsPtr; - std::vector<nnfw::cker::Shape> inputDims; - inputDimsPtr.reserve(num_inputs); - inputDims.reserve(num_inputs); - for (uint32_t i = 0; i < num_inputs; i++) - { - inputDims.push_back(getTensorShape(_inputs[i])); - inputDimsPtr.push_back(&inputDims[i]); - } - - std::vector<const uint8_t *> inputDataPtrs; - for (const auto input : _inputs) - { - inputDataPtrs.emplace_back(reinterpret_cast<const uint8_t *>(input->buffer())); - } - - nnfw::cker::ConcatenationWithScaling(op_params, inputDimsPtr.data(), inputDataPtrs.data(), - getTensorShape(_output), - reinterpret_cast<uint8_t *>(_output->buffer())); -} - -void ConcatLayer::configure(const std::vector<const IPortableTensor *> &inputs, int32_t axis, - IPortableTensor *output) -{ - assert(inputs.size() > 0); - assert(output != nullptr); - - _inputs = inputs; - _axis = axis; - _output = output; -} - -void ConcatLayer::run() -{ - if (_output->data_type() == OperandType::FLOAT32) - { - concatenationGeneral<float>(); - } - else if (_output->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - concatenationQuant8(); - } - else if (_output->data_type() == 
OperandType::INT32) - { - concatenationGeneral<int32_t>(); - } - else if (_output->data_type() == OperandType::INT64) - { - concatenationGeneral<int64_t>(); - } - else - throw std::runtime_error("Concat: unsupported data type"); -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/ConcatLayer.h b/runtime/onert/backend/cpu/ops/ConcatLayer.h deleted file mode 100644 index 0787199d6..000000000 --- a/runtime/onert/backend/cpu/ops/ConcatLayer.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_CONCATLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_CONCATLAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class ConcatLayer : public ::onert::exec::IFunction -{ -public: - ConcatLayer(); - -public: - template <typename T> void concatenationGeneral(); - - void concatenationQuant8(); - - void configure(const std::vector<const IPortableTensor *> &inputs, int32_t axis, - IPortableTensor *output); - - void run() override; - -private: - std::vector<const IPortableTensor *> _inputs; - IPortableTensor *_output; - int32_t _axis; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_CONCATLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/ConvolutionLayer.cc b/runtime/onert/backend/cpu/ops/ConvolutionLayer.cc deleted file mode 100644 index c057267d3..000000000 --- a/runtime/onert/backend/cpu/ops/ConvolutionLayer.cc +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "ConvolutionLayer.h" - -#include "../Tensor.h" -#include "ir/Padding.h" -#include <cker/operation/Conv.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ -ConvolutionLayer::ConvolutionLayer() - : _input(nullptr), _kernel(nullptr), _bias(nullptr), _output(nullptr), - _paddingType(ir::PaddingType::EXPLICIT), _paddingLeft(0), _paddingTop(0), _paddingRight(0), - _paddingBottom(0), _strideWidth(0), _strideHeight(0), _dilationWidthFactor(1), - _dilationHeightFactor(1), _activation(ir::Activation::NONE), - _conv_kernel(new nnfw::cker::Conv()), _prepare(false) -{ - // DO NOTHING -} - -ConvolutionLayer::~ConvolutionLayer() = default; - -void ConvolutionLayer::convFloat32() -{ - float output_activation_min = 0, output_activation_max = 0; - CalculateActivationRange(_activation, &output_activation_min, &output_activation_max); - - nnfw::cker::ConvParams op_params; - op_params.padding_type = getPaddingType(_paddingType); - op_params.padding_values.width = _paddingLeft; - op_params.padding_values.height = _paddingTop; - op_params.stride_width = _strideWidth; - op_params.stride_height = _strideHeight; - op_params.dilation_width_factor = _dilationWidthFactor; - op_params.dilation_height_factor = _dilationHeightFactor; - op_params.float_activation_min = output_activation_min; - op_params.float_activation_max = output_activation_max; - - nnfw::cker::Conv &kernel = *_conv_kernel; - kernel(op_params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()), - getTensorShape(_kernel), reinterpret_cast<const float *>(_kernel->buffer()), - getTensorShape(_bias), reinterpret_cast<const float *>(_bias->buffer()), - getTensorShape(_output), reinterpret_cast<float *>(_output->buffer())); -} - -void ConvolutionLayer::convQuant8() -{ - int32_t output_activation_min = 0; - int32_t output_activation_max = 0; - CalculateActivationRangeUint8(_activation, _output, &output_activation_min, - &output_activation_max); - - double real_multiplier = 0.0; - int32_t output_multiplier = 0; - int32_t output_shift = 0; - GetQuantizedConvolutionMultiplier(_input, _kernel, _bias, _output, &real_multiplier); - QuantizeMultiplier(real_multiplier, &output_multiplier, &output_shift); - - nnfw::cker::ConvParams op_params; - op_params.stride_width = _strideWidth; - op_params.stride_height = _strideHeight; - op_params.dilation_width_factor = _dilationWidthFactor; - op_params.dilation_height_factor = _dilationHeightFactor; - op_params.padding_type = getPaddingType(_paddingType); - op_params.padding_values.width = _paddingLeft; - op_params.padding_values.height = _paddingTop; - op_params.input_offset = -_input->data_offset(); - op_params.weights_offset = -_kernel->data_offset(); - op_params.output_offset = _output->data_offset(); - op_params.output_multiplier = output_multiplier; - op_params.output_shift = output_shift; - op_params.quantized_activation_min = output_activation_min; - op_params.quantized_activation_max = output_activation_max; - op_params.is_replaced_weights = true; - - nnfw::cker::Conv &kernel = *_conv_kernel; - kernel(op_params, getTensorShape(_input), reinterpret_cast<const uint8_t *>(_input->buffer()), - getTensorShape(_kernel), reinterpret_cast<const uint8_t *>(_kernel->buffer()), - getTensorShape(_bias), reinterpret_cast<const int32_t *>(_bias->buffer()), - getTensorShape(_output), reinterpret_cast<uint8_t *>(_output->buffer())); -} - -void ConvolutionLayer::configure(const IPortableTensor *input, const IPortableTensor *kernel, - const IPortableTensor *bias, 
const ir::PaddingType paddingType, - const uint32_t paddingLeft, const uint32_t paddingRight, - const uint32_t paddingTop, const uint32_t paddingBottom, - const uint32_t strideWidth, const uint32_t strideHeight, - const uint32_t dilationWidthFactor, - const uint32_t dilationHeightFactor, - const ir::Activation activation, IPortableTensor *output) -{ - _input = input; - _kernel = kernel; - _bias = bias; - _paddingType = paddingType; - _paddingLeft = paddingLeft; - _paddingRight = paddingRight; - _paddingTop = paddingTop; - _paddingBottom = paddingBottom; - _strideWidth = strideWidth; - _strideHeight = strideHeight; - _dilationWidthFactor = dilationWidthFactor; - _dilationHeightFactor = dilationHeightFactor; - _activation = activation; - _output = output; -} - -void ConvolutionLayer::run() -{ - prepare(); - - if (_input->is_dynamic() || _kernel->is_dynamic()) - { - const auto ifm_shape = _input->getShape().asFeature(_input->layout()); - const auto ofm_shape = _output->getShape().asFeature(_input->layout()); - // Kernel format is [depth_out, kernel_height, kernel_width, depth_in]. - const auto ker_shape = _kernel->getShape(); - const auto ker_height = ker_shape.dim(1); - const auto ker_width = ker_shape.dim(2); - - ir::Stride stride; - stride.vertical = _strideWidth; - stride.horizontal = _strideWidth; - - ir::Padding param_padding; - param_padding.type = _paddingType; - param_padding.param.left = _paddingLeft; - param_padding.param.right = _paddingRight; - param_padding.param.top = _paddingTop; - param_padding.param.bottom = _paddingBottom; - - const auto padding = - ir::calculatePadding(param_padding, ifm_shape, ofm_shape, stride, ker_width, ker_height, - _dilationWidthFactor, _dilationHeightFactor); - - _paddingLeft = padding.left; - _paddingRight = padding.right; - _paddingTop = padding.top; - _paddingBottom = padding.bottom; - } - if (_input->data_type() == OperandType::FLOAT32) - { - convFloat32(); - } - else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - convQuant8(); - } - else - { - throw std::runtime_error{"Conv: unsupported data type"}; - } -} - -void ConvolutionLayer::prepare() -{ - if (_prepare) - return; - - nnfw::cker::Conv &kernel = *_conv_kernel; - if (_input->data_type() == OperandType::FLOAT32 && _kernel->is_constant()) - { - bool is_transposed = false; - kernel.prepare(getTensorShape(_kernel), reinterpret_cast<const float *>(_kernel->buffer()), - getPaddingType(_paddingType), is_transposed, _dilationWidthFactor, - _dilationHeightFactor); - - // Decrease reference of _kernel(weights) only when _kernel is constant - if (is_transposed) - { - auto kernel_tensor = dynamic_cast<const Tensor *>(_kernel); - if (kernel_tensor) - // TODO Remove const_cast - const_cast<Tensor *>(kernel_tensor)->decrease_ref(); - } - } - else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM && _kernel->is_constant() && - !_input->is_dynamic() && !_output->is_dynamic()) - { - kernel.prepareQuant(getTensorShape(_input), getTensorShape(_kernel), getTensorShape(_output), - _strideWidth, _strideHeight); - } - _prepare = true; -} - -#undef ANDROID_NN_CONV_PARAMETERS - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/ConvolutionLayer.h b/runtime/onert/backend/cpu/ops/ConvolutionLayer.h deleted file mode 100644 index 398892e65..000000000 --- a/runtime/onert/backend/cpu/ops/ConvolutionLayer.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. 
All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_CONVOLUTIONLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_CONVOLUTIONLAYER_H__ - -#include <backend/IPortableTensor.h> -#include "OperationUtils.h" - -#include <exec/IFunction.h> -#include <functional> -#include <memory> - -namespace nnfw -{ -namespace cker -{ -class Conv; -} -} // namespace nnfw - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class ConvolutionLayer : public ::onert::exec::IFunction -{ -public: - ConvolutionLayer(); - ~ConvolutionLayer(); - -public: - void convFloat32(); - - void convQuant8(); - - void configure(const IPortableTensor *input, const IPortableTensor *kernel, - const IPortableTensor *bias, ir::PaddingType _paddingType, - const uint32_t paddingLeft, const uint32_t paddingRight, const uint32_t paddingTop, - const uint32_t paddingBottom, const uint32_t strideWidth, - const uint32_t strideHeight, const uint32_t dilationWidthFactor, - const uint32_t dilationHeightFactor, const ir::Activation activation, - IPortableTensor *output); - - void run() override; - - void prepare() override; - -private: - const IPortableTensor *_input; - const IPortableTensor *_kernel; - const IPortableTensor *_bias; - IPortableTensor *_output; - - ir::PaddingType _paddingType; - uint32_t _paddingLeft; - uint32_t _paddingTop; - uint32_t _paddingRight; - uint32_t _paddingBottom; - - uint32_t _strideWidth; - uint32_t _strideHeight; - uint32_t _dilationWidthFactor; - uint32_t _dilationHeightFactor; - - ir::Activation _activation; - - std::unique_ptr<nnfw::cker::Conv> _conv_kernel; - - bool _prepare; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_CONVOLUTIONLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/DepthwiseConvolutionLayer.cc b/runtime/onert/backend/cpu/ops/DepthwiseConvolutionLayer.cc deleted file mode 100644 index e67c3f390..000000000 --- a/runtime/onert/backend/cpu/ops/DepthwiseConvolutionLayer.cc +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "DepthwiseConvolutionLayer.h" - -#include <cker/operation/DepthwiseConv.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -DepthwiseConvolutionLayer::DepthwiseConvolutionLayer() - : _input(nullptr), _kernel(nullptr), _bias(nullptr), _output(nullptr), _paddingLeft(0), - _paddingTop(0), _paddingRight(0), _paddingBottom(0), _strideWidth(0), _strideHeight(0), - _multiplier(0), _activation(ir::Activation::NONE) -{ - // DO NOTHING -} - -void DepthwiseConvolutionLayer::convFloat32() -{ - float output_activation_min = 0, output_activation_max = 0; - CalculateActivationRange(_activation, &output_activation_min, &output_activation_max); - - nnfw::cker::DepthwiseConvParams op_params; - op_params.stride_width = _strideWidth; - op_params.stride_height = _strideHeight; - op_params.dilation_width_factor = 1; - op_params.dilation_height_factor = 1; - op_params.padding_values.width = _paddingLeft; - op_params.padding_values.height = _paddingTop; - op_params.depth_multiplier = _multiplier; - op_params.float_activation_min = output_activation_min; - op_params.float_activation_max = output_activation_max; - - nnfw::cker::DepthwiseConv( - op_params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()), - getTensorShape(_kernel), reinterpret_cast<const float *>(_kernel->buffer()), - getTensorShape(_bias), reinterpret_cast<const float *>(_bias->buffer()), - getTensorShape(_output), reinterpret_cast<float *>(_output->buffer())); -} - -void DepthwiseConvolutionLayer::convQuant8() -{ - int32_t output_activation_min = 0; - int32_t output_activation_max = 0; - CalculateActivationRangeUint8(_activation, _output, &output_activation_min, - &output_activation_max); - - double real_multiplier = 0.0; - int32_t output_multiplier = 0; - int32_t output_shift = 0; - GetQuantizedConvolutionMultiplier(_input, _kernel, _bias, _output, &real_multiplier); - QuantizeMultiplier(real_multiplier, &output_multiplier, &output_shift); - - nnfw::cker::DepthwiseConvParams op_params; - op_params.stride_width = _strideWidth; - op_params.stride_height = _strideHeight; - op_params.dilation_width_factor = 1; - op_params.dilation_height_factor = 1; - op_params.padding_values.width = _paddingLeft; - op_params.padding_values.height = _paddingTop; - op_params.depth_multiplier = _multiplier; - op_params.input_offset = -_input->data_offset(); - op_params.weights_offset = -_kernel->data_offset(); - op_params.output_offset = _output->data_offset(); - op_params.output_multiplier = output_multiplier; - op_params.output_shift = output_shift; - op_params.quantized_activation_min = output_activation_min; - op_params.quantized_activation_max = output_activation_max; - - nnfw::cker::DepthwiseConv( - op_params, getTensorShape(_input), reinterpret_cast<const uint8_t *>(_input->buffer()), - getTensorShape(_kernel), reinterpret_cast<const uint8_t *>(_kernel->buffer()), - getTensorShape(_bias), reinterpret_cast<const int32_t *>(_bias->buffer()), - getTensorShape(_output), reinterpret_cast<uint8_t *>(_output->buffer())); -} - -void DepthwiseConvolutionLayer::configure(const IPortableTensor *input, - const IPortableTensor *kernel, - const IPortableTensor *bias, const uint32_t paddingLeft, - const uint32_t paddingRight, const uint32_t paddingTop, - const uint32_t paddingBottom, const uint32_t strideWidth, - const uint32_t strideHeight, const uint32_t multiplier, - const ir::Activation activation, IPortableTensor *output) -{ - _input = input; - _kernel = kernel; - _bias = bias; - _paddingLeft = 
paddingLeft; - _paddingRight = paddingRight; - _paddingTop = paddingTop; - _paddingBottom = paddingBottom; - _strideWidth = strideWidth; - _strideHeight = strideHeight; - _multiplier = multiplier; - _activation = activation; - _output = output; -} - -void DepthwiseConvolutionLayer::run() -{ - if (_input->data_type() == OperandType::FLOAT32) - { - convFloat32(); - } - else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - convQuant8(); - } - else - { - throw std::runtime_error{"DepthwiseConv: unsupported data type"}; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/DepthwiseConvolutionLayer.h b/runtime/onert/backend/cpu/ops/DepthwiseConvolutionLayer.h deleted file mode 100644 index c898255a3..000000000 --- a/runtime/onert/backend/cpu/ops/DepthwiseConvolutionLayer.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_KERNEL_CPU_DEPTHWISECONVOLUTIONLAYER_H__ -#define __ONERT_KERNEL_CPU_DEPTHWISECONVOLUTIONLAYER_H__ - -#include <backend/IPortableTensor.h> -#include "OperationUtils.h" - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class DepthwiseConvolutionLayer : public ::onert::exec::IFunction -{ -public: - DepthwiseConvolutionLayer(); - -public: - void convFloat32(); - - void convQuant8(); - - void configure(const IPortableTensor *input, const IPortableTensor *kernel, - const IPortableTensor *bias, const uint32_t paddingLeft, - const uint32_t paddingRight, const uint32_t paddingTop, - const uint32_t paddingBottom, const uint32_t strideW, const uint32_t strideH, - const uint32_t multiplier, const ir::Activation activation, - IPortableTensor *output); - - void run() override; - -private: - const IPortableTensor *_input; - const IPortableTensor *_kernel; - const IPortableTensor *_bias; - IPortableTensor *_output; - - uint32_t _paddingLeft; - uint32_t _paddingTop; - uint32_t _paddingRight; - uint32_t _paddingBottom; - - uint32_t _strideWidth; - uint32_t _strideHeight; - - uint32_t _multiplier; - - ir::Activation _activation; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_KERNEL_CPU_DEPTHWISECONVOLUTIONLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/EinsumLayer.cc b/runtime/onert/backend/cpu/ops/EinsumLayer.cc deleted file mode 100644 index 8c16740a3..000000000 --- a/runtime/onert/backend/cpu/ops/EinsumLayer.cc +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "EinsumLayer.h" - -#include <cker/operation/Einsum.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -EinsumLayer::EinsumLayer() - : _inputs(), _output(nullptr), _equation(), _einsum_kernel(new nnfw::cker::Einsum()) -{ - // DO NOTHING -} - -EinsumLayer::~EinsumLayer() = default; - -void EinsumLayer::einsumFloat32() -{ - uint32_t num_inputs = _inputs.size(); - nnfw::cker::Einsum &kernel = *_einsum_kernel; - - kernel.prepare(_equation); - - std::vector<nnfw::cker::Shape> inputShapes; - std::vector<const float *> inputFloatPtrs; - - for (uint32_t i = 0; i < num_inputs; i++) - { - inputShapes.emplace_back(getTensorShape(_inputs[i])); - inputFloatPtrs.emplace_back(reinterpret_cast<const float *>(_inputs[i]->buffer())); - } - - kernel(_equation, inputShapes, inputFloatPtrs, getTensorShape(_output), - reinterpret_cast<float *>(_output->buffer())); -} - -void EinsumLayer::run() -{ - if (_output->data_type() == OperandType::FLOAT32) - { - einsumFloat32(); - } - else - { - throw std::runtime_error{"Einsum: unsupported data type"}; - } -} - -void EinsumLayer::configure(const std::vector<const IPortableTensor *> &inputs, - std::string equation, IPortableTensor *output) -{ - assert(inputs.size() > 0); - assert(output != nullptr); - - _inputs = inputs; - _equation = equation; - _output = output; -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/EinsumLayer.h b/runtime/onert/backend/cpu/ops/EinsumLayer.h deleted file mode 100644 index a93f87e77..000000000 --- a/runtime/onert/backend/cpu/ops/EinsumLayer.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef __ONERT_BACKEND_CPU_OPS_EINSUM_LAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_EINSUM_LAYER_H__ - -#include <backend/IPortableTensor.h> -#include "OperationUtils.h" - -#include <exec/IFunction.h> -#include <functional> -#include <memory> - -namespace nnfw -{ -namespace cker -{ -class Einsum; -} -} // namespace nnfw - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class EinsumLayer : public ::onert::exec::IFunction -{ -public: - EinsumLayer(); - ~EinsumLayer(); - -public: - void einsumFloat32(); - - void configure(const std::vector<const IPortableTensor *> &inputs, std::string equation, - IPortableTensor *output); - - void run() override; - -private: - std::vector<const IPortableTensor *> _inputs; - IPortableTensor *_output; - - std::string _equation; - - std::unique_ptr<nnfw::cker::Einsum> _einsum_kernel; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_EINSUM_LAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.cc b/runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.cc deleted file mode 100644 index c1d63172b..000000000 --- a/runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.cc +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "ElementwiseActivationLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/Logistic.h> -#include <cker/operation/ReLU.h> -#include <cker/operation/ReLU6.h> -#include <cker/operation/Tanh.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -ElementwiseActivationLayer::ElementwiseActivationLayer() - : _input(nullptr), _output(nullptr), _kernel() -{ - // DO NOTHING -} - -void ElementwiseActivationLayer::PopulateLookupTable(const ElementwiseActivationType op_type) -{ - const auto input_scale = static_cast<double>(_input->data_scale()); - const auto input_zero_point = static_cast<int32_t>(_input->data_offset()); - const auto output_scale = static_cast<double>(_output->data_scale()); - const auto output_zero_point = static_cast<int32_t>(_output->data_offset()); - const float inverse_scale = 1 / output_scale; - int32_t maxval = std::numeric_limits<uint8_t>::max(); - int32_t minval = std::numeric_limits<uint8_t>::min(); - for (int32_t val = minval; val <= maxval; ++val) - { - const float dequantized = input_scale * (val - input_zero_point); - float transformed = 0.f; - if (op_type == ElementwiseActivationType::kTanh) - { - transformed = std::tanh(dequantized); - } - else if (op_type == ElementwiseActivationType::kLogistic) - { - transformed = 1.0f / (1.0f + std::exp(-dequantized)); - } - else - { - throw std::runtime_error("ElementwiseActivationLayer : unsupported activation type"); - } - const float rescaled = std::round(transformed * inverse_scale); - const int32_t quantized = static_cast<int32_t>(rescaled + output_zero_point); - _table[val] = static_cast<uint8_t>(std::max(std::min(maxval, quantized), minval)); - } -} - -void ElementwiseActivationLayer::EvalUsingLookupTable(const IPortableTensor *input, - IPortableTensor *output) -{ - const int size = MatchingFlatSize(getTensorShape(input), getTensorShape(output)); - const uint8_t *input_data = reinterpret_cast<const uint8_t *>(input->buffer()); - uint8_t *output_data = reinterpret_cast<uint8_t *>(output->buffer()); - - for (int i = 0; i < size; ++i) - { - output_data[i] = _table[input_data[i]]; - } -} - -void ElementwiseActivationLayer::configure(const IPortableTensor *input, IPortableTensor *output, - float alpha, float beta, - ElementwiseActivationType op_type) -{ - _input = input; - _output = output; - - switch (op_type) - { - case ElementwiseActivationType::kLogistic: - if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - PopulateLookupTable(op_type); - _kernel = std::bind(&ElementwiseActivationLayer::EvalUsingLookupTable, this, - std::placeholders::_1, std::placeholders::_2); - } - else if (_input->data_type() == OperandType::FLOAT32) - { - _kernel = [](const IPortableTensor *input, IPortableTensor *output) { - nnfw::cker::Logistic(getTensorShape(input), - reinterpret_cast<const float *>(input->buffer()), - getTensorShape(output), reinterpret_cast<float *>(output->buffer())); - }; - } - else - { - throw std::runtime_error{"ElementwiseActivationLayer(Logistic): unsupported data type"}; - } - break; - case ElementwiseActivationType::kReLU: - if (_input->data_type() == OperandType::FLOAT32) - { - if (alpha == std::numeric_limits<float>::infinity() && beta == 0.f) - { - _kernel = [](const IPortableTensor *input, IPortableTensor *output) { - nnfw::cker::ReLU(getTensorShape(input), - reinterpret_cast<const float *>(input->buffer()), - getTensorShape(output), reinterpret_cast<float *>(output->buffer())); - }; - } - else if (alpha == 6.f && beta == 0.f) - { - _kernel = 
[](const IPortableTensor *input, IPortableTensor *output) { - nnfw::cker::ReLU6(getTensorShape(input), - reinterpret_cast<const float *>(input->buffer()), - reinterpret_cast<float *>(output->buffer())); - }; - } - else - { - throw std::runtime_error( - "ElementwiseActivationLayer : This layer supports only ReLU(0-inf) and ReLU6(0-6)"); - } - } - else - { - throw std::runtime_error{"ElementwiseActivationLayer(ReLU): unsupported data type"}; - } - break; - case ElementwiseActivationType::kTanh: - if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - PopulateLookupTable(op_type); - _kernel = std::bind(&ElementwiseActivationLayer::EvalUsingLookupTable, this, - std::placeholders::_1, std::placeholders::_2); - } - else if (_input->data_type() == OperandType::FLOAT32) - { - _kernel = [](const IPortableTensor *input, IPortableTensor *output) { - nnfw::cker::Tanh(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()), - getTensorShape(output), reinterpret_cast<float *>(output->buffer())); - }; - } - else - { - throw std::runtime_error{"ElementwiseActivationLayer(Tanh): unsupported data type"}; - } - break; - default: - throw std::runtime_error("ElementwiseActivationLayer: unsupported op type"); - } -} - -void ElementwiseActivationLayer::run() { _kernel(_input, _output); } - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.h b/runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.h deleted file mode 100644 index 3ef580041..000000000 --- a/runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -#ifndef __ONERT_BACKEND_CPU_OPS_ElementwiseActivationLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_ElementwiseActivationLAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -enum class ElementwiseActivationType -{ - kLogistic, - kReLU, - kTanh -}; - -class ElementwiseActivationLayer : public ::onert::exec::IFunction -{ -public: - ElementwiseActivationLayer(); - -public: - void configure(const IPortableTensor *input, IPortableTensor *output, float alpha, float beta, - const ElementwiseActivationType op_type); - - void run() override; - - void PopulateLookupTable(const ElementwiseActivationType op_type); - - void EvalUsingLookupTable(const IPortableTensor *input, IPortableTensor *output); - -private: - const IPortableTensor *_input; - IPortableTensor *_output; - uint8_t _table[256]; - std::function<void(const IPortableTensor *input, IPortableTensor *output)> _kernel; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_ElementwiseActivationLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/ElementwiseBinaryLayer.cc b/runtime/onert/backend/cpu/ops/ElementwiseBinaryLayer.cc deleted file mode 100644 index ea3c1e7cd..000000000 --- a/runtime/onert/backend/cpu/ops/ElementwiseBinaryLayer.cc +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "ElementwiseBinaryLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/LogicalOr.h> -#include <cker/operation/MaxMin.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -namespace -{ -template <typename T> -void logicalOrGeneric(const IPortableTensor *lhs, const IPortableTensor *rhs, - IPortableTensor *output) -{ - if (!HaveSameShapes(lhs, rhs)) - { - nnfw::cker::LogicalOrBroadcast<T>( - getTensorShape(lhs), reinterpret_cast<const T *>(lhs->buffer()), getTensorShape(rhs), - reinterpret_cast<const T *>(rhs->buffer()), getTensorShape(output), - reinterpret_cast<T *>(output->buffer())); - } - else - { - nnfw::cker::LogicalOrElementwise<T>( - getTensorShape(lhs), reinterpret_cast<const T *>(lhs->buffer()), - reinterpret_cast<const T *>(rhs->buffer()), reinterpret_cast<T *>(output->buffer())); - } -} - -template <typename T> -void maximumGeneric(const IPortableTensor *lhs, const IPortableTensor *rhs, IPortableTensor *output) -{ - nnfw::cker::Max<T>(getTensorShape(lhs), reinterpret_cast<const T *>(lhs->buffer()), - getTensorShape(rhs), reinterpret_cast<const T *>(rhs->buffer()), - getTensorShape(output), reinterpret_cast<T *>(output->buffer())); -} - -template <typename T> -void minimumGeneric(const IPortableTensor *lhs, const IPortableTensor *rhs, IPortableTensor *output) -{ - nnfw::cker::Min<T>(getTensorShape(lhs), reinterpret_cast<const T *>(lhs->buffer()), - getTensorShape(rhs), reinterpret_cast<const T *>(rhs->buffer()), - getTensorShape(output), reinterpret_cast<T *>(output->buffer())); -} - -bool haveSameQauntInfo(const IPortableTensor *lhs, const IPortableTensor *rhs, - const IPortableTensor *output) -{ - return (lhs->data_scale() == rhs->data_scale() && lhs->data_scale() == output->data_scale()) && - (lhs->data_offset() == rhs->data_offset() && lhs->data_offset() == output->data_offset()); -} -} // namespace - -void ElementwiseBinaryLayer::configure(const IPortableTensor *lhs, const IPortableTensor *rhs, - IPortableTensor *output, const ElementwiseBinaryType op_type) -{ - assert(lhs != nullptr); - assert(rhs != nullptr); - assert(output != nullptr); - - _lhs = lhs; - _rhs = rhs; - _output = output; - - switch (op_type) - { - case ElementwiseBinaryType::kLogicalOr: - if ((_lhs->data_type() == OperandType::BOOL8) && (_rhs->data_type() == OperandType::BOOL8)) - { - _kernel = logicalOrGeneric<bool>; - } - else - { - throw std::runtime_error{"LogicalOr: Unsupported data type"}; - } - break; - case ElementwiseBinaryType::kMax: - if (_lhs->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - if (!haveSameQauntInfo(_lhs, _rhs, _output)) - { - throw std::runtime_error("Max NYI for quantized"); - } - _kernel = maximumGeneric<uint8_t>; - } - else if (_lhs->data_type() == OperandType::FLOAT32) - { - _kernel = maximumGeneric<float>; - } - else - { - throw std::runtime_error{"Max: unsupported data type"}; - } - break; - case ElementwiseBinaryType::kMin: - if (_lhs->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - if (!haveSameQauntInfo(_lhs, _rhs, _output)) - { - throw std::runtime_error("Min NYI for quantized"); - } - _kernel = minimumGeneric<uint8_t>; - } - else if (_lhs->data_type() == OperandType::INT32) - { - _kernel = minimumGeneric<int32_t>; - } - else if (_lhs->data_type() == OperandType::FLOAT32) - { - _kernel = minimumGeneric<float>; - } - else - { - throw std::runtime_error{"Min: unsupported data type"}; - } - break; - default: - throw std::runtime_error{"ElementwiseBinary: Unsupported ElementwiseBinary type"}; - } -} - 
-void ElementwiseBinaryLayer::run() { _kernel(_lhs, _rhs, _output); } - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/ElementwiseBinaryLayer.h b/runtime/onert/backend/cpu/ops/ElementwiseBinaryLayer.h deleted file mode 100644 index 052747a4c..000000000 --- a/runtime/onert/backend/cpu/ops/ElementwiseBinaryLayer.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_ELEMENTWISEBINARYLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_ELEMENTWISEBINARYLAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -enum class ElementwiseBinaryType -{ - kLogicalAnd, - kLogicalOr, - kMax, - kMin, -}; - -class ElementwiseBinaryLayer : public ::onert::exec::IFunction -{ -public: - ElementwiseBinaryLayer() : _lhs(nullptr), _rhs(nullptr), _output(nullptr) - { - // DO NOTHING - } - -public: - void configure(const IPortableTensor *lhs, const IPortableTensor *rhs, IPortableTensor *output, - const ElementwiseBinaryType op_type); - - void run() override; - -private: - const IPortableTensor *_lhs; - const IPortableTensor *_rhs; - IPortableTensor *_output; - std::function<void(const IPortableTensor *, const IPortableTensor *, IPortableTensor *)> _kernel; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_ELEMENTWISEBINARYLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/ElementwiseUnaryLayer.cc b/runtime/onert/backend/cpu/ops/ElementwiseUnaryLayer.cc deleted file mode 100644 index f8f89ab15..000000000 --- a/runtime/onert/backend/cpu/ops/ElementwiseUnaryLayer.cc +++ /dev/null @@ -1,336 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "ElementwiseUnaryLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/Elementwise.h> -#include <cker/operation/Erf.h> -#include <cker/operation/Exp.h> -#include <cker/operation/LogicalNot.h> -#include <cker/operation/Quantize.h> -#include <cker/operation/Round.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -namespace -{ -void absFloat32(const IPortableTensor *input, IPortableTensor *output) -{ - nnfw::cker::Abs(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()), - getTensorShape(output), reinterpret_cast<float *>(output->buffer())); -} - -template <typename FromT> -void castPtr(const FromT *in, DataPtr out, int num_elements, ir::DataType data_type_out) -{ - switch (data_type_out) - { - case ir::DataType::FLOAT32: - std::transform(in, in + num_elements, out.f, [](FromT a) { return static_cast<float>(a); }); - return; - case ir::DataType::INT32: - std::transform(in, in + num_elements, out.i32, - [](FromT a) { return static_cast<int32_t>(a); }); - return; - case ir::DataType::UINT32: - std::transform(in, in + num_elements, out.u32, - [](FromT a) { return static_cast<uint32_t>(a); }); - return; - case ir::DataType::UINT8: - std::transform(in, in + num_elements, out.u8, - [](FromT a) { return static_cast<uint8_t>(a); }); - return; - case ir::DataType::BOOL8: - std::transform(in, in + num_elements, out.b, [](FromT a) { return static_cast<bool>(a); }); - return; - case ir::DataType::INT64: - std::transform(in, in + num_elements, out.i64, - [](FromT a) { return static_cast<int64_t>(a); }); - return; - default: - throw std::runtime_error("Cast: Not supported output type" + - std::to_string((int)data_type_out)); - } -} - -void cast(const IPortableTensor *input, IPortableTensor *output) -{ - auto input_buf = input->buffer(); - auto output_buf = output->buffer(); - const auto in = *reinterpret_cast<const DataPtr *>(&input_buf); - auto out = *reinterpret_cast<DataPtr *>(&output_buf); - - auto input_shape = getTensorShape(input); - auto output_shape = getTensorShape(output); - const auto num_elements = MatchingFlatSize(input_shape, output_shape); - - switch (input->data_type()) - { - case ir::DataType::FLOAT32: - castPtr(in.f, out, num_elements, output->data_type()); - return; - case ir::DataType::INT32: - castPtr(in.i32, out, num_elements, output->data_type()); - return; - case ir::DataType::UINT32: - castPtr(in.u32, out, num_elements, output->data_type()); - return; - case ir::DataType::UINT8: - castPtr(in.u8, out, num_elements, output->data_type()); - return; - case ir::DataType::BOOL8: - castPtr(in.b, out, num_elements, output->data_type()); - return; - case ir::DataType::INT64: - castPtr(in.i64, out, num_elements, output->data_type()); - return; - default: - throw std::runtime_error("Cast: unsupported data type" + - std::to_string((int)input->data_type())); - } -} - -void cosFloat32(const IPortableTensor *input, IPortableTensor *output) -{ - nnfw::cker::Cos(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()), - getTensorShape(output), reinterpret_cast<float *>(output->buffer())); -} - -void expFloat32(const IPortableTensor *input, IPortableTensor *output) -{ - nnfw::cker::Exp(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()), - getTensorShape(output), reinterpret_cast<float *>(output->buffer())); -} - -void erfFloat32(const IPortableTensor *input, IPortableTensor *output) -{ - nnfw::cker::Erf(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()), - 
getTensorShape(output), reinterpret_cast<float *>(output->buffer())); -} - -void logFloat32(const IPortableTensor *input, IPortableTensor *output) -{ - nnfw::cker::Log(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()), - getTensorShape(output), reinterpret_cast<float *>(output->buffer())); -} - -void logicalNot(const IPortableTensor *input, IPortableTensor *output) -{ - nnfw::cker::LogicalNot(getTensorShape(input), reinterpret_cast<const bool *>(input->buffer()), - getTensorShape(output), reinterpret_cast<bool *>(output->buffer())); -} - -void negFloat32(const IPortableTensor *input, IPortableTensor *output) -{ - nnfw::cker::Neg(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()), - getTensorShape(output), reinterpret_cast<float *>(output->buffer())); -} - -template <typename InputT, typename OutputT> -void affineQuantize(const IPortableTensor *input, IPortableTensor *output) -{ - nnfw::cker::Quantize(getTensorShape(input), reinterpret_cast<const InputT *>(input->buffer()), - getTensorShape(output), reinterpret_cast<OutputT *>(output->buffer()), - output->data_scale(), output->data_offset()); -} - -void roundFloat32(const IPortableTensor *input, IPortableTensor *output) -{ - nnfw::cker::Round(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()), - getTensorShape(output), reinterpret_cast<float *>(output->buffer())); -} - -void rsqrtFloat32(const IPortableTensor *input, IPortableTensor *output) -{ - nnfw::cker::Rsqrt(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()), - getTensorShape(output), reinterpret_cast<float *>(output->buffer())); -} - -void sinFloat32(const IPortableTensor *input, IPortableTensor *output) -{ - nnfw::cker::Sin(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()), - getTensorShape(output), reinterpret_cast<float *>(output->buffer())); -} - -template <typename T> void zerosLikeFloat32(const IPortableTensor *input, IPortableTensor *output) -{ - if (!HaveSameShapes(input, output)) - throw std::runtime_error{"ZerosLike: input and output shape don't match."}; - - auto element_size = getTensorShape(input).FlatSize(); - - memset(reinterpret_cast<T *>(output->buffer()), 0, element_size * sizeof(T)); -} -} // namespace - -void ElementwiseUnaryLayer::configure(const IPortableTensor *input, IPortableTensor *output, - const ElementwiseUnaryType op_type) -{ - assert(input != nullptr); - assert(output != nullptr); - - _input = input; - _output = output; - - switch (op_type) - { - case ElementwiseUnaryType::kAbs: - if ((input->data_type() == OperandType::FLOAT32)) - { - _kernel = absFloat32; - } - else - { - throw std::runtime_error{"Abs: Unsupported data type"}; - } - break; - case ElementwiseUnaryType::kCast: - _kernel = cast; - break; - case ElementwiseUnaryType::kCos: - if ((input->data_type() == OperandType::FLOAT32)) - { - _kernel = cosFloat32; - } - else - { - throw std::runtime_error{"Cos: Unsupported data type"}; - } - break; - case ElementwiseUnaryType::kExp: - if ((input->data_type() == OperandType::FLOAT32)) - { - _kernel = expFloat32; - } - else - { - throw std::runtime_error{"Exp: Unsupported data type"}; - } - break; - case ElementwiseUnaryType::kErf: - if ((input->data_type() == OperandType::FLOAT32)) - { - _kernel = erfFloat32; - } - else - { - throw std::runtime_error{"Exp: Unsupported data type"}; - } - break; - case ElementwiseUnaryType::kLog: - if ((input->data_type() == OperandType::FLOAT32)) - { - _kernel = logFloat32; - } - else - { - throw 
std::runtime_error{"Log: Unsupported data type"}; - } - break; - case ElementwiseUnaryType::kLogicalNot: - if ((input->data_type() == OperandType::BOOL8)) - { - _kernel = logicalNot; - } - else - { - throw std::runtime_error{"LogicalNot: Unsupported data type"}; - } - break; - case ElementwiseUnaryType::kNeg: - if ((input->data_type() == OperandType::FLOAT32)) - { - _kernel = negFloat32; - } - else - { - throw std::runtime_error{"Neg: Unsupported data type"}; - } - break; - case ElementwiseUnaryType::kQuantize: - if ((input->data_type() == OperandType::FLOAT32)) - { - _kernel = affineQuantize<float, uint8_t>; - } - else - { - throw std::runtime_error{"Quantize: Unsupported data type"}; - } - break; - case ElementwiseUnaryType::kRound: - if ((input->data_type() == OperandType::FLOAT32)) - { - _kernel = roundFloat32; - } - else - { - throw std::runtime_error{"Round: Unsupported data type"}; - } - break; - case ElementwiseUnaryType::kRSqrt: - if ((input->data_type() == OperandType::FLOAT32)) - { - _kernel = rsqrtFloat32; - } - else - { - throw std::runtime_error{"RSqrt: Unsupported data type"}; - } - break; - case ElementwiseUnaryType::kSin: - if ((input->data_type() == OperandType::FLOAT32)) - { - _kernel = sinFloat32; - } - else - { - throw std::runtime_error{"Sin: Unsupported data type"}; - } - break; - case ElementwiseUnaryType::kZerosLike: - if (input->data_type() == OperandType::FLOAT32) - { - _kernel = zerosLikeFloat32<float>; - } - else if (input->data_type() == OperandType::INT32) - { - _kernel = zerosLikeFloat32<int32_t>; - } - else - { - throw std::runtime_error{"ZerosLike: Unsupported data type"}; - } - break; - default: - throw std::runtime_error{"ElementwiseBinary: Unsupported ElementwiseBinary type"}; - } -} - -void ElementwiseUnaryLayer::run() { _kernel(_input, _output); } - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/ElementwiseUnaryLayer.h b/runtime/onert/backend/cpu/ops/ElementwiseUnaryLayer.h deleted file mode 100644 index 74968386d..000000000 --- a/runtime/onert/backend/cpu/ops/ElementwiseUnaryLayer.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef __ONERT_BACKEND_CPU_OPS_ELEMENTWISEUNARYLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_ELEMENTWISEUNARYLAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -enum class ElementwiseUnaryType -{ - kAbs, - kCast, - kCos, - kErf, - kExp, - kLog, - kLogicalNot, - kNeg, - kQuantize, - kRound, - kRSqrt, - kSin, - kZerosLike -}; - -class ElementwiseUnaryLayer : public ::onert::exec::IFunction -{ -public: - ElementwiseUnaryLayer() : _input(nullptr), _output(nullptr), _kernel() - { - // DO NOTHING - } - -public: - void configure(const IPortableTensor *input, IPortableTensor *output, - const ElementwiseUnaryType op_type); - - void run() override; - -private: - const IPortableTensor *_input; - IPortableTensor *_output; - std::function<void(const IPortableTensor *, IPortableTensor *)> _kernel; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_ELEMENTWISEUNARYLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/ExpandDimsLayer.cc b/runtime/onert/backend/cpu/ops/ExpandDimsLayer.cc deleted file mode 100644 index b545e6743..000000000 --- a/runtime/onert/backend/cpu/ops/ExpandDimsLayer.cc +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "ExpandDimsLayer.h" - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -ExpandDimsLayer::ExpandDimsLayer() : _input(nullptr), _axis(nullptr), _output(nullptr) -{ - // DO NOTHING -} - -void ExpandDimsLayer::configure(const IPortableTensor *input, const IPortableTensor *axis, - IPortableTensor *output) -{ - _input = input; - _axis = axis; - _output = output; -} - -void ExpandDimsLayer::run() -{ - // TODO use _axis to calculate shape of output when _axis is not constant - size_t count = _input->total_size(); - memcpy(_output->buffer(), _input->buffer(), count); -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/ExpandDimsLayer.h b/runtime/onert/backend/cpu/ops/ExpandDimsLayer.h deleted file mode 100644 index b5d4938b5..000000000 --- a/runtime/onert/backend/cpu/ops/ExpandDimsLayer.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ExpandDi__ONERT_BACKEND_CPU_OPS_EXPANDDIMS_LAYER_H__ms -#define ExpandDi__ONERT_BACKEND_CPU_OPS_EXPANDDIMS_LAYER_H__ms - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class ExpandDimsLayer : public ::onert::exec::IFunction -{ -public: - ExpandDimsLayer(); - -public: - void configure(const IPortableTensor *input, const IPortableTensor *axis, - IPortableTensor *output); - - void run() override; - -private: - const IPortableTensor *_input; - const IPortableTensor *_axis; - IPortableTensor *_output; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // ExpandDi__ONERT_BACKEND_CPU_OPS_EXPANDDIMS_LAYER_H__ms diff --git a/runtime/onert/backend/cpu/ops/FillLayer.cc b/runtime/onert/backend/cpu/ops/FillLayer.cc deleted file mode 100644 index 0a95ab005..000000000 --- a/runtime/onert/backend/cpu/ops/FillLayer.cc +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "FillLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/Fill.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -FillLayer::FillLayer() : _input(nullptr), _value(nullptr), _output(nullptr) -{ - // DO NOTHING -} - -void FillLayer::configure(const IPortableTensor *input, const IPortableTensor *value, - IPortableTensor *output) -{ - _input = input; - _value = value; - _output = output; -} - -void FillLayer::run() -{ - switch (_output->data_type()) - { - case OperandType::FLOAT32: - nnfw::cker::Fill<float *>(getTensorShape(_input), reinterpret_cast<int *>(_input->buffer()), - reinterpret_cast<float *>(_value->buffer()), - getTensorShape(_output), - reinterpret_cast<float *>(_output->buffer())); - break; - case OperandType::INT32: - nnfw::cker::Fill<int32_t *>(getTensorShape(_input), reinterpret_cast<int *>(_input->buffer()), - reinterpret_cast<int32_t *>(_value->buffer()), - getTensorShape(_output), - reinterpret_cast<int32_t *>(_output->buffer())); - break; - case OperandType::UINT32: - nnfw::cker::Fill<uint32_t *>( - getTensorShape(_input), reinterpret_cast<int *>(_input->buffer()), - reinterpret_cast<uint32_t *>(_value->buffer()), getTensorShape(_output), - reinterpret_cast<uint32_t *>(_output->buffer())); - break; - default: - throw std::runtime_error{"Fill: unsupported data type"}; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/FillLayer.h b/runtime/onert/backend/cpu/ops/FillLayer.h deleted file mode 100644 index 1f17d6b68..000000000 --- a/runtime/onert/backend/cpu/ops/FillLayer.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. 
All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in riting, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_FILLLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_FILLLAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class FillLayer : public ::onert::exec::IFunction -{ -public: - FillLayer(); - - void configure(const IPortableTensor *input, const IPortableTensor *value, - IPortableTensor *output); - - void run() override; - -private: - const IPortableTensor *_input; - const IPortableTensor *_value; - IPortableTensor *_output; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_FILLLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/FullyConnectedLayer.cc b/runtime/onert/backend/cpu/ops/FullyConnectedLayer.cc deleted file mode 100644 index f873a3430..000000000 --- a/runtime/onert/backend/cpu/ops/FullyConnectedLayer.cc +++ /dev/null @@ -1,268 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "FullyConnectedLayer.h" - -#include "../Tensor.h" -#include <cker/operation/FullyConnected.h> -#include <cker/TensorUtils.h> -#include <misc/polymorphic_downcast.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -FullyConnectedLayer::FullyConnectedLayer() - : _input(nullptr), _weights(nullptr), _bias(nullptr), _output(nullptr), - _activation(ir::Activation::NONE), _temp_arena(new nnfw::cker::FCTempArena()), - _external_context(nullptr), _is_hybrid(false) -{ - // DO NOTHING -} - -FullyConnectedLayer::~FullyConnectedLayer() = default; - -void FullyConnectedLayer::fullyConnectedFloat32() -{ - float output_activation_min = 0, output_activation_max = 0; - CalculateActivationRange(_activation, &output_activation_min, &output_activation_max); - - nnfw::cker::FullyConnectedParams op_params; - op_params.float_activation_min = output_activation_min; - op_params.float_activation_max = output_activation_max; - op_params.activation = convertActivationType(_activation); - - nnfw::cker::FullyConnected( - op_params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()), - getTensorShape(_weights), reinterpret_cast<const float *>(_weights->buffer()), - getTensorShape(_bias), reinterpret_cast<const float *>(_bias ? 
_bias->buffer() : nullptr), - getTensorShape(_output), reinterpret_cast<float *>(_output->buffer())); -} - -// executionMutex is used to protect concurrent access of non-threadsafe resources -// like gemmlowp::GemmContext. -void FullyConnectedLayer::fullyConnectedQuant8() -{ - double real_multiplier = 0.0; - int32_t output_multiplier = 0; - int32_t output_shift = 0; - int32_t output_activation_min = 0; - int32_t output_activation_max = 0; - GetQuantizedConvolutionMultiplier(_input, _weights, _bias, _output, &real_multiplier); - QuantizeMultiplier(real_multiplier, &output_multiplier, &output_shift); - CalculateActivationRangeUint8(_activation, _output, &output_activation_min, - &output_activation_max); - - nnfw::cker::FullyConnectedParams op_params; - op_params.input_offset = -_input->data_offset(); - op_params.weights_offset = -_weights->data_offset(); - op_params.output_offset = _output->data_offset(); - op_params.output_multiplier = output_multiplier; - op_params.output_shift = output_shift; - op_params.quantized_activation_min = output_activation_min; - op_params.quantized_activation_max = output_activation_max; - - nnfw::cker::FullyConnected( - op_params, getTensorShape(_input), reinterpret_cast<const uint8_t *>(_input->buffer()), - getTensorShape(_weights), reinterpret_cast<const uint8_t *>(_weights->buffer()), - getTensorShape(_bias), reinterpret_cast<const int32_t *>(_bias ? _bias->buffer() : nullptr), - getTensorShape(_output), reinterpret_cast<uint8_t *>(_output->buffer())); -} - -void FullyConnectedLayer::fullyConnectedHybrid() -{ - nnfw::cker::FCTempArena &temp_arena = *_temp_arena; - if (!temp_arena.prepared) - { - temp_arena.prepare(getTensorShape(_input), getTensorShape(_weights)); - } - - nnfw::cker::FullyConnectedParams op_params; - op_params.activation = convertActivationType(_activation); - op_params.weights_scale = _weights->data_scale(); - -#ifndef USE_RUY_GEMV - nnfw::cker::FullyConnectedHybrid( - op_params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()), - getTensorShape(_weights), reinterpret_cast<const int8_t *>(_weights->buffer()), - getTensorShape(_bias), reinterpret_cast<const float *>(_bias ? _bias->buffer() : nullptr), - getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()), temp_arena, - _external_context->ruy_context()); -#else - nnfw::cker::FullyConnectedHybrid( - op_params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()), - getTensorShape(_weights), - (_cached_weights) ? reinterpret_cast<const int8_t *>(_cached_weights) - : reinterpret_cast<const int8_t *>(_weights->buffer()), - getTensorShape(_bias), reinterpret_cast<const float *>(_bias ? _bias->buffer() : nullptr), - getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()), temp_arena, - _external_context->ruy_context()); - - if (_cached_weights == nullptr || _is_weights_freed) - return; - - // '_cached_weights is not nullptr and _is_weights_freed is false' means - // this weight shape is satisfied with the ruy kernel's prepack cache's condition. 
- // After entering here, it will not enter again except below the case - input is zero-vector - - // if input's elements are filled with zero, it by-passes(does not enter ruy-kernel path) - // so that handle this case - const int input_size = getTensorShape(_input).FlatSize(); - if (nnfw::cker::IsZeroVector(reinterpret_cast<float *>(_input->buffer()), input_size)) - return; - - auto weight_tensor = nnfw::misc::polymorphic_downcast<const Tensor *>(_weights); - - // This weight tensor could be other ops' const tensor. - // Therefore, below reference should be checked like following - auto tensor = const_cast<Tensor *>(weight_tensor); - if (tensor->buffer() == nullptr) // ref is already 0? - { - _is_weights_freed = true; - return; - } - - tensor->decrease_ref(); - if (tensor->buffer() == nullptr) // ref == 0? - { - _is_weights_freed = true; - } -#endif -} - -void FullyConnectedLayer::fullyConnectedSparseWeight() -{ - float output_activation_min = 0, output_activation_max = 0; - CalculateActivationRange(_activation, &output_activation_min, &output_activation_max); - - nnfw::cker::FullyConnectedParams op_params; - op_params.float_activation_min = output_activation_min; - op_params.float_activation_max = output_activation_max; - op_params.activation = convertActivationType(_activation); - - const uint16_t *w1_segments = _weights->sparsity()->w1_segments(); - const uint16_t *w1_indices = _weights->sparsity()->w1_indices(); - - auto block_size = _weights->sparsity()->block_size(); - if (block_size.size() == 0) - { - nnfw::cker::FullyConnectedSparseWeightRandom( - op_params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()), - getTensorShape(_weights), reinterpret_cast<const float *>(_weights->buffer()), - getTensorShape(_bias), reinterpret_cast<const float *>(_bias ? _bias->buffer() : nullptr), - getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()), w1_segments, - w1_indices); - } - else if (block_size.size() == 2 && block_size[0] == 16 && block_size[1] == 1) - { - nnfw::cker::FullyConnectedSparseWeight16x1( - op_params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()), - getTensorShape(_weights), reinterpret_cast<const float *>(_weights->buffer()), - getTensorShape(_bias), reinterpret_cast<const float *>(_bias ? 
_bias->buffer() : nullptr), - getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()), w1_segments, - w1_indices); - } - else - throw std::runtime_error{"FullyConnected: unsupported sparsity"}; -} - -void FullyConnectedLayer::configure(const IPortableTensor *input, const IPortableTensor *weights, - const IPortableTensor *bias, ir::Activation activation, - IPortableTensor *output, - const std::shared_ptr<ExternalContext> &external_context) -{ - _input = input; - _weights = weights; - _bias = bias; - _activation = activation; - _output = output; - _is_hybrid = input->data_type() == OperandType::FLOAT32 && - weights->data_type() == OperandType::QUANT_INT8_SYMM; - _external_context = external_context; -} - -void FullyConnectedLayer::run() -{ - if (_is_hybrid) - { - fullyConnectedHybrid(); - } - else if (_weights->sparsity()) - { - fullyConnectedSparseWeight(); - } - else if (_input->data_type() == OperandType::FLOAT32) - { - fullyConnectedFloat32(); - } - else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - fullyConnectedQuant8(); - } - else - { - throw std::runtime_error{"FullyConnected: unsupported data type"}; - } -} - -void FullyConnectedLayer::prepare() -{ - if (_bias && _bias->is_constant()) - { - const int bias_size = getTensorShape(_bias).FlatSize(); - if (nnfw::cker::IsZeroVector(reinterpret_cast<float *>(_bias->buffer()), bias_size)) - { - _bias = nullptr; - } - } - -#if (defined(__ARM_NEON__) || defined(__ARM_NEON)) && defined(USE_RUY_GEMV) - // TODO This is workaround - // The only fc hybrid will use ruy kernel - if (_input->data_type() != OperandType::FLOAT32 || - _weights->data_type() != OperandType::QUANT_INT8_SYMM) - { - return; - } - - // NOTE. The condition to enable caching on ruy kernel can be changed according to ruy's version - - // If input is dynamic, it changes total size of input - // If weights is not constant, weights cannot be cached - if (_input->is_dynamic() || !_weights->is_constant()) - return; - - const int rows = getTensorShape(_weights).Dims(0); - if (rows % 4 == 0) - { - // TODO If it's possible to extract precaching from ruy kernel, - // place this instead of below code - - // buffer will be used by ruy kernel as a cache key - _cached_weights = _weights->buffer(); - } -#endif -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/FullyConnectedLayer.h b/runtime/onert/backend/cpu/ops/FullyConnectedLayer.h deleted file mode 100644 index f1242677c..000000000 --- a/runtime/onert/backend/cpu/ops/FullyConnectedLayer.h +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef __ONERT_BACKEND_CPU_OPS_FULLYCONNECTEDLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_FULLYCONNECTEDLAYER_H__ - -#include <backend/IPortableTensor.h> -#include "../ExternalContext.h" -#include "OperationUtils.h" - -#include <exec/IFunction.h> - -namespace nnfw -{ -namespace cker -{ -class FCTempArena; -} -} // namespace nnfw - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class FullyConnectedLayer : public ::onert::exec::IFunction -{ -public: - FullyConnectedLayer(); - ~FullyConnectedLayer(); - -public: - void fullyConnectedFloat32(); - - void fullyConnectedQuant8(); - - void fullyConnectedHybrid(); - - void fullyConnectedSparseWeight(); - - void configure(const IPortableTensor *input, const IPortableTensor *weights, - const IPortableTensor *bias, ir::Activation activation, IPortableTensor *output, - const std::shared_ptr<ExternalContext> &external_context); - - void run() override; - - void prepare() override; - -private: - const IPortableTensor *_input; - const IPortableTensor *_weights; - const IPortableTensor *_bias; - IPortableTensor *_output; - - ir::Activation _activation; - std::unique_ptr<nnfw::cker::FCTempArena> _temp_arena; - - std::shared_ptr<ExternalContext> _external_context; - - bool _is_hybrid; - -#ifdef USE_RUY_GEMV - uint8_t *_cached_weights = nullptr; // weights to be cached and a key - bool _is_weights_freed = false; // is weights freed? -#endif -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_FULLYCONNECTEDLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/FusedBatchNormLayer.cc b/runtime/onert/backend/cpu/ops/FusedBatchNormLayer.cc deleted file mode 100644 index c2c592db7..000000000 --- a/runtime/onert/backend/cpu/ops/FusedBatchNormLayer.cc +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "FusedBatchNormLayer.h" - -#include <cker/operation/FusedBatchNorm.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -FusedBatchNormLayer::FusedBatchNormLayer() - : _inputs(), _output(nullptr), _epsilon(0), _is_training(true), - _fusedbatchnorm_kernel(new nnfw::cker::FusedBatchNorm()) -{ - // DO NOTHING -} - -FusedBatchNormLayer::~FusedBatchNormLayer() = default; - -void FusedBatchNormLayer::fusedbatchnormFloat32() -{ - uint32_t num_inputs = _inputs.size(); - nnfw::cker::FusedBatchNorm &kernel = *_fusedbatchnorm_kernel; - - kernel.prepare(); - - std::vector<nnfw::cker::Shape> inputShapes; - std::vector<const float *> inputFloatPtrs; - - for (uint32_t i = 0; i < num_inputs; i++) - { - inputShapes.emplace_back(getTensorShape(_inputs[i])); - inputFloatPtrs.emplace_back(reinterpret_cast<const float *>(_inputs[i]->buffer())); - } - - nnfw::cker::FusedBatchNormParams param; - - param.epsilon = _epsilon; - param.is_training = _is_training; - param.data_format = _data_format; - - kernel(inputShapes, inputFloatPtrs, getTensorShape(_output), - reinterpret_cast<float *>(_output->buffer()), param); -} - -void FusedBatchNormLayer::run() -{ - if (_output->data_type() == OperandType::FLOAT32) - { - fusedbatchnormFloat32(); - } - else - { - throw std::runtime_error{"FusedBatchNorm: unsupported data type"}; - } -} - -void FusedBatchNormLayer::configure(const std::vector<const IPortableTensor *> &inputs, - float epsilon, bool is_training, std::string data_format, - IPortableTensor *output) -{ - assert(inputs.size() > 0); - assert(output != nullptr); - - _inputs = inputs; - _output = output; - _epsilon = epsilon; - _is_training = is_training; - _data_format = data_format; -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/FusedBatchNormLayer.h b/runtime/onert/backend/cpu/ops/FusedBatchNormLayer.h deleted file mode 100644 index d42b0c900..000000000 --- a/runtime/onert/backend/cpu/ops/FusedBatchNormLayer.h +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef __ONERT_BACKEND_CPU_OPS_FUSEDBATCHNORM_LAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_FUSEDBATCHNORM_LAYER_H__ - -#include <backend/IPortableTensor.h> -#include "OperationUtils.h" - -#include <exec/IFunction.h> -#include <functional> -#include <memory> - -namespace nnfw -{ -namespace cker -{ -class FusedBatchNorm; -} -} // namespace nnfw - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class FusedBatchNormLayer : public ::onert::exec::IFunction -{ -public: - FusedBatchNormLayer(); - ~FusedBatchNormLayer(); - -public: - void fusedbatchnormFloat32(); - - void configure(const std::vector<const IPortableTensor *> &inputs, float epsilon, - bool is_training, std::string data_format, IPortableTensor *output); - - void run() override; - -private: - std::vector<const IPortableTensor *> _inputs; - IPortableTensor *_output; - float _epsilon; - bool _is_training; - std::string _data_format; - - std::unique_ptr<nnfw::cker::FusedBatchNorm> _fusedbatchnorm_kernel; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_FUSEDBATCHNORM_LAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/GatherLayer.cc b/runtime/onert/backend/cpu/ops/GatherLayer.cc deleted file mode 100644 index 641daa972..000000000 --- a/runtime/onert/backend/cpu/ops/GatherLayer.cc +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
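The GatherLayer.cc removed just below instantiates one cker kernel per (input type, indices type) pair: runByInputType<T>() is templated on the element type and then switches on the indices type. As a reference only, here is a minimal standalone sketch of the operation it dispatches to, reduced to gathering rows of a row-major 2-D buffer (names and layout are illustrative, not the cker API):

#include <cstddef>
#include <vector>

// Gather rows of a row-major [num_rows x cols] buffer. T and IndexT mirror the
// element-type / indices-type dispatch done by GatherLayer::runByInputType().
template <typename T, typename IndexT>
std::vector<T> gatherRows(const std::vector<T> &data, int cols, const std::vector<IndexT> &indices)
{
  std::vector<T> out;
  out.reserve(indices.size() * static_cast<std::size_t>(cols));
  for (IndexT row : indices)
  {
    // Out-of-range indices are the caller's responsibility, as in the real kernel.
    const std::size_t base = static_cast<std::size_t>(row) * static_cast<std::size_t>(cols);
    for (int c = 0; c < cols; ++c)
      out.push_back(data[base + static_cast<std::size_t>(c)]);
  }
  return out;
}

For example, gatherRows<float, int64_t>(data, 4, {2, 0}) returns rows 2 and 0 of a 4-column buffer, which corresponds to a gather along axis 0.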
- */ - -#include "GatherLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/Gather.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -void GatherLayer::configure(const IPortableTensor *input, const IPortableTensor *indices, - IPortableTensor *output, int32_t axis) -{ - _input = input; - _indices = indices; - _axis = axis; - _output = output; -} - -template <typename InputType> void GatherLayer::runByInputType() -{ - using OutputType = InputType; - nnfw::cker::GatherParams op_params; - op_params.axis = _axis; - - switch (_indices->data_type()) - { - case OperandType::INT32: - { - using IndicesType = int32_t; - - nnfw::cker::Gather<InputType, IndicesType>( - op_params, getTensorShape(_input), reinterpret_cast<const InputType *>(_input->buffer()), - getTensorShape(_indices), reinterpret_cast<const IndicesType *>(_indices->buffer()), - getTensorShape(_output), reinterpret_cast<OutputType *>(_output->buffer())); - break; - } - case OperandType::INT64: - { - using IndicesType = int64_t; - - nnfw::cker::Gather<InputType, IndicesType>( - op_params, getTensorShape(_input), reinterpret_cast<const InputType *>(_input->buffer()), - getTensorShape(_indices), reinterpret_cast<const IndicesType *>(_indices->buffer()), - getTensorShape(_output), reinterpret_cast<OutputType *>(_output->buffer())); - break; - } - default: - throw std::runtime_error("Gather: unsupported indices data type"); - } -} - -void GatherLayer::run() -{ - switch (_input->data_type()) - { - case OperandType::FLOAT32: - runByInputType<float>(); - break; - case OperandType::QUANT_UINT8_ASYMM: - runByInputType<uint8_t>(); - break; - case OperandType::INT32: - runByInputType<int32_t>(); - break; - default: - throw std::runtime_error("Gather: unsupported input data type"); - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/GatherLayer.h b/runtime/onert/backend/cpu/ops/GatherLayer.h deleted file mode 100644 index 8fe80cc2b..000000000 --- a/runtime/onert/backend/cpu/ops/GatherLayer.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef __ONERT_BACKEND_CPU_OPS_GATHERLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_GATHERLAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class GatherLayer : public ::onert::exec::IFunction -{ -public: - GatherLayer() : _input{nullptr}, _indices{nullptr}, _output{nullptr}, _axis{-1} - { - // DO NOTHING - } - -public: - void configure(const IPortableTensor *input, const IPortableTensor *indices, - IPortableTensor *output, int32_t axis); - - void run() override; - -private: - template <typename OpType> void runByInputType(); - -private: - const IPortableTensor *_input; - const IPortableTensor *_indices; - IPortableTensor *_output; - - int32_t _axis; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_GATHERLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/L2NormLayer.cc b/runtime/onert/backend/cpu/ops/L2NormLayer.cc deleted file mode 100644 index 0d99b0586..000000000 --- a/runtime/onert/backend/cpu/ops/L2NormLayer.cc +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "L2NormLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/L2Normalize.h> -#include <cker/Types.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -void L2NormLayer::configure(const IPortableTensor *input, IPortableTensor *output) -{ - assert(input != nullptr); - assert(output != nullptr); - - _input = input; - _output = output; -} - -void L2NormLayer::run() -{ - switch (_input->data_type()) - { - case OperandType::FLOAT32: - nnfw::cker::L2NormalizeFloat32( - getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()), - getTensorShape(_output), reinterpret_cast<float *>(_output->buffer())); - break; - - case OperandType::QUANT_UINT8_ASYMM: - { - nnfw::cker::L2NormParams params; - assert(_input->data_offset() == 128); - params.input_zero_point = _input->data_offset(); - nnfw::cker::L2NormalizeQuant8( - params, getTensorShape(_input), reinterpret_cast<const uint8_t *>(_input->buffer()), - getTensorShape(_output), reinterpret_cast<uint8_t *>(_output->buffer())); - } - break; - - default: - throw std::runtime_error{"L2Norm: Unsupported data type"}; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/L2NormLayer.h b/runtime/onert/backend/cpu/ops/L2NormLayer.h deleted file mode 100644 index 63f2d1133..000000000 --- a/runtime/onert/backend/cpu/ops/L2NormLayer.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in riting, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_L2NORM_LAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_L2NORM_LAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ -class L2NormLayer : public ::onert::exec::IFunction -{ -public: - L2NormLayer() : _input(nullptr), _output(nullptr) - { - // Nothing - } - -public: - void configure(const IPortableTensor *_input, IPortableTensor *output); - - void run() override; - -private: - const IPortableTensor *_input; - IPortableTensor *_output; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_L2NORM_LAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/LogSoftMaxLayer.cc b/runtime/onert/backend/cpu/ops/LogSoftMaxLayer.cc deleted file mode 100644 index 1d7ee6caa..000000000 --- a/runtime/onert/backend/cpu/ops/LogSoftMaxLayer.cc +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
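LogSoftMaxLayer.cc, removed below, precomputes a 256-entry exp table in PopulateLookupTable() so the QUANT_UINT8_ASYMM path never calls expf per element. A hedged sketch of the same idea follows; the real layer stores these values at a reversed index, with -_input->data_scale() * beta folded into a single negative scale:

#include <array>
#include <cmath>

// One entry per possible uint8 difference d between an element and the row
// maximum: exp(-input_scale * beta * d). Working with differences keeps the
// exponent non-positive, so every entry lies in (0, 1].
std::array<float, 256> buildExpTable(float input_scale, float beta)
{
  std::array<float, 256> table{};
  for (int d = 0; d < 256; ++d)
    table[d] = std::exp(-input_scale * beta * static_cast<float>(d));
  return table;
}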
- */ - -#include "LogSoftMaxLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/LogSoftMax.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -LogSoftMaxLayer::LogSoftMaxLayer() : _input(nullptr), _output(nullptr), _beta(0.0), _axis(0) -{ - // DO NOTHING -} - -void LogSoftMaxLayer::PopulateLookupTable(const float kBeta) -{ - const float scale = -_input->data_scale() * kBeta; - const int32_t max_uint8 = std::numeric_limits<uint8_t>::max(); - for (int32_t val = 0; val <= max_uint8; ++val) - { - _table[max_uint8 - val] = expf(scale * val); - } -} - -void LogSoftMaxLayer::logsoftmaxFloat32() -{ - nnfw::cker::SoftmaxParams op_params; - op_params.beta = _beta; - op_params.axis = _axis; - nnfw::cker::LogSoftmax(op_params, getTensorShape(_input), - reinterpret_cast<const float *>(_input->buffer()), getTensorShape(_output), - reinterpret_cast<float *>(_output->buffer())); -} - -void LogSoftMaxLayer::logsoftmaxQuant8() -{ - nnfw::cker::SoftmaxParams op_params; - op_params.beta = _beta; - op_params.axis = _axis; - op_params.table = _table; - op_params.zero_point = _output->data_offset(); - op_params.scale = _output->data_scale(); - nnfw::cker::LogSoftmax(op_params, _input->data_scale(), getTensorShape(_input), - reinterpret_cast<const uint8_t *>(_input->buffer()), - getTensorShape(_output), reinterpret_cast<uint8_t *>(_output->buffer())); -} - -void LogSoftMaxLayer::configure(const IPortableTensor *input, const float beta, const int axis, - IPortableTensor *output) -{ - _input = input; - _output = output; - _beta = beta; - _axis = axis; - if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - PopulateLookupTable(_beta); - } -} - -void LogSoftMaxLayer::run() -{ - if (_input->data_type() == OperandType::FLOAT32) - { - logsoftmaxFloat32(); - } - else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - logsoftmaxQuant8(); - } - else - { - throw std::runtime_error{"LogSoftmax : unsupported data type"}; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/LogSoftMaxLayer.h b/runtime/onert/backend/cpu/ops/LogSoftMaxLayer.h deleted file mode 100644 index 1533f3361..000000000 --- a/runtime/onert/backend/cpu/ops/LogSoftMaxLayer.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef __ONERT_BACKEND_CPU_OPS_LOGSOFTMAXLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_LOGSOFTMAXLAYER_H__ - -#include "../Tensor.h" - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class LogSoftMaxLayer : public ::onert::exec::IFunction -{ -public: - LogSoftMaxLayer(); - -public: - void logsoftmaxFloat32(); - - void logsoftmaxQuant8(); - - void configure(const IPortableTensor *input, const float beta, const int axis, - IPortableTensor *output); - - void run(); - - void PopulateLookupTable(const float kBeta); - -private: - const IPortableTensor *_input; - IPortableTensor *_output; - - float _beta; - int _axis; - float _table[256]; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_LOGSOFTMAXLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/MatrixBandPartLayer.cc b/runtime/onert/backend/cpu/ops/MatrixBandPartLayer.cc deleted file mode 100644 index b770cce5d..000000000 --- a/runtime/onert/backend/cpu/ops/MatrixBandPartLayer.cc +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "MatrixBandPartLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/MatrixBandPart.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -MatrixBandPartLayer::MatrixBandPartLayer() - : _input(nullptr), _num_lower_diag(nullptr), _num_upper_diag(nullptr), _output(nullptr) -{ - // DO NOTHING -} - -void MatrixBandPartLayer::matrixBandPartFloat32() -{ - if (_num_lower_diag->data_type() == OperandType::INT64) - { - nnfw::cker::MatrixBandPart<int64_t>( - *reinterpret_cast<const int64_t *>(_num_lower_diag->buffer()), - *reinterpret_cast<const int64_t *>(_num_upper_diag->buffer()), getTensorShape(_input), - reinterpret_cast<const float *>(_input->buffer()), getTensorShape(_output), - reinterpret_cast<float *>(_output->buffer())); - } - else - { - nnfw::cker::MatrixBandPart<int32_t>( - *reinterpret_cast<const int32_t *>(_num_lower_diag->buffer()), - *reinterpret_cast<const int32_t *>(_num_upper_diag->buffer()), getTensorShape(_input), - reinterpret_cast<const float *>(_input->buffer()), getTensorShape(_output), - reinterpret_cast<float *>(_output->buffer())); - } -} - -void MatrixBandPartLayer::matrixBandPartQuant8() { throw std::runtime_error{"NYI"}; } - -void MatrixBandPartLayer::configure(const IPortableTensor *input, - const IPortableTensor *num_lower_diag, - const IPortableTensor *num_upper_diag, IPortableTensor *output) -{ - _input = input; - _num_lower_diag = num_lower_diag; - _num_upper_diag = num_upper_diag; - _output = output; -} - -void MatrixBandPartLayer::run() -{ - if (_num_lower_diag->data_type() != _num_upper_diag->data_type()) - { - throw std::runtime_error{"MatrixBandpart: num_lower and num_upper must have the same type"}; - } - - if (_input->data_type() == OperandType::FLOAT32) - { - 
matrixBandPartFloat32(); - } - else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - matrixBandPartQuant8(); - } - else - { - throw std::runtime_error{"MatrixBandpart: unsupported data type"}; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/MatrixBandPartLayer.h b/runtime/onert/backend/cpu/ops/MatrixBandPartLayer.h deleted file mode 100644 index 9dcc6b277..000000000 --- a/runtime/onert/backend/cpu/ops/MatrixBandPartLayer.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in riting, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_MATRIXBANDPARTLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_MATRIXBANDPARTLAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class MatrixBandPartLayer : public ::onert::exec::IFunction -{ -public: - MatrixBandPartLayer(); - -public: - void matrixBandPartFloat32(); - - void matrixBandPartQuant8(); - - void configure(const IPortableTensor *input, const IPortableTensor *num_lower_diag, - const IPortableTensor *num_upper_diag, IPortableTensor *output); - - void run() override; - -private: - const IPortableTensor *_input; - const IPortableTensor *_num_lower_diag; - const IPortableTensor *_num_upper_diag; - IPortableTensor *_output; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_MATRIXBANDPARTLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/MeanLayer.cc b/runtime/onert/backend/cpu/ops/MeanLayer.cc deleted file mode 100644 index 4921ac748..000000000 --- a/runtime/onert/backend/cpu/ops/MeanLayer.cc +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
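MeanLayer.cc, removed below, forwards both quantization parameter pairs to cker::MeanQ8Asymm because the average has to be taken over real values and then re-expressed in the output's scale. A simplified, assumed reading of that rescaling, collapsed to a single full reduction (the real kernel reduces only over the axes passed in):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

uint8_t meanQuant8(const std::vector<uint8_t> &in, float in_scale, int32_t in_zp,
                   float out_scale, int32_t out_zp)
{
  double acc = 0.0;
  for (uint8_t v : in)
    acc += static_cast<double>(in_scale) * (static_cast<int32_t>(v) - in_zp); // dequantize
  const double mean = acc / static_cast<double>(in.size());
  const long q = std::lround(mean / out_scale) + out_zp;                      // requantize
  return static_cast<uint8_t>(std::clamp(q, 0L, 255L));                       // saturate
}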
- */ - -#include "MeanLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/ReduceMean.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -MeanLayer::MeanLayer() : _input(nullptr), _axes(nullptr), _output(nullptr), _keep_dims(false) -{ - // DO NOTHING -} - -void MeanLayer::MeanFloat32() -{ - nnfw::cker::Mean(getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()), - getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()), - getReducerAxes(_axes)); -} - -void MeanLayer::MeanQuant8() -{ - nnfw::cker::MeanQ8Asymm(getTensorShape(_input), - reinterpret_cast<const uint8_t *>(_input->buffer()), _input->data_scale(), - _input->data_offset(), getTensorShape(_output), - reinterpret_cast<uint8_t *>(_output->buffer()), _output->data_scale(), - _output->data_offset(), getReducerAxes(_axes)); -} - -void MeanLayer::configure(const IPortableTensor *input, const IPortableTensor *axes, - IPortableTensor *output, bool keep_dims) -{ - _input = input; - _axes = axes; - _output = output; - _keep_dims = keep_dims; -} - -void MeanLayer::run() -{ - if (_input->data_type() == OperandType::FLOAT32) - { - MeanFloat32(); - } - else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - MeanQuant8(); - } - else - { - throw std::runtime_error{"Mean: unsupported data type"}; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/MeanLayer.h b/runtime/onert/backend/cpu/ops/MeanLayer.h deleted file mode 100644 index 3e95c1203..000000000 --- a/runtime/onert/backend/cpu/ops/MeanLayer.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in riting, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_MEANLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_MEANLAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class MeanLayer : public ::onert::exec::IFunction -{ -public: - MeanLayer(); - -public: - void MeanFloat32(); - - void MeanQuant8(); - - void configure(const IPortableTensor *input, const IPortableTensor *axes, IPortableTensor *output, - bool keep_dims); - - void run() override; - -private: - const IPortableTensor *_input; - const IPortableTensor *_axes; - IPortableTensor *_output; - bool _keep_dims; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_MEANLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/OneHotLayer.cc b/runtime/onert/backend/cpu/ops/OneHotLayer.cc deleted file mode 100644 index 2a82b00ee..000000000 --- a/runtime/onert/backend/cpu/ops/OneHotLayer.cc +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. 
All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "OneHotLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/OneHot.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -template <typename T> void OneHotLayer::oneHotImpl() -{ - // It assumes index is int32_t type. - nnfw::cker::OneHot<T, int32_t>( - *reinterpret_cast<const int32_t *>(_depth->buffer()), - *reinterpret_cast<T *>(_on_value->buffer()), *reinterpret_cast<T *>(_off_value->buffer()), - _axis, getTensorShape(_indices), reinterpret_cast<const int32_t *>(_indices->buffer()), - getTensorShape(_output), reinterpret_cast<T *>(_output->buffer())); -} - -void OneHotLayer::configure(const IPortableTensor *indices, const IPortableTensor *depth, - const IPortableTensor *on_value, const IPortableTensor *off_value, - IPortableTensor *output, const int32_t axis) -{ - _indices = indices; - _output = output; - _depth = depth; - _on_value = on_value; - _off_value = off_value; - _axis = axis; -} - -void OneHotLayer::run() -{ - if (_output->data_type() == OperandType::FLOAT32) - { - oneHotImpl<float>(); - } - else - { - throw std::runtime_error{"OneHot: unsupported data type"}; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/OneHotLayer.h b/runtime/onert/backend/cpu/ops/OneHotLayer.h deleted file mode 100644 index c05498440..000000000 --- a/runtime/onert/backend/cpu/ops/OneHotLayer.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef __ONERT_BACKEND_CPU_OPS_ONEHOTLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_ONEHOTLAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class OneHotLayer : public ::onert::exec::IFunction -{ -public: - OneHotLayer() - : _indices(nullptr), _depth(nullptr), _on_value(nullptr), _off_value(nullptr), - _output(nullptr), _axis(-1) - { - // DO NOTHING - } - -public: - template <typename T> void oneHotImpl(); - - void configure(const IPortableTensor *indices, const IPortableTensor *depth, - const IPortableTensor *on_value, const IPortableTensor *off_value, - IPortableTensor *output, int32_t axis); - - void run() override; - -private: - const IPortableTensor *_indices; - const IPortableTensor *_depth; - const IPortableTensor *_on_value; - const IPortableTensor *_off_value; - IPortableTensor *_output; - - int32_t _axis; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_ONEHOTLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/OperationUtils.cc b/runtime/onert/backend/cpu/ops/OperationUtils.cc deleted file mode 100644 index 2eee6dc85..000000000 --- a/runtime/onert/backend/cpu/ops/OperationUtils.cc +++ /dev/null @@ -1,266 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "OperationUtils.h" - -#include <algorithm> -#include <cassert> -#include <cmath> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -uint32_t getNumberOfDimensions(const IPortableTensor *tensor) -{ - assert(tensor); - return tensor->num_dimensions(); -} - -uint32_t getNumberOfElements(const IPortableTensor *tensor) -{ - assert(tensor); - uint32_t count = 1; - for (size_t i = 0; i < tensor->num_dimensions(); i++) - { - count *= tensor->dimension(i); - } - return count; -} - -uint32_t getSizeOfDimension(const IPortableTensor *tensor, uint32_t dimensionIdx) -{ - assert(tensor); - if (dimensionIdx >= tensor->num_dimensions()) - { - // TODO, log the error - return 0; - } - return tensor->dimension(dimensionIdx); -} - -void QuantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift) -{ - if (double_multiplier == 0.) 
- { - *quantized_multiplier = 0; - *shift = 0; - return; - } - const double q = std::frexp(double_multiplier, shift); - auto q_fixed = static_cast<int64_t>(std::round(q * (1ll << 31))); - - assert(q_fixed <= (1ll << 31)); - if (q_fixed == (1ll << 31)) - { - q_fixed /= 2; - ++*shift; - } - assert(q_fixed <= std::numeric_limits<int32_t>::max()); - *quantized_multiplier = static_cast<int32_t>(q_fixed); -} - -void GetQuantizedConvolutionMultiplier(const IPortableTensor *input, const IPortableTensor *filter, - const IPortableTensor *bias, const IPortableTensor *output, - double *multiplier) -{ - const double input_product_scale = input->data_scale() * filter->data_scale(); - const double bias_scale = (bias != nullptr) ? bias->data_scale() : input_product_scale; - const double output_scale = output->data_scale(); - // The following conditions must be guaranteed by the training pipeline. - UNUSED_RELEASE(bias_scale); - assert(std::abs(input_product_scale - bias_scale) <= - 1e-6 * std::min(input_product_scale, bias_scale)); - assert(input_product_scale >= 0); - assert(input_product_scale < output_scale); - *multiplier = input_product_scale / output_scale; -} - -void QuantizeMultiplierGreaterThanOne(double double_multiplier, int32_t *quantized_multiplier, - int *left_shift) -{ - assert(double_multiplier > 1.); - const double q = std::frexp(double_multiplier, left_shift); - int64_t q_fixed = static_cast<int64_t>(std::round(q * (1ll << 31))); - assert(q_fixed <= (1ll << 31)); - if (q_fixed == (1ll << 31)) - { - q_fixed /= 2; - ++*left_shift; - } - assert(*left_shift >= 0); - assert(q_fixed <= std::numeric_limits<int32_t>::max()); - *quantized_multiplier = static_cast<int32_t>(q_fixed); -} - -void CalculateActivationRangeUint8(ir::Activation activation, const IPortableTensor *output, - int32_t *act_min, int32_t *act_max) -{ - const int32_t qmin = std::numeric_limits<uint8_t>::min(); - const int32_t qmax = std::numeric_limits<uint8_t>::max(); - const auto scale = output->data_scale(); - const auto zero_point = output->data_offset(); - auto quantize = [scale, zero_point](float f) { - return zero_point + static_cast<int32_t>(std::round(f / scale)); - }; - if (activation == ir::Activation::RELU) - { - *act_min = std::max(qmin, quantize(0.0)); - *act_max = qmax; - } - else if (activation == ir::Activation::RELU6) - { - *act_min = std::max(qmin, quantize(0.0)); - *act_max = std::min(qmax, quantize(6.0)); - } - else if (activation == ir::Activation::RELU1) - { - *act_min = std::max(qmin, quantize(-1.0)); - *act_max = std::min(qmax, quantize(1.0)); - } - else if (activation == ir::Activation::SIGMOID) - { - *act_min = std::max(qmin, quantize(0.0)); - *act_max = std::min(qmax, quantize(1.0)); - } - else if (activation == ir::Activation::NONE) - { - *act_min = qmin; - *act_max = qmax; - } - else - { - std::cout << "Unsupported fused activation function." 
<< std::endl; - } -} - -bool HaveSameShapes(const IPortableTensor *input1, const IPortableTensor *input2) -{ - if (input1 == input2) - return true; - if (input2 == NULL || input2 == NULL) - return false; - - if (input1 == NULL) - { - return (getNumberOfDimensions(input2) == 0); - } - - if (getNumberOfDimensions(input1) != getNumberOfDimensions(input2)) - return false; - - for (uint32_t i = 0; i < getNumberOfDimensions(input1); i++) - if (input1->dimension(i) != input2->dimension(i)) - return false; - - return true; -} - -int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift) -{ - const double max_input_rescaled = 1.0 * ((1 << input_integer_bits) - 1) * - (1ll << (31 - input_integer_bits)) / (1ll << input_left_shift); - // Tighten bound using floor. Suppose that we could use the exact value. - // After scaling the difference, the result would be at the maximum. Thus we - // must ensure that our value has lower magnitude. - return static_cast<int32_t>(std::floor(max_input_rescaled)); -} - -uint32_t sizeOfData(OperandType type, const std::vector<int32_t> &dimensions) -{ - uint32_t size = 4; - - switch (type) - { - case OperandType::FLOAT32: - case OperandType::INT32: - case OperandType::UINT32: - size = 4; - break; - case OperandType::BOOL8: - case OperandType::QUANT_UINT8_ASYMM: - case OperandType::QUANT_INT8_SYMM: - size = 1; - break; - case OperandType::INT64: - size = 8; - break; - default: - throw std::runtime_error("Not supported operand type."); - break; - } - - for (auto d : dimensions) - { - assert(d >= 0); - size *= static_cast<uint32_t>(d); - } - - return size; -} - -nnfw::cker::PaddingType getPaddingType(ir::PaddingType ir_padding_type) -{ - switch (ir_padding_type) - { - case ir::PaddingType::EXPLICIT: - return nnfw::cker::PaddingType::kNone; - case ir::PaddingType::SAME: - return nnfw::cker::PaddingType::kSame; - case ir::PaddingType::VALID: - return nnfw::cker::PaddingType::kValid; - default: - throw std::runtime_error("Wrong padding type."); - break; - } -} - -std::vector<int32_t> getReducerAxes(const IPortableTensor *axes) -{ - std::vector<int32_t> ret; - - assert(axes->layout() == ir::Layout::NHWC); - assert(axes->dimension(0) == axes->getShape().num_elements()); - switch (axes->data_type()) - { - case ir::DataType::INT32: - { - for (size_t i = 0; i < axes->dimension(0); ++i) - ret.emplace_back(*(reinterpret_cast<const int32_t *>(axes->buffer()) + i)); - break; - } - case ir::DataType::INT64: - { - for (size_t i = 0; i < axes->dimension(0); ++i) - ret.emplace_back(*(reinterpret_cast<const int64_t *>(axes->buffer()) + i)); - break; - } - default: - throw std::runtime_error("getReducerAxes: Not supported data type"); - break; - } - return ret; -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/OperationUtils.h b/runtime/onert/backend/cpu/ops/OperationUtils.h deleted file mode 100644 index eb24dd43c..000000000 --- a/runtime/onert/backend/cpu/ops/OperationUtils.h +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __NNFW_SUPPORT_NNAPI_OPERATION_UTILS_H__ -#define __NNFW_SUPPORT_NNAPI_OPERATION_UTILS_H__ - -#include <backend/IPortableTensor.h> - -#include <cker/Shape.h> -#include <cker/Types.h> -#include <iostream> -#include <ir/DataType.h> -#include <ir/InternalType.h> -#include <ir/Operand.h> -#include <ir/Padding.h> - -#include <limits> -#include <vector> - -using OperandType = onert::ir::DataType; - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -union DataPtr { - uint8_t *u8; - int8_t *i8; - uint32_t *u32; - int32_t *i32; - bool *b; - float *f; - int64_t *i64; - void *v; -}; - -union ConstDataPtr { - const uint8_t *u8; - const int8_t *i8; - const uint32_t *u32; - const int32_t *i32; - const bool *b; - const float *f; - const int64_t *i64; - const void *v; -}; - -uint32_t getNumberOfDimensions(const IPortableTensor *tensor); - -uint32_t getNumberOfElements(const IPortableTensor *tensor); - -uint32_t getSizeOfDimension(const IPortableTensor *tensor, uint32_t dimensionIdx); - -inline nnfw::cker::Shape getExtendedTensorShape(const IPortableTensor *tensor) -{ - assert(tensor); - const int32_t extended_rank = 4; - int32_t raw_shape[extended_rank]; - uint32_t src = extended_rank - tensor->num_dimensions(); - for (uint32_t i = 0; i < extended_rank; ++i) - { - if (i < src) - { - raw_shape[i] = 1; - } - else - { - raw_shape[i] = tensor->dimension(i - src); - } - } - - return nnfw::cker::Shape(extended_rank, raw_shape); -} - -inline nnfw::cker::Shape getTensorShape(const IPortableTensor *tensor) -{ - if (tensor == nullptr) - return nnfw::cker::Shape(); - - const ir::Shape &shape = tensor->get_info().shape(); - - assert(tensor->layout() == ir::Layout::NHWC); - - auto rank = shape.rank(); - nnfw::cker::Shape ret(rank); - auto data = ret.DimsData(); - for (int i = 0; i < rank; ++i) - { - data[i] = shape.dim(i); - } - return ret; -} - -inline nnfw::cker::FusedActivationFunctionType -convertActivationType(const ir::Activation activation) -{ - switch (activation) - { - case ir::Activation::NONE: - return nnfw::cker::FusedActivationFunctionType::kNone; - case ir::Activation::RELU: - return nnfw::cker::FusedActivationFunctionType::kRelu; - case ir::Activation::RELU1: - return nnfw::cker::FusedActivationFunctionType::kRelu1; - case ir::Activation::RELU6: - return nnfw::cker::FusedActivationFunctionType::kRelu6; - default: - throw std::runtime_error{"CPU backend: Cannot convert activation type"}; - } -} - -inline int32_t getAxis(uint32_t rank, int32_t axis, ir::Layout frontend_layout) -{ - auto ret = axis; - - if (axis < 0) - { - ret += rank; - } - - // NCHW -> NHWC - if (frontend_layout == ir::Layout::NCHW) - { - int32_t permutation[4] = {0, 3, 1, 2}; - ret = permutation[ret]; - } - - return ret; -} - -void QuantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift); - -void GetQuantizedConvolutionMultiplier(const IPortableTensor *inputDescr, - const IPortableTensor *filterDescr, - const IPortableTensor *biasDescr, - const IPortableTensor *outputDescr, double *multiplier); - -void QuantizeMultiplierGreaterThanOne(double 
double_multiplier, int32_t *quantized_multiplier, - int *left_shift); - -template <typename T> -void CalculateActivationRange(ir::Activation activation, T *activation_min, T *activation_max) -{ - if (activation == ir::Activation::RELU) - { - *activation_min = 0; - *activation_max = std::numeric_limits<T>::max(); - } - else if (activation == ir::Activation::RELU6) - { - *activation_min = 0; - *activation_max = 6; - } - else if (activation == ir::Activation::RELU1) - { - *activation_min = -1; - *activation_max = 1; - } - else if (activation == ir::Activation::SIGMOID) - { - *activation_min = 0; - *activation_max = 1; - } - else if (activation == ir::Activation::NONE) - { - *activation_min = std::numeric_limits<T>::lowest(); - *activation_max = std::numeric_limits<T>::max(); - } - else - { - std::cout << "Unsupported fused activation function." << std::endl; - } -} - -void CalculateActivationRangeUint8(ir::Activation activation, const IPortableTensor *output, - int32_t *act_min, int32_t *act_max); - -bool HaveSameShapes(const IPortableTensor *input1, const IPortableTensor *input2); - -int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift); - -uint32_t sizeOfData(OperandType type, const std::vector<int32_t> &dimensions); - -nnfw::cker::PaddingType getPaddingType(ir::PaddingType ir_padding_type); - -std::vector<int32_t> getReducerAxes(const IPortableTensor *axes); - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __NNFW_SUPPORT_NNAPI_OPERATION_UTILS_H__ diff --git a/runtime/onert/backend/cpu/ops/PackLayer.cc b/runtime/onert/backend/cpu/ops/PackLayer.cc deleted file mode 100644 index 314b192a2..000000000 --- a/runtime/onert/backend/cpu/ops/PackLayer.cc +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "PackLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/Pack.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -PackLayer::PackLayer() : _inputs(), _output(nullptr), _axis(0) -{ - // DO NOTHING -} - -template <typename T> void PackLayer::packImpl() -{ - uint32_t num_inputs = _inputs.size(); - nnfw::cker::PackParams op_params; - op_params.axis = _axis; - op_params.inputs_count = num_inputs; - - std::vector<nnfw::cker::Shape *> inputDimsPtr; - std::vector<nnfw::cker::Shape> inputDims; - inputDimsPtr.reserve(num_inputs); - inputDims.reserve(num_inputs); - - for (uint32_t i = 0; i < num_inputs; i++) - { - inputDims.push_back(getTensorShape(_inputs[i])); - inputDimsPtr.push_back(&inputDims[i]); - } - - std::vector<const T *> inputPtrs; - - for (const auto input : _inputs) - { - inputPtrs.emplace_back(reinterpret_cast<const T *>(input->buffer())); - } - - nnfw::cker::Pack<T>(op_params, inputPtrs.data(), getTensorShape(_output), - reinterpret_cast<T *>(_output->buffer())); -} - -void PackLayer::configure(const std::vector<const IPortableTensor *> &inputs, int32_t axis, - IPortableTensor *output) -{ - assert(inputs.size() > 0); - assert(output != nullptr); - - _inputs = inputs; - _axis = axis; - _output = output; -} - -void PackLayer::run() -{ - if (_output->data_type() == OperandType::FLOAT32) - { - packImpl<float>(); - } - else if (_output->data_type() == OperandType::INT32) - { - packImpl<int32_t>(); - } - else - { - throw std::runtime_error{"Pack: unsupported data type"}; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/PackLayer.h b/runtime/onert/backend/cpu/ops/PackLayer.h deleted file mode 100644 index b92c8d48c..000000000 --- a/runtime/onert/backend/cpu/ops/PackLayer.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef __ONERT_BACKEND_CPU_OPS_PACKLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_PACKLAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class PackLayer : public ::onert::exec::IFunction -{ -public: - PackLayer(); - -public: - template <typename T> void packImpl(); - - void configure(const std::vector<const IPortableTensor *> &inputs, int32_t axis, - IPortableTensor *output); - void run() override; - -private: - std::vector<const IPortableTensor *> _inputs; - IPortableTensor *_output; - int32_t _axis; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_PACKLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/PadLayer.cc b/runtime/onert/backend/cpu/ops/PadLayer.cc deleted file mode 100644 index 6a2bf9da0..000000000 --- a/runtime/onert/backend/cpu/ops/PadLayer.cc +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "PadLayer.h" - -#include <cker/operation/Pad.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -PadLayer::PadLayer() - : _input(nullptr), _output(nullptr), _padData(), _padRank(), _constantValueData() -{ - // DO NOTHING -} - -template <typename T> void PadLayer::padImpl(const T *constant_value_data) -{ - nnfw::cker::Pad<T>(_padData, _padRank, getTensorShape(_input), - reinterpret_cast<const T *>(_input->buffer()), getTensorShape(_output), - reinterpret_cast<T *>(_output->buffer()), constant_value_data); -} - -void PadLayer::configure(const IPortableTensor *input, IPortableTensor *output, - const int32_t *padData, int32_t padRank, const void *constantValueData) -{ - _input = input; - _output = output; - memcpy(_padData, padData, sizeof(_padData)); - _padRank = padRank; - _constantValueData.v = constantValueData; -} - -void PadLayer::run() -{ - if (_input->data_type() == OperandType::FLOAT32) - { - padImpl<float>(_constantValueData.f); - } - else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - if (_constantValueData.u8 == nullptr) - { - uint8_t pad_value = static_cast<uint8_t>(_output->data_offset()); - padImpl<uint8_t>(&pad_value); - } - else - { - padImpl<uint8_t>(_constantValueData.u8); - } - } - else - { - throw std::runtime_error{"Pad: unsupported data type"}; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/PadLayer.h b/runtime/onert/backend/cpu/ops/PadLayer.h deleted file mode 100644 index efd73d5e5..000000000 --- a/runtime/onert/backend/cpu/ops/PadLayer.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_PADLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_PADLAYER_H__ - -#include <backend/IPortableTensor.h> -#include "OperationUtils.h" - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -// Note, this is pad with mode=`CONSTANT`: it doesn't support `REFLECT` and -// `SYMMETRIC` -class PadLayer : public ::onert::exec::IFunction -{ -public: - PadLayer(); - -public: - template <typename T> void padImpl(const T *constant_value_data); - - void configure(const IPortableTensor *input, IPortableTensor *output, const int32_t *padData, - int32_t padRank, const void *constantValueData = nullptr); - - void run() override; - -private: - const IPortableTensor *_input; - IPortableTensor *_output; - - int32_t _padData[8]; - int32_t _padRank; - ConstDataPtr _constantValueData; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_PADLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/PoolLayer.cc b/runtime/onert/backend/cpu/ops/PoolLayer.cc deleted file mode 100644 index 85d02a751..000000000 --- a/runtime/onert/backend/cpu/ops/PoolLayer.cc +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
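PoolLayer.cc, removed below, resolves as much as possible in configure(): the fused-activation range is computed once and the pool type plus parameters are bound into a std::function, so run() is a single indirect call. A reduced sketch of that selection pattern, using plain float windows instead of tensors (names are illustrative):

#include <functional>
#include <stdexcept>

enum class PoolKind
{
  kAvg,
  kMax
};

// Plays the role of PoolLayer::_kernel: every decision is made once at
// configure time, and the hot path only invokes the stored callable.
using WindowKernel = std::function<float(const float *, int)>;

WindowKernel makeWindowKernel(PoolKind kind)
{
  switch (kind)
  {
    case PoolKind::kAvg:
      return [](const float *in, int n) {
        float sum = 0.f;
        for (int i = 0; i < n; ++i)
          sum += in[i];
        return sum / static_cast<float>(n);
      };
    case PoolKind::kMax:
      return [](const float *in, int n) {
        float best = in[0];
        for (int i = 1; i < n; ++i)
          best = in[i] > best ? in[i] : best;
        return best;
      };
  }
  throw std::runtime_error{"Pool: unsupported pool type"};
}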
- */ - -#include "PoolLayer.h" - -#include <cker/operation/AveragePool.h> -#include <cker/operation/MaxPool.h> - -#include <unordered_map> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -namespace -{ -template <typename T> -void avgPool2D(const nnfw::cker::PoolParams ¶ms, const IPortableTensor *input, - IPortableTensor *output) -{ - nnfw::cker::AveragePool<T>(params, getTensorShape(input), - reinterpret_cast<const T *>(input->buffer()), getTensorShape(output), - reinterpret_cast<T *>(output->buffer())); -} - -template <typename T> -void maxPool2D(const nnfw::cker::PoolParams ¶ms, const IPortableTensor *input, - IPortableTensor *output) -{ - nnfw::cker::MaxPool<T>(params, getTensorShape(input), - reinterpret_cast<const T *>(input->buffer()), getTensorShape(output), - reinterpret_cast<T *>(output->buffer())); -} - -template <typename T> -std::function<void(const IPortableTensor *, IPortableTensor *)> -generateKernelGeneric(const nnfw::cker::PoolParams ¶ms, PoolType op_type) -{ - if (op_type == PoolType::kAvg) - { - return std::bind(&avgPool2D<T>, params, std::placeholders::_1, std::placeholders::_2); - } - else if (op_type == PoolType::kMax) - { - return std::bind(&maxPool2D<T>, params, std::placeholders::_1, std::placeholders::_2); - } - else - { - throw std::runtime_error{"Pool: unsupported pool type"}; - } -} -} // namespace - -PoolLayer::PoolLayer() : _input(nullptr), _output(nullptr), _kernel() -{ - // DO NOTHING -} - -#define POOLING_PARAMETERS \ - nnfw::cker::PoolParams op_params; \ - op_params.stride_height = strideHeight; \ - op_params.stride_width = strideWidth; \ - op_params.filter_height = kernelHeight; \ - op_params.filter_width = kernelWidth; \ - op_params.padding_values.height = (int8_t)paddingTop; \ - op_params.padding_values.width = (int8_t)paddingLeft; - -void PoolLayer::configure(const IPortableTensor *input, const uint32_t paddingLeft, const uint32_t, - const uint32_t paddingTop, const uint32_t, const uint32_t strideWidth, - const uint32_t strideHeight, const uint32_t kernelWidth, - const uint32_t kernelHeight, const ir::Activation activation, - IPortableTensor *output, const PoolType op_type) -{ - assert(input != nullptr); - assert(output != nullptr); - - _input = input; - _output = output; - - POOLING_PARAMETERS - if (_input->data_type() == OperandType::FLOAT32) - { - float output_activation_min = 0; - float output_activation_max = 0; - CalculateActivationRange<float>(activation, &output_activation_min, &output_activation_max); - op_params.float_activation_min = output_activation_min; - op_params.float_activation_max = output_activation_max; - - _kernel = generateKernelGeneric<float>(op_params, op_type); - } - else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - int32_t output_activation_min = 0; - int32_t output_activation_max = 0; - CalculateActivationRangeUint8(activation, _output, &output_activation_min, - &output_activation_max); - op_params.quantized_activation_min = output_activation_min; - op_params.quantized_activation_max = output_activation_max; - _kernel = generateKernelGeneric<uint8_t>(op_params, op_type); - } - else - { - throw std::runtime_error{"Pool: unsupported data type"}; - } -} - -void PoolLayer::run() { _kernel(_input, _output); } - -#undef AVGPOOLING_PARAMETERS - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/PoolLayer.h b/runtime/onert/backend/cpu/ops/PoolLayer.h deleted file mode 100644 index b37835946..000000000 
--- a/runtime/onert/backend/cpu/ops/PoolLayer.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_POOLLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_POOLLAYER_H__ - -#include <backend/IPortableTensor.h> -#include "OperationUtils.h" - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -enum class PoolType -{ - kAvg, - kL2, - kMax, -}; - -class PoolLayer : public ::onert::exec::IFunction -{ -public: - PoolLayer(); - -public: - void configure(const IPortableTensor *input, const uint32_t paddingLeft, - const uint32_t paddingRight, const uint32_t paddingTop, - const uint32_t paddingBottom, const uint32_t strideWidth, - const uint32_t strideHeight, const uint32_t kernelWidth, - const uint32_t kernelHeight, const ir::Activation activation, - IPortableTensor *output, const PoolType op_type); - - void run() override; - -private: - const IPortableTensor *_input; - IPortableTensor *_output; - - std::function<void(const IPortableTensor *, IPortableTensor *)> _kernel; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_POOLLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/PowLayer.cc b/runtime/onert/backend/cpu/ops/PowLayer.cc deleted file mode 100644 index 04a1af1e1..000000000 --- a/runtime/onert/backend/cpu/ops/PowLayer.cc +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
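PowLayer.cc, removed below, takes the broadcasting cker kernel only when HaveSameShapes() fails and the plain element-wise powImpl otherwise; the fused activation is converted to a min/max range by CalculateActivationRange and applied as a clamp on the broadcast path. A minimal sketch of an element-wise pow with such a clamp (hypothetical helper, float only, same-shape operands assumed):

#include <cmath>
#include <cstddef>
#include <vector>

std::vector<float> powClamped(const std::vector<float> &lhs, const std::vector<float> &rhs,
                              float act_min, float act_max)
{
  std::vector<float> out(lhs.size());
  for (std::size_t i = 0; i < lhs.size(); ++i)
  {
    const float v = std::pow(lhs[i], rhs[i]);
    out[i] = std::fmin(std::fmax(v, act_min), act_max); // fused-activation clamp
  }
  return out;
}

For ir::Activation::NONE the computed range is the full float range, so the clamp above is a no-op.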
- */ - -#include "PowLayer.h" - -#include <cker/operation/Pow.h> -#include <cker/operation/BinaryArithmeticOps.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -void PowLayer::powFloat32() -{ - float output_activation_min = 0, output_activation_max = 0; - CalculateActivationRange(_activation, &output_activation_min, &output_activation_max); - nnfw::cker::BinaryArithmeticOpParam op_params; - op_params.float_activation_max = output_activation_max; - op_params.float_activation_min = output_activation_min; - - if (!HaveSameShapes(_lhs, _rhs)) - { - nnfw::cker::BroadcastBinaryArithmeticOp<nnfw::cker::BinaryArithmeticOpType::POW>( - op_params, getTensorShape(_lhs), reinterpret_cast<const float *>(_lhs->buffer()), - getTensorShape(_rhs), reinterpret_cast<const float *>(_rhs->buffer()), - getTensorShape(_output), reinterpret_cast<float *>(_output->buffer())); - return; - } - - nnfw::cker::powImpl(getTensorShape(_lhs), reinterpret_cast<const float *>(_lhs->buffer()), - getTensorShape(_rhs), reinterpret_cast<const float *>(_rhs->buffer()), - getTensorShape(_output), reinterpret_cast<float *>(_output->buffer())); -} - -void PowLayer::configure(const IPortableTensor *lhs, const IPortableTensor *rhs, - ir::Activation activation, IPortableTensor *output) -{ - _lhs = lhs; - _rhs = rhs; - _activation = activation; - _output = output; -} - -void PowLayer::run() -{ - if (_output->data_type() == OperandType::FLOAT32) - powFloat32(); - else - throw std::runtime_error{"Pow: unsupportted data type"}; -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/PowLayer.h b/runtime/onert/backend/cpu/ops/PowLayer.h deleted file mode 100644 index 2689aad17..000000000 --- a/runtime/onert/backend/cpu/ops/PowLayer.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef __ONERT_BACKEND_CPU_OPS_POWLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_POWLAYER_H__ - -#include <backend/IPortableTensor.h> -#include "OperationUtils.h" - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class PowLayer : public ::onert::exec::IFunction -{ -public: - PowLayer() : _lhs(nullptr), _rhs(nullptr), _output(nullptr) - { - // DO NOTHING - } - -public: - void powFloat32(); - - void configure(const IPortableTensor *lhs, const IPortableTensor *rhs, - const ir::Activation activation, IPortableTensor *output); - - void run() override; - -private: - const IPortableTensor *_lhs; - const IPortableTensor *_rhs; - IPortableTensor *_output; - - ir::Activation _activation{ir::Activation::NONE}; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_POWLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/RangeLayer.cc b/runtime/onert/backend/cpu/ops/RangeLayer.cc deleted file mode 100644 index f00101fa8..000000000 --- a/runtime/onert/backend/cpu/ops/RangeLayer.cc +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "RangeLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/Range.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ -RangeLayer::RangeLayer() : _start(nullptr), _limit(nullptr), _delta(nullptr), _output(nullptr) -{ - // DO NOTHING -} - -void RangeLayer::configure(const IPortableTensor *start, const IPortableTensor *limit, - const IPortableTensor *delta, IPortableTensor *output) -{ - _start = start; - _limit = limit; - _delta = delta; - _output = output; -} - -void RangeLayer::run() -{ - switch (_output->data_type()) - { - case OperandType::FLOAT32: - nnfw::cker::Range<float>(reinterpret_cast<float *>(_start->buffer()), - reinterpret_cast<float *>(_limit->buffer()), - reinterpret_cast<float *>(_delta->buffer()), - reinterpret_cast<float *>(_output->buffer())); - break; - case OperandType::INT32: - nnfw::cker::Range<int32_t>(reinterpret_cast<int32_t *>(_start->buffer()), - reinterpret_cast<int32_t *>(_limit->buffer()), - reinterpret_cast<int32_t *>(_delta->buffer()), - reinterpret_cast<int32_t *>(_output->buffer())); - break; - default: - throw std::runtime_error{"Range: unsupported data type"}; - break; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/RangeLayer.h b/runtime/onert/backend/cpu/ops/RangeLayer.h deleted file mode 100644 index 2d83b39b1..000000000 --- a/runtime/onert/backend/cpu/ops/RangeLayer.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
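RangeLayer above forwards start, limit, and delta straight to nnfw::cker::Range. As a rough sketch of the semantics it relies on (standalone code, not the cker implementation), the output holds ceil((limit - start) / delta) evenly spaced values:

#include <cmath>
#include <vector>
#include <cstdio>

// Standalone sketch of Range semantics: not the cker kernel itself.
template <typename T> std::vector<T> makeRange(T start, T limit, T delta)
{
  // Number of elements is ceil((limit - start) / delta), as in TF-style Range.
  const int count = static_cast<int>(std::ceil(static_cast<double>(limit - start) / delta));
  std::vector<T> out(count > 0 ? count : 0);
  T value = start;
  for (auto &e : out)
  {
    e = value;
    value += delta;
  }
  return out;
}

int main()
{
  for (float v : makeRange<float>(0.f, 5.f, 1.5f)) // 0 1.5 3 4.5
    std::printf("%g ", v);
}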
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in riting, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_RANGELAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_RANGELAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ -class RangeLayer : public ::onert::exec::IFunction -{ -public: - RangeLayer(); - - void configure(const IPortableTensor *start, const IPortableTensor *limit, - const IPortableTensor *delta, IPortableTensor *output); - - void run() override; - -private: - const IPortableTensor *_start; - const IPortableTensor *_limit; - const IPortableTensor *_delta; - IPortableTensor *_output; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_RANGELAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/RankLayer.cc b/runtime/onert/backend/cpu/ops/RankLayer.cc deleted file mode 100644 index 4690bdf72..000000000 --- a/runtime/onert/backend/cpu/ops/RankLayer.cc +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "RankLayer.h" - -#include "OperationUtils.h" - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -RankLayer::RankLayer() : _input(nullptr), _output(nullptr) -{ - // DO NOTHING -} - -void RankLayer::configure(const IPortableTensor *input, IPortableTensor *output) -{ - _input = input; - _output = output; -} - -void RankLayer::run() -{ - if (_input->data_type() == OperandType::FLOAT32 || _input->data_type() == OperandType::INT32) - { - int32_t *output_data = reinterpret_cast<int32_t *>(_output->buffer()); - output_data[0] = _input->num_dimensions(); - } - else - { - throw std::runtime_error{"Rank : unsupported data type"}; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/RankLayer.h b/runtime/onert/backend/cpu/ops/RankLayer.h deleted file mode 100644 index 6282ceb07..000000000 --- a/runtime/onert/backend/cpu/ops/RankLayer.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in riting, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_RANKLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_RANKLAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class RankLayer : public ::onert::exec::IFunction -{ -public: - RankLayer(); - -public: - void configure(const IPortableTensor *input, IPortableTensor *output); - - void run() override; - -private: - const IPortableTensor *_input; - IPortableTensor *_output; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_RANKLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/ReduceLayer.cc b/runtime/onert/backend/cpu/ops/ReduceLayer.cc deleted file mode 100644 index 4a55b2a33..000000000 --- a/runtime/onert/backend/cpu/ops/ReduceLayer.cc +++ /dev/null @@ -1,227 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "ReduceLayer.h" - -#include "OperationUtils.h" - -#include "cker/neon/neon_check.h" -#include <cker/operation/Reduce.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -namespace -{ - -template <typename T> -void evalLogic(const IPortableTensor *input, IPortableTensor *output, const std::vector<int> &axes, - bool keep_dims, T init_value, nnfw::cker::Reduce &reduce_kernel, - T reducer(const T current, const T in)) -{ - reduce_kernel.prepare(input->num_dimensions(), axes.size()); - bool result = reduce_kernel.ReduceGeneric<T>( - getTensorShape(input), reinterpret_cast<const T *>(input->buffer()), getTensorShape(output), - reinterpret_cast<T *>(output->buffer()), axes, keep_dims, init_value, reducer); - - if (!result) - { - throw std::runtime_error{"Reduce: Fail to run"}; - } -} - -template <typename T> -std::function<void(const IPortableTensor *, IPortableTensor *, const std::vector<int> &)> -evalType(bool keep_dims, nnfw::cker::Reduce &reduce_kernel, ReduceType reduce_type) -{ - switch (reduce_type) - { - case ReduceType::kSum: - return std::bind(&evalLogic<T>, std::placeholders::_1, std::placeholders::_2, - std::placeholders::_3, keep_dims, static_cast<T>(0), reduce_kernel, - [](const T current, const T in) -> T { return in + current; }); - break; - case ReduceType::kProd: - return std::bind(&evalLogic<T>, std::placeholders::_1, std::placeholders::_2, - std::placeholders::_3, keep_dims, static_cast<T>(1), reduce_kernel, - [](const T current, const T in) -> T { return in * current; }); - break; - case ReduceType::kMax: - return std::bind( - &evalLogic<T>, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3, - keep_dims, std::numeric_limits<T>::lowest(), reduce_kernel, - [](const T current, const T in) -> T { return (in > current) ? in : current; }); - break; - case ReduceType::kMin: - return std::bind( - &evalLogic<T>, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3, - keep_dims, std::numeric_limits<T>::max(), reduce_kernel, - [](const T current, const T in) -> T { return (in < current) ? 
in : current; }); - break; - default: - throw std::runtime_error{"Reduce: Unsupported reduce type"}; - } -} - -// Template specialization for bool type -template <> -std::function<void(const IPortableTensor *, IPortableTensor *, const std::vector<int> &)> -evalType<bool>(bool keep_dims, nnfw::cker::Reduce &reduce_kernel, ReduceType reduce_type) -{ - switch (reduce_type) - { - case ReduceType::kAny: - return std::bind(&evalLogic<bool>, std::placeholders::_1, std::placeholders::_2, - std::placeholders::_3, keep_dims, false, reduce_kernel, - [](const bool current, const bool in) -> bool { return in || current; }); - break; - case ReduceType::kAll: - return std::bind(&evalLogic<bool>, std::placeholders::_1, std::placeholders::_2, - std::placeholders::_3, keep_dims, true, reduce_kernel, - [](const bool current, const bool in) -> bool { return in && current; }); - break; - default: - throw std::runtime_error{"Reduce: Unsupported reduce type"}; - } -} - -std::function<void(const IPortableTensor *, IPortableTensor *, const std::vector<int> &)> -generateKernelGeneric(const IPortableTensor *input, bool keep_dims, - nnfw::cker::Reduce &reduce_kernel, ReduceType reduce_type) -{ - switch (input->data_type()) - { - case OperandType::FLOAT32: - return evalType<float>(keep_dims, reduce_kernel, reduce_type); - case OperandType::INT32: - return evalType<int32_t>(keep_dims, reduce_kernel, reduce_type); - case OperandType::BOOL8: - return evalType<bool>(keep_dims, reduce_kernel, reduce_type); - default: - throw std::runtime_error{"Reduce(generic): unsupported data type"}; - } -} - -// TODO Refine this function -void evalSumQuantized(const IPortableTensor *input, IPortableTensor *output, - const std::vector<int> &axes, bool keep_dims, - nnfw::cker::Reduce &reduce_kernel) -{ - const bool same_scale = (input->data_scale() == output->data_scale() && - input->data_offset() == output->data_offset()); - - reduce_kernel.prepare(input->num_dimensions(), axes.size()); - - if (!same_scale) - { - std::vector<int32_t> temp_sum(output->getShape().num_elements()); - bool result = reduce_kernel.QuantizedMeanOrSum<uint8_t, int32_t>( - reinterpret_cast<const uint8_t *>(input->buffer()), input->data_offset(), - input->data_scale(), getTensorShape(input), reinterpret_cast<uint8_t *>(output->buffer()), - output->data_offset(), output->data_scale(), getTensorShape(output), axes, keep_dims, - temp_sum.data(), true, [](const int32_t current, const uint8_t in) -> int32_t { - const int32_t actual_in = static_cast<int32_t>(in); - return current + actual_in; - }); - - if (!result) - { - throw std::runtime_error{"Reduce: Fail to run"}; - } - - return; - } - - const auto kernel = generateKernelGeneric(input, keep_dims, reduce_kernel, ReduceType::kSum); - kernel(input, output, axes); -} - -} // namespace - -ReduceLayer::ReduceLayer() - : _input(nullptr), _axes(nullptr), _output(nullptr), _reduce_kernel(new nnfw::cker::Reduce()), - _kernel(), _reduceType(ReduceType::kInvalid) -{ - // DO NOTHING -} - -ReduceLayer::~ReduceLayer() = default; - -void ReduceLayer::configure(const IPortableTensor *input, const IPortableTensor *axes, - IPortableTensor *output, ReduceType reduceType, bool keep_dims) -{ - _input = input; - _axes = axes; - _output = output; - _reduceType = reduceType; - - switch (_reduceType) - { - case ReduceType::kSum: - if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - _kernel = std::bind(&evalSumQuantized, std::placeholders::_1, std::placeholders::_2, - std::placeholders::_3, keep_dims, *_reduce_kernel); - return; 
- } - _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kSum); - break; - case ReduceType::kProd: - _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kProd); - break; - case ReduceType::kMax: - _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kMax); - break; - case ReduceType::kMin: - _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kMin); - break; - case ReduceType::kAny: - _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kAny); - break; - case ReduceType::kAll: - _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kAll); - break; - default: - throw std::runtime_error{"Reduce: Unsupported reduce type"}; - } -} - -void ReduceLayer::run() -{ - const auto axes = getReducerAxes(_axes); -#ifdef USE_NEON - int32_t rank = _input->num_dimensions(); - if (_input->data_type() == ir::DataType::FLOAT32 && _reduceType == ReduceType::kSum && - axes.size() == 1 && (axes[0] == -1 || axes[0] == rank - 1)) - { - OptimizedReduceSum(reinterpret_cast<const float *>(_input->buffer()), getTensorShape(_input), - reinterpret_cast<float *>(_output->buffer())); - return; - } -#endif // NEON - _kernel(_input, _output, axes); -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/ReduceLayer.h b/runtime/onert/backend/cpu/ops/ReduceLayer.h deleted file mode 100644 index 8265dd41f..000000000 --- a/runtime/onert/backend/cpu/ops/ReduceLayer.h +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
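The evalType helper in ReduceLayer.cc pairs each ReduceType with an initial value and a binary reducer, then binds them into a kernel via std::bind. A minimal standalone sketch of the same pattern over a flat buffer (plain lambdas, not the cker ReduceGeneric path):

#include <functional>
#include <limits>
#include <vector>
#include <cstdio>

// Sketch only: reduce a flat buffer with an init value and a binary reducer,
// mirroring how ReduceLayer pairs {init, reducer} per ReduceType.
template <typename T>
T reduceFlat(const std::vector<T> &data, T init, const std::function<T(T, T)> &reducer)
{
  T acc = init;
  for (const T v : data)
    acc = reducer(acc, v);
  return acc;
}

int main()
{
  const std::vector<float> data{1.f, -2.f, 3.f};
  // kSum: init 0, current + in
  std::printf("sum=%g\n", reduceFlat<float>(data, 0.f, [](float c, float in) { return c + in; }));
  // kMax: init lowest(), max(current, in)
  std::printf("max=%g\n", reduceFlat<float>(data, std::numeric_limits<float>::lowest(),
                                            [](float c, float in) { return in > c ? in : c; }));
}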
- */ - -#ifndef __ONERT_BACKEND_CPU_OPS_REDUCESUMLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_REDUCESUMLAYER_H__ - -#include "cker/neon/neon_check.h" - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> -#include <memory> - -namespace nnfw -{ -namespace cker -{ -class Reduce; -} -} // namespace nnfw - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -enum class ReduceType -{ - kSum, - kProd, - kMax, - kMin, - kAny, - kAll, - kInvalid // For debug and initialize -}; - -class ReduceLayer : public ::onert::exec::IFunction -{ -public: - ReduceLayer(); - ~ReduceLayer(); - -public: - void configure(const IPortableTensor *input, const IPortableTensor *axes, IPortableTensor *output, - ReduceType reduceType, bool keep_dims); - - void run() override; - -private: - const IPortableTensor *_input; - const IPortableTensor *_axes; - IPortableTensor *_output; - - std::unique_ptr<nnfw::cker::Reduce> _reduce_kernel; - std::function<void(const IPortableTensor *input, IPortableTensor *output, - const std::vector<int> &axes)> - _kernel; - - ReduceType _reduceType; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_REDUCESUMLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/ReshapeLayer.cc b/runtime/onert/backend/cpu/ops/ReshapeLayer.cc deleted file mode 100644 index 3c2b115f4..000000000 --- a/runtime/onert/backend/cpu/ops/ReshapeLayer.cc +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "ReshapeLayer.h" - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -ReshapeLayer::ReshapeLayer() : _input(nullptr), _shape(nullptr), _output(nullptr) -{ - // DO NOTHING -} - -void ReshapeLayer::reshapeGeneric() -{ - size_t count = _input->total_size(); - memcpy(_output->buffer(), _input->buffer(), count); -} - -void ReshapeLayer::configure(const IPortableTensor *input, const IPortableTensor *shape, - IPortableTensor *output) -{ - _input = input; - /* note : shape is optional. If not provided from model, _shape is nullptr. */ - _shape = shape; - _output = output; -} - -void ReshapeLayer::run() { reshapeGeneric(); } - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/ReshapeLayer.h b/runtime/onert/backend/cpu/ops/ReshapeLayer.h deleted file mode 100644 index b49c0bf7d..000000000 --- a/runtime/onert/backend/cpu/ops/ReshapeLayer.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_RESHAPELAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_RESHAPELAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class ReshapeLayer : public ::onert::exec::IFunction -{ -public: - ReshapeLayer(); - -public: - void reshapeGeneric(); - - void configure(const IPortableTensor *input, const IPortableTensor *shape, - IPortableTensor *output); - - void run() override; - -private: - const IPortableTensor *_input; - const IPortableTensor *_shape; - IPortableTensor *_output; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_RESHAPELAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/ResizeBilinearLayer.cc b/runtime/onert/backend/cpu/ops/ResizeBilinearLayer.cc deleted file mode 100644 index 1fe56cb99..000000000 --- a/runtime/onert/backend/cpu/ops/ResizeBilinearLayer.cc +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "OperationUtils.h" -#include "ResizeBilinearLayer.h" -#include "cker/operation/ResizeBilinear.h" -#include <cker/Types.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -ResizeBilinearLayer::ResizeBilinearLayer() - : _input(nullptr), _output(nullptr), _size(nullptr), _output_height(0), _output_width(0), - _align_corners(false), _half_pixel_centers(false) -{ - // DO NOTHING -} - -void ResizeBilinearLayer::configure(const IPortableTensor *input, IPortableTensor *output, - const IPortableTensor *size, bool align_corners, - bool half_pixel_centers) -{ - assert(!size->is_constant()); - _input = input; - _output = output; - _size = size; - _align_corners = align_corners; - _half_pixel_centers = half_pixel_centers; -} - -void ResizeBilinearLayer::configure(const IPortableTensor *input, IPortableTensor *output, - int32_t output_height, int32_t output_width, bool align_corners, - bool half_pixel_centers) -{ - assert(_size == nullptr); - if (output_height < 0) - { - throw std::runtime_error{"ResizeBilinear: size value must be positive value, output_height = " + - std::to_string(output_height)}; - } - if (output_width < 0) - { - throw std::runtime_error{"ResizeBilinear: size value must be positive value, output_width = " + - std::to_string(output_width)}; - } - _input = input; - _output = output; - _output_height = output_height; - _output_width = output_width; - _align_corners = align_corners; - _half_pixel_centers = half_pixel_centers; -} - -void ResizeBilinearLayer::run() -{ - nnfw::cker::ResizeBilinearParams params; - if (_size == nullptr) - { - params.output_height = _output_height; - params.output_width = _output_width; - } - else - { - const auto size_buf = reinterpret_cast<const int32_t *>(_size->buffer()); - params.output_height = size_buf[0]; - params.output_width = size_buf[1]; - } - params.align_corners = _align_corners; - params.half_pixel_centers = _half_pixel_centers; - - switch (_input->data_type()) - { - case OperandType::FLOAT32: - nnfw::cker::ResizeBilinear( - params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()), - getTensorShape(_output), reinterpret_cast<float *>(_output->buffer())); - break; - - case OperandType::QUANT_UINT8_ASYMM: - nnfw::cker::ResizeBilinear( - params, getTensorShape(_input), reinterpret_cast<const uint8_t *>(_input->buffer()), - getTensorShape(_output), reinterpret_cast<uint8_t *>(_output->buffer())); - break; - - case OperandType::UINT8: - case OperandType::BOOL8: - case OperandType::FLOAT16: - case OperandType::INT32: - case OperandType::INT64: - case OperandType::QUANT_INT8_SYMM: - std::runtime_error("ResizeBilinear NYI"); - break; - default: - std::runtime_error("ResizeBilinear unsupported data type"); - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/ResizeBilinearLayer.h b/runtime/onert/backend/cpu/ops/ResizeBilinearLayer.h deleted file mode 100644 index d7ae1c620..000000000 --- a/runtime/onert/backend/cpu/ops/ResizeBilinearLayer.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in riting, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_RESIZEBILINEAR_H__ -#define __ONERT_BACKEND_CPU_OPS_RESIZEBILINEAR_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class ResizeBilinearLayer : public ::onert::exec::IFunction -{ -public: - ResizeBilinearLayer(); - -public: - void configure(const IPortableTensor *input1, IPortableTensor *output, - const IPortableTensor *size, bool align_corners, bool half_pixel_centers); - - void configure(const IPortableTensor *input, IPortableTensor *output, int32_t output_height, - int32_t output_width, bool align_corners, bool half_pixel_centers); - - void run() override; - -private: - const IPortableTensor *_input; - IPortableTensor *_output; - const IPortableTensor *_size; - int32_t _output_height; - int32_t _output_width; - bool _align_corners; - bool _half_pixel_centers; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_RESIZEBILINEAR_H__ diff --git a/runtime/onert/backend/cpu/ops/ReverseLayer.cc b/runtime/onert/backend/cpu/ops/ReverseLayer.cc deleted file mode 100644 index 7979e77a0..000000000 --- a/runtime/onert/backend/cpu/ops/ReverseLayer.cc +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "ReverseLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/Reverse.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -void ReverseLayer::run() -{ - - if (_axis->total_size() != 4) - { - throw std::runtime_error{"Reverse: only support 1 axis"}; - } - int32_t axis = *(reinterpret_cast<int32_t *>(_axis->buffer())); - if (axis < 0) - { - axis += _input->num_dimensions(); - } - - switch (_input->data_type()) - { - case OperandType::FLOAT32: - nnfw::cker::Reverse<float>( - axis, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()), - getTensorShape(_output), reinterpret_cast<float *>(_output->buffer())); - break; - default: - throw std::runtime_error{"Reverse: unsupported data type"}; - } -} - -void ReverseLayer::configure(const IPortableTensor *input, const IPortableTensor *axis, - IPortableTensor *output) -{ - _input = input; - _axis = axis; - _output = output; -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/ReverseLayer.h b/runtime/onert/backend/cpu/ops/ReverseLayer.h deleted file mode 100644 index 9591dae32..000000000 --- a/runtime/onert/backend/cpu/ops/ReverseLayer.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_REVERSE_LAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_REVERSE_LAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class ReverseLayer : public ::onert::exec::IFunction -{ -public: - ReverseLayer() : _input{nullptr}, _axis{nullptr}, _output{nullptr} - { - // DO NOTHING - } - -public: - void configure(const IPortableTensor *input, const IPortableTensor *axis, - IPortableTensor *output); - - void run() override; - -private: - const IPortableTensor *_input; - const IPortableTensor *_axis; - IPortableTensor *_output; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_REVERSE_LAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/SelectLayer.cc b/runtime/onert/backend/cpu/ops/SelectLayer.cc deleted file mode 100644 index 95cfe1df0..000000000 --- a/runtime/onert/backend/cpu/ops/SelectLayer.cc +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "SelectLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/Select.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -SelectLayer::SelectLayer() - : _cond(nullptr), _input_true(nullptr), _input_false(nullptr), _output(nullptr) -{ - // DO NOTHING -} - -void SelectLayer::configure(const IPortableTensor *cond, const IPortableTensor *input_true, - const IPortableTensor *input_false, IPortableTensor *output) -{ - _cond = cond; - _input_true = input_true; - _input_false = input_false; - _output = output; -} - -void SelectLayer::run() -{ - -#define KERNEL_SELECT(type, op) \ - nnfw::cker::op(getTensorShape(_cond), reinterpret_cast<uint8_t *>(_cond->buffer()), \ - getTensorShape(_input_true), reinterpret_cast<type *>(_input_true->buffer()), \ - getTensorShape(_input_false), reinterpret_cast<type *>(_input_false->buffer()), \ - getTensorShape(_output), reinterpret_cast<type *>(_output->buffer())); - -#define KERNEL_SWITCH(type, op) \ - switch (type) \ - { \ - break; \ - case OperandType::FLOAT32: \ - KERNEL_SELECT(float, op); \ - break; \ - default: \ - throw std::runtime_error{"Select: unsupported data type"}; \ - } - - auto input_type = _input_true->data_type(); - bool require_broadcast = - !HaveSameShapes(_input_true, _cond) || !HaveSameShapes(_input_false, _cond); - bool rank_one_select = ((_input_true->num_dimensions() == 1) && !require_broadcast); - - if (rank_one_select) - { - KERNEL_SWITCH(input_type, RankOneSelect); - } - else if (require_broadcast) - { - KERNEL_SWITCH(input_type, BroadcastSelect4DSlow); - } - else - { - KERNEL_SWITCH(input_type, Select); - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/SelectLayer.h b/runtime/onert/backend/cpu/ops/SelectLayer.h deleted file mode 100644 index 2ef50f369..000000000 --- a/runtime/onert/backend/cpu/ops/SelectLayer.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in riting, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
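SelectLayer above dispatches between a rank-one, a broadcast, and a same-shape kernel. The same-shape case reduces to an elementwise conditional copy, sketched here standalone (hypothetical helper, not the cker Select routine):

#include <cstdint>
#include <cstdio>
#include <vector>

// Elementwise select: out[i] = cond[i] ? a[i] : b[i], the same-shape case
// SelectLayer uses when no broadcast is needed.
std::vector<float> selectSameShape(const std::vector<uint8_t> &cond, const std::vector<float> &a,
                                   const std::vector<float> &b)
{
  std::vector<float> out(cond.size());
  for (size_t i = 0; i < cond.size(); ++i)
    out[i] = cond[i] ? a[i] : b[i];
  return out;
}

int main()
{
  for (float v : selectSameShape({1, 0, 1}, {1.f, 2.f, 3.f}, {10.f, 20.f, 30.f})) // 1 20 3
    std::printf("%g ", v);
}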
- */ - -#ifndef __ONERT_BACKEND_CPU_OPS_SELECT_LAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_SELECT_LAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class SelectLayer : public ::onert::exec::IFunction -{ -public: - SelectLayer(); - -public: - void configure(const IPortableTensor *cond, const IPortableTensor *input_true, - const IPortableTensor *input_false, IPortableTensor *output); - - void run() override; - -private: - const IPortableTensor *_cond; - const IPortableTensor *_input_true; - const IPortableTensor *_input_false; - IPortableTensor *_output; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_SELECT_LAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/ShapeLayer.cc b/runtime/onert/backend/cpu/ops/ShapeLayer.cc deleted file mode 100644 index bffb04bc6..000000000 --- a/runtime/onert/backend/cpu/ops/ShapeLayer.cc +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "ShapeLayer.h" - -#include "OperationUtils.h" - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -ShapeLayer::ShapeLayer() : _input(nullptr), _output(nullptr) -{ - // DO NOTHING -} - -template <typename T> void GetRawShape(const IPortableTensor *input, T *output_data) -{ - for (uint32_t i = 0; i < input->num_dimensions(); ++i) - { - output_data[i] = static_cast<T>(input->dimension(i)); - } -} - -void ShapeLayer::shape() -{ - if (_output->data_type() == OperandType::UINT32) - { - GetRawShape(_input, reinterpret_cast<uint32_t *>(_output->buffer())); - } - else if (_output->data_type() == OperandType::INT32) - { - GetRawShape(_input, reinterpret_cast<int32_t *>(_output->buffer())); - } - else - { - throw std::runtime_error{"NYI : not supported output type for ShapeLayer"}; - } -} - -void ShapeLayer::configure(const IPortableTensor *input, IPortableTensor *output) -{ - _input = input; - _output = output; -} - -void ShapeLayer::run() -{ - if (_input->data_type() == OperandType::FLOAT32 || _input->data_type() == OperandType::INT32 || - _input->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - shape(); - } - else - { - throw std::runtime_error{"Shape : unsupported data type"}; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/ShapeLayer.h b/runtime/onert/backend/cpu/ops/ShapeLayer.h deleted file mode 100644 index fb358c7a4..000000000 --- a/runtime/onert/backend/cpu/ops/ShapeLayer.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in riting, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_SHAPELAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_SHAPELAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class ShapeLayer : public ::onert::exec::IFunction -{ -public: - ShapeLayer(); - -public: - void shape(); - - void configure(const IPortableTensor *input, IPortableTensor *output); - - void run() override; - -private: - const IPortableTensor *_input; - IPortableTensor *_output; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_SHAPELAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/SliceLayer.cc b/runtime/onert/backend/cpu/ops/SliceLayer.cc deleted file mode 100644 index 449c073e6..000000000 --- a/runtime/onert/backend/cpu/ops/SliceLayer.cc +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "SliceLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/Slice.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -SliceLayer::SliceLayer() : _input(nullptr), _begin(nullptr), _size(nullptr), _output(nullptr) -{ - // DO NOTHING -} - -template <typename T> -void SliceLayer::GetBeginAndSizeVectors(int dimensions, const IPortableTensor *begin, - const IPortableTensor *size, std::vector<int> *begins, - std::vector<int> *sizes) -{ - for (int idx = dimensions - 1; idx >= 0; --idx) - { - begins->push_back(reinterpret_cast<T *>(begin->buffer())[idx]); - sizes->push_back(reinterpret_cast<T *>(size->buffer())[idx]); - } -} - -template <typename T> void SliceLayer::sliceImpl() -{ - const int kMaxDim = nnfw::cker::Shape::kMaxSmallSize; - - std::vector<int> begins; - std::vector<int> sizes; - begins.reserve(kMaxDim); - sizes.reserve(kMaxDim); - - GetBeginAndSizeVectors<int32_t>(_input->num_dimensions(), _begin, _size, &begins, &sizes); - - // begins : 0-based, sizes : 1-based - for (int i = _input->num_dimensions(); i < kMaxDim; ++i) - { - begins.push_back(0); - sizes.push_back(1); - } - - nnfw::cker::SliceParams op_params; - op_params.begin_count = 4; - op_params.size_count = 4; - for (int i = 0; i < 4; ++i) - { - op_params.begin[i] = begins[3 - i]; - op_params.size[i] = sizes[3 - i]; - } - - nnfw::cker::Slice(op_params, getExtendedTensorShape(_input), - reinterpret_cast<const T *>(_input->buffer()), - reinterpret_cast<T *>(_output->buffer())); -} - -void SliceLayer::configure(const IPortableTensor *input, const IPortableTensor *begin, - const IPortableTensor *size, IPortableTensor *output) -{ - _input = input; - _output = output; - _begin = begin; - _size = size; -} - -void SliceLayer::run() -{ - if (_input->data_type() == OperandType::FLOAT32) - { - sliceImpl<float>(); - } - else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - sliceImpl<uint8_t>(); - } - else - { - throw std::runtime_error{"Slice: unsupported data type"}; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/SliceLayer.h b/runtime/onert/backend/cpu/ops/SliceLayer.h deleted file mode 100644 index 650e2c97a..000000000 --- a/runtime/onert/backend/cpu/ops/SliceLayer.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef __ONERT_BACKEND_CPU_OPS_SLICELAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_SLICELAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class SliceLayer : public ::onert::exec::IFunction -{ -public: - SliceLayer(); - -public: - void configure(const IPortableTensor *input, const IPortableTensor *begin, - const IPortableTensor *size, IPortableTensor *output); - - void run() override; - -private: - template <typename T> void sliceImpl(); - - template <typename T> - void GetBeginAndSizeVectors(int dimensions, const IPortableTensor *begin, - const IPortableTensor *size, std::vector<int> *begins, - std::vector<int> *sizes); - -private: - const IPortableTensor *_input; - const IPortableTensor *_begin; - const IPortableTensor *_size; - IPortableTensor *_output; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_SLICELAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/SoftMaxLayer.cc b/runtime/onert/backend/cpu/ops/SoftMaxLayer.cc deleted file mode 100644 index b42be3042..000000000 --- a/runtime/onert/backend/cpu/ops/SoftMaxLayer.cc +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "SoftMaxLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/SoftMax.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -SoftMaxLayer::SoftMaxLayer() : _input(nullptr), _output(nullptr), _beta(0.0) -{ - // DO NOTHING -} - -void SoftMaxLayer::softmaxFloat32() -{ - if (getNumberOfDimensions(_input) == 1) - { - uint32_t input_size = getNumberOfElements(_input); - nnfw::cker::Softmax(reinterpret_cast<const float *>(_input->buffer()), input_size, 1, _beta, - reinterpret_cast<float *>(_output->buffer())); - } - else if (getNumberOfDimensions(_input) == 2) - { - uint32_t batch_size = getSizeOfDimension(_input, 0); - if (batch_size == 0) - throw std::runtime_error("batch_size should not be 0"); - - uint32_t input_size = getNumberOfElements(_input) / batch_size; - nnfw::cker::Softmax(reinterpret_cast<const float *>(_input->buffer()), input_size, batch_size, - _beta, reinterpret_cast<float *>(_output->buffer())); - } - else if (getNumberOfDimensions(_input) == 4) - { - nnfw::cker::SoftmaxParams op_params; - op_params.beta = _beta; - nnfw::cker::Softmax(op_params, getTensorShape(_input), - reinterpret_cast<const float *>(_input->buffer()), getTensorShape(_output), - reinterpret_cast<float *>(_output->buffer())); - } - else - { - nnfw::cker::SoftmaxParams op_params; - op_params.beta = _beta; - nnfw::cker::reference::Softmax( - op_params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()), - getTensorShape(_output), reinterpret_cast<float *>(_output->buffer())); - } -} - -void SoftMaxLayer::softmaxQuant8() -{ - nnfw::cker::Shape descrIn4D(4); - - if (getNumberOfDimensions(_input) == 2) - { - auto batch_size = getSizeOfDimension(_input, 0); - if (batch_size == 0) - throw std::runtime_error("batch_size should not be 0"); - - auto input_size = getNumberOfElements(_input) / batch_size; - descrIn4D.SetDim(0, batch_size); - descrIn4D.SetDim(1, 1); - descrIn4D.SetDim(2, 1); - descrIn4D.SetDim(3, input_size); - } - else if (getNumberOfDimensions(_input) == 4) - { - descrIn4D.SetDim(0, _input->dimension(0)); - descrIn4D.SetDim(1, _input->dimension(1)); - descrIn4D.SetDim(2, _input->dimension(2)); - descrIn4D.SetDim(3, _input->dimension(3)); - } - else - { - throw std::runtime_error{"only 2D and 4D tensors supported"}; - } - if (_output->data_offset() != 0 || _output->data_scale() != 1.f / 256) - { - throw std::runtime_error{"incorrect scale / offset for output"}; - } - static const int32_t kScaledDiffIntegerBits = 5; - const double input_beta_real_multiplier = std::min( - 1.0 * _beta * _input->data_scale() * (1 << (31 - kScaledDiffIntegerBits)), (1ll << 31) - 1.0); - int32_t input_multiplier = 0; - int32_t input_left_shift = 0; - QuantizeMultiplierGreaterThanOne(input_beta_real_multiplier, &input_multiplier, - &input_left_shift); - float diff_min = -1.0f * CalculateInputRadius(kScaledDiffIntegerBits, input_left_shift); - - nnfw::cker::SoftmaxParams op_params; - op_params.input_multiplier = input_multiplier; - op_params.input_left_shift = input_left_shift; - op_params.diff_min = diff_min; - nnfw::cker::Softmax(op_params, descrIn4D, reinterpret_cast<const uint8_t *>(_input->buffer()), - descrIn4D, reinterpret_cast<uint8_t *>(_output->buffer())); -} - -void SoftMaxLayer::configure(const IPortableTensor *input, const float beta, - IPortableTensor *output) -{ - _input = input; - _output = output; - _beta = beta; -} - -void SoftMaxLayer::run() -{ - if (_input->data_type() == OperandType::FLOAT32) - { - softmaxFloat32(); - } - 
else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - softmaxQuant8(); - } - else - { - throw std::runtime_error{"SoftMax: unsupported data type"}; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/SoftMaxLayer.h b/runtime/onert/backend/cpu/ops/SoftMaxLayer.h deleted file mode 100644 index d0c704c2c..000000000 --- a/runtime/onert/backend/cpu/ops/SoftMaxLayer.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_SOFTMAXLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_SOFTMAXLAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class SoftMaxLayer : public ::onert::exec::IFunction -{ -public: - SoftMaxLayer(); - -public: - void softmaxFloat32(); - - void softmaxQuant8(); - - void configure(const IPortableTensor *input, const float beta, IPortableTensor *output); - - void run() override; - -private: - const IPortableTensor *_input; - IPortableTensor *_output; - - float _beta; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_SOFTMAXLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/SpaceToBatchNDLayer.cc b/runtime/onert/backend/cpu/ops/SpaceToBatchNDLayer.cc deleted file mode 100644 index 896e262ba..000000000 --- a/runtime/onert/backend/cpu/ops/SpaceToBatchNDLayer.cc +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "SpaceToBatchNDLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/SpaceToBatchND.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ -SpaceToBatchNDLayer::SpaceToBatchNDLayer() - : _input(nullptr), _block_shape(nullptr), _padding(nullptr), _output(nullptr) -{ - // DO NOTHING -} - -// TO DO : move into shape inferer -void SpaceToBatchNDLayer::checkDimension() -{ - const int kSpatialDimensionNum = 2; - if (_block_shape->dimension(0) != kSpatialDimensionNum) - { - throw std::runtime_error("SpaceToBatchND : block_shape(block_size) tensor's rank is wrong\n"); - } - - // Ensures the input height and width (with padding) is a multiple of block - // shape height and width. 
- for (int dim = 0; dim < kSpatialDimensionNum; ++dim) - { - int final_dim_size = - (_input->dimension(dim + 1) + reinterpret_cast<int32_t *>(_padding->buffer())[dim * 2] + - reinterpret_cast<int32_t *>(_padding->buffer())[dim * 2 + 1]); - - if (final_dim_size % reinterpret_cast<int32_t *>(_block_shape->buffer())[dim] != 0) - { - throw std::runtime_error( - "SpaceToBatchND : padded input's dimension is not a multiple of block size\n"); - } - - if ((int32_t)_output->dimension(dim + 1) != - final_dim_size / reinterpret_cast<int32_t *>(_block_shape->buffer())[dim]) - { - throw std::runtime_error("SpaceToBatchND : wrong output dimension\n"); - } - } -} - -template <> uint32_t SpaceToBatchNDLayer::getPad<float>() { return 0; } -template <> uint32_t SpaceToBatchNDLayer::getPad<uint8_t>() { return _output->data_offset(); } - -template <typename T> void SpaceToBatchNDLayer::spaceToBatchND() -{ - checkDimension(); - - nnfw::cker::SpaceToBatchParams params; - params.output_offset = getPad<T>(); - - nnfw::cker::SpaceToBatchND( - params, getTensorShape(_input), reinterpret_cast<const T *>(_input->buffer()), - getTensorShape(_block_shape), reinterpret_cast<const int32_t *>(_block_shape->buffer()), - getTensorShape(_padding), reinterpret_cast<const int32_t *>(_padding->buffer()), - getTensorShape(_output), reinterpret_cast<T *>(_output->buffer())); -} - -void SpaceToBatchNDLayer::configure(const IPortableTensor *input, - const IPortableTensor *block_shape, - const IPortableTensor *padding, IPortableTensor *output) -{ - _input = input; - _block_shape = block_shape; - _padding = padding; - _output = output; -} - -void SpaceToBatchNDLayer::run() -{ - if (_input->data_type() == OperandType::FLOAT32) - { - spaceToBatchND<float>(); - } - else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - spaceToBatchND<uint8_t>(); - } - else - { - throw std::runtime_error{"SpaceToBatchND: unsupported data type"}; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/SpaceToBatchNDLayer.h b/runtime/onert/backend/cpu/ops/SpaceToBatchNDLayer.h deleted file mode 100644 index 6f4638719..000000000 --- a/runtime/onert/backend/cpu/ops/SpaceToBatchNDLayer.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in riting, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
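The checkDimension guard above requires each padded spatial extent to be divisible by the block size and to match the declared output. For example, input height 5 with padding 1 + 2 gives a padded extent of 8, which block size 2 maps to output height 4, while a padded extent of 7 would be rejected. A tiny standalone sketch of that check for one dimension:

#include <cstdio>
#include <stdexcept>

// Sketch of the SpaceToBatchND spatial-dimension check for a single dimension.
int checkedOutputDim(int input_dim, int pad_before, int pad_after, int block)
{
  const int padded = input_dim + pad_before + pad_after;
  if (padded % block != 0)
    throw std::runtime_error("padded input dimension is not a multiple of block size");
  return padded / block;
}

int main()
{
  std::printf("%d\n", checkedOutputDim(5, 1, 2, 2)); // (5 + 1 + 2) / 2 = 4
}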
- */ - -#ifndef __ONERT_BACKEND_CPU_OPS_SPACE_TO_BATCH_ND_LAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_SPACE_TO_BATCH_ND_LAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ -class SpaceToBatchNDLayer : public ::onert::exec::IFunction -{ -public: - SpaceToBatchNDLayer(); - - void configure(const IPortableTensor *input, const IPortableTensor *block_shape, - const IPortableTensor *padding, IPortableTensor *output); - - void run() override; - -private: - void checkDimension(); - - template <typename T> uint32_t getPad(); - - template <typename T> void spaceToBatchND(); - - const IPortableTensor *_input; - const IPortableTensor *_block_shape; - const IPortableTensor *_padding; - IPortableTensor *_output; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_SPACE_TO_BATCH_ND_LAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/SpaceToDepthLayer.cc b/runtime/onert/backend/cpu/ops/SpaceToDepthLayer.cc deleted file mode 100644 index a0869aed8..000000000 --- a/runtime/onert/backend/cpu/ops/SpaceToDepthLayer.cc +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "SpaceToDepthLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/SpaceToDepth.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ -SpaceToDepthLayer::SpaceToDepthLayer() : _input(nullptr), _block_size(0), _output(nullptr) -{ - // DO NOTHING -} - -template <typename T> void SpaceToDepthLayer::spaceToDepth() -{ - - nnfw::cker::SpaceToDepthParams params; - params.block_size = _block_size; - - nnfw::cker::SpaceToDepth(params, getTensorShape(_input), - reinterpret_cast<const T *>(_input->buffer()), getTensorShape(_output), - reinterpret_cast<T *>(_output->buffer())); -} - -void SpaceToDepthLayer::configure(const IPortableTensor *input, const int32_t block_size, - IPortableTensor *output) -{ - _input = input; - _block_size = block_size; - _output = output; -} - -void SpaceToDepthLayer::run() -{ - if (_input->data_type() == OperandType::FLOAT32) - { - spaceToDepth<float>(); - } - else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - spaceToDepth<uint8_t>(); - } - else - { - throw std::runtime_error{"SpaceToDepth: unsupported data type"}; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/SpaceToDepthLayer.h b/runtime/onert/backend/cpu/ops/SpaceToDepthLayer.h deleted file mode 100644 index c11ef2b0a..000000000 --- a/runtime/onert/backend/cpu/ops/SpaceToDepthLayer.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. 
All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in riting, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_SPACE_TO_DEPTH_LAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_SPACE_TO_DEPTH_LAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ -class SpaceToDepthLayer : public ::onert::exec::IFunction -{ -public: - SpaceToDepthLayer(); - - void configure(const IPortableTensor *input, const int32_t block_size, IPortableTensor *output); - - void run() override; - -private: - template <typename T> void spaceToDepth(); - - const IPortableTensor *_input; - int32_t _block_size; - IPortableTensor *_output; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_SPACE_TO_BATCH_ND_LAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/SplitLayer.cc b/runtime/onert/backend/cpu/ops/SplitLayer.cc deleted file mode 100644 index 922cde2e3..000000000 --- a/runtime/onert/backend/cpu/ops/SplitLayer.cc +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
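The SpaceToDepth kernel above folds each block_size x block_size spatial block into the channel dimension, so an NHWC tensor of shape {N, H, W, C} becomes {N, H/bs, W/bs, C*bs*bs}. A reference loop with the standard element mapping (a sketch only; it assumes H and W are multiples of bs and is not the cker implementation):

#include <cstdint>

// Reference SpaceToDepth on an NHWC float tensor with block size bs.
void spaceToDepthRef(const float *in, float *out, int N, int H, int W, int C, int bs)
{
  const int oH = H / bs, oW = W / bs, oC = C * bs * bs;
  for (int n = 0; n < N; ++n)
    for (int oh = 0; oh < oH; ++oh)
      for (int ow = 0; ow < oW; ++ow)
        for (int di = 0; di < bs; ++di)
          for (int dj = 0; dj < bs; ++dj)
            for (int c = 0; c < C; ++c)
            {
              const int ih = oh * bs + di, iw = ow * bs + dj;
              const int oc = (di * bs + dj) * C + c; // block offset interleaved ahead of channels
              out[((n * oH + oh) * oW + ow) * oC + oc] = in[((n * H + ih) * W + iw) * C + c];
            }
}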
- */ - -#include "SplitLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/Split.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -SplitLayer::SplitLayer() : _input(nullptr), _axis(nullptr), _num_splits(0), _outputs() -{ - // DO NOTHING -} - -template <typename T> void SplitLayer::split(void) -{ - nnfw::cker::SplitParams op_params; - if (_axis->total_size() != sizeof(int32_t)) - { - throw std::runtime_error("ArgMinMax: wrong shape of axis"); - } - auto axis = *reinterpret_cast<const int32_t *>(_axis->buffer()); - if (axis < 0) - { - axis += _input->num_dimensions(); - } - op_params.axis = axis; - op_params.num_split = _num_splits; - - std::vector<T *> outputPtrs; - - for (const auto output : _outputs) - { - assert(output->total_size() == sizeOfData(output->data_type(), output->getShape().dims())); - outputPtrs.emplace_back(reinterpret_cast<T *>(output->buffer())); - } - - assert(_input->total_size() == sizeOfData(_input->data_type(), _input->getShape().dims())); - nnfw::cker::Split<T>(op_params, getTensorShape(_input), reinterpret_cast<T *>(_input->buffer()), - getTensorShape(_outputs[0]), outputPtrs.data()); -} - -void SplitLayer::configure(const IPortableTensor *input, const IPortableTensor *axis, - uint16_t num_splits, std::vector<IPortableTensor *> &outputs) -{ - assert(input != nullptr); - - _num_splits = num_splits; - _input = input; - _axis = axis; - _outputs = outputs; -} - -void SplitLayer::run() -{ - if (_input->data_type() == OperandType::FLOAT32) - { - split<float>(); - } - else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - split<uint8_t>(); - } - else if (_input->data_type() == OperandType::INT32) - { - split<int32_t>(); - } - else if (_input->data_type() == OperandType::INT64) - { - split<int64_t>(); - } - else - { - throw std::runtime_error{"Split: unsupported input type"}; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/SplitLayer.h b/runtime/onert/backend/cpu/ops/SplitLayer.h deleted file mode 100644 index 090f87166..000000000 --- a/runtime/onert/backend/cpu/ops/SplitLayer.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
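SplitLayer::split() above reads a scalar axis tensor, normalizes a negative axis, and cuts the input into num_splits equal slices along that axis. The same data movement over a flat row-major buffer looks like this (an illustrative sketch under those layout assumptions, not the cker Split kernel):

#include <cassert>
#include <cstdint>
#include <vector>

// Splits `input` (row-major, shape `dims`) into `num_splits` equal slices along `axis`.
std::vector<std::vector<float>> splitEqual(const std::vector<float> &input,
                                           const std::vector<int32_t> &dims, int32_t axis,
                                           int32_t num_splits)
{
  if (axis < 0)
    axis += static_cast<int32_t>(dims.size()); // negative axis counts from the back
  assert(dims[axis] % num_splits == 0);

  int64_t outer = 1, inner = 1;
  for (int32_t i = 0; i < axis; ++i) outer *= dims[i];
  for (size_t i = axis + 1; i < dims.size(); ++i) inner *= dims[i];
  const int64_t copy = (dims[axis] / num_splits) * inner; // contiguous run per slice

  std::vector<std::vector<float>> outputs(num_splits);
  for (int64_t o = 0; o < outer; ++o)
    for (int32_t s = 0; s < num_splits; ++s)
    {
      const float *src = input.data() + (o * num_splits + s) * copy;
      outputs[s].insert(outputs[s].end(), src, src + copy);
    }
  return outputs;
}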
- */ - -#ifndef __ONERT_BACKEND_CPU_OPS_SPLITLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_SPLITLAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class SplitLayer : public ::onert::exec::IFunction -{ -public: - SplitLayer(); - -public: - template <typename T> void split(void); - - void configure(const IPortableTensor *input, const IPortableTensor *axis, uint16_t num_splits, - std::vector<IPortableTensor *> &outputs); - - void run() override; - -private: - const IPortableTensor *_input; - const IPortableTensor *_axis; - uint16_t _num_splits; - std::vector<IPortableTensor *> _outputs; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_SPLITLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/SplitVLayer.cc b/runtime/onert/backend/cpu/ops/SplitVLayer.cc deleted file mode 100644 index d6ca12442..000000000 --- a/runtime/onert/backend/cpu/ops/SplitVLayer.cc +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "SplitVLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/SplitV.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -SplitVLayer::SplitVLayer() - : _input(nullptr), _size_splits(nullptr), _split_dim(nullptr), _num_splits(0), _outputs() -{ - // DO NOTHING -} - -template <typename T> void SplitVLayer::splitV(void) -{ - nnfw::cker::SplitVParams op_params; - op_params.axis = *(reinterpret_cast<const int32_t *>(_split_dim->buffer())); - op_params.num_split = _num_splits; - - std::vector<T *> outputPtrs; - std::vector<nnfw::cker::Shape> outshape; - - for (const auto output : _outputs) - { - assert(output->total_size() == sizeOfData(output->data_type(), output->getShape().dims())); - outputPtrs.emplace_back(reinterpret_cast<T *>(output->buffer())); - outshape.emplace_back(getTensorShape(output)); - } - - assert(_input->total_size() == sizeOfData(_input->data_type(), _input->getShape().dims())); - nnfw::cker::SplitV<T>(op_params, getTensorShape(_input), reinterpret_cast<T *>(_input->buffer()), - outshape, outputPtrs.data()); -} - -void SplitVLayer::configure(const IPortableTensor *input, const IPortableTensor *size_splits, - const IPortableTensor *split_dim, uint16_t num_splits, - std::vector<IPortableTensor *> &outputs) -{ - assert(input != nullptr); - - _num_splits = num_splits; - _size_splits = size_splits; - _input = input; - _split_dim = split_dim; - _outputs = outputs; -} - -void SplitVLayer::run() -{ - if (_input->data_type() == OperandType::FLOAT32) - { - splitV<float>(); - } - else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - splitV<uint8_t>(); - } - else if (_input->data_type() == OperandType::INT32) - { - splitV<int32_t>(); - } - else if (_input->data_type() == OperandType::INT64) - { - splitV<int64_t>(); - 
} - else - { - throw std::runtime_error{"SplitV: unsupported input type"}; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/SplitVLayer.h b/runtime/onert/backend/cpu/ops/SplitVLayer.h deleted file mode 100644 index 98f2f4406..000000000 --- a/runtime/onert/backend/cpu/ops/SplitVLayer.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_SPLIT_V_LAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_SPLIT_V_LAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class SplitVLayer : public ::onert::exec::IFunction -{ -public: - SplitVLayer(); - -public: - template <typename T> void splitV(void); - - void configure(const IPortableTensor *input, const IPortableTensor *size_splits, - const IPortableTensor *size_dim, uint16_t num_splits, - std::vector<IPortableTensor *> &outputs); - - void run() override; - -private: - const IPortableTensor *_input; - const IPortableTensor *_size_splits; - const IPortableTensor *_split_dim; - uint16_t _num_splits; - std::vector<IPortableTensor *> _outputs; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_SPLIT_V_LAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/SquaredDiffLayer.cc b/runtime/onert/backend/cpu/ops/SquaredDiffLayer.cc deleted file mode 100644 index cf67a5c00..000000000 --- a/runtime/onert/backend/cpu/ops/SquaredDiffLayer.cc +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
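SplitV, handled above, differs from Split only in that the slice sizes along the split axis are given explicitly by the size_splits tensor rather than being equal. A flat row-major sketch of that variant (illustrative names and layout assumptions, not the cker SplitV kernel; the special -1 "remainder" size some frontends allow is assumed to be already resolved):

#include <cstdint>
#include <numeric>
#include <vector>

// Splits `input` along `axis` into slices whose sizes are listed in `size_splits`.
std::vector<std::vector<float>> splitVRef(const std::vector<float> &input,
                                          const std::vector<int32_t> &dims, int32_t axis,
                                          const std::vector<int32_t> &size_splits)
{
  int64_t outer = 1, inner = 1;
  for (int32_t i = 0; i < axis; ++i) outer *= dims[i];
  for (size_t i = axis + 1; i < dims.size(); ++i) inner *= dims[i];
  const int64_t axis_size = std::accumulate(size_splits.begin(), size_splits.end(), int64_t{0});

  std::vector<std::vector<float>> outputs(size_splits.size());
  for (int64_t o = 0; o < outer; ++o)
  {
    int64_t offset = 0; // running position along the split axis
    for (size_t s = 0; s < size_splits.size(); ++s)
    {
      const float *src = input.data() + (o * axis_size + offset) * inner;
      outputs[s].insert(outputs[s].end(), src, src + size_splits[s] * inner);
      offset += size_splits[s];
    }
  }
  return outputs;
}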
- */ - -#include "SquaredDiffLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/SqDiff.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -SqDiffLayer::SqDiffLayer() : _input1(nullptr), _input2(nullptr), _output(nullptr) -{ - // DO NOTHING -} - -void SqDiffLayer::SqDiffFloat32() -{ - nnfw::cker::SqDiff(getTensorShape(_input1), reinterpret_cast<const float *>(_input1->buffer()), - getTensorShape(_input2), reinterpret_cast<const float *>(_input2->buffer()), - getTensorShape(_output), reinterpret_cast<float *>(_output->buffer())); -} - -void SqDiffLayer::configure(const IPortableTensor *input1, const IPortableTensor *input2, - IPortableTensor *output) -{ - _input1 = input1; - _input2 = input2; - _output = output; -} - -void SqDiffLayer::run() -{ - if (_input1->data_type() == OperandType::FLOAT32) - { - SqDiffFloat32(); - } - else - { - throw std::runtime_error{"SquaredDiff: unsupported data type"}; - } -} -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/SquaredDiffLayer.h b/runtime/onert/backend/cpu/ops/SquaredDiffLayer.h deleted file mode 100644 index 386eea9ae..000000000 --- a/runtime/onert/backend/cpu/ops/SquaredDiffLayer.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in riting, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_SQDIFFLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_SQDIFFLAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class SqDiffLayer : public ::onert::exec::IFunction -{ -public: - SqDiffLayer(); - -public: - void SqDiffFloat32(); - - void configure(const IPortableTensor *input1, const IPortableTensor *input2, - IPortableTensor *output); - - void run() override; - -private: - const IPortableTensor *_input1; - const IPortableTensor *_input2; - IPortableTensor *_output; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_SQDIFFLAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.cc b/runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.cc deleted file mode 100644 index b8dfcb4b5..000000000 --- a/runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.cc +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
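The SquaredDifference operation above is a simple elementwise kernel, out[i] = (a[i] - b[i])^2. A same-shape sketch (a full kernel would typically also handle broadcasting, which is omitted here):

#include <cstddef>

// Elementwise squared difference for two same-shaped float buffers of `size` elements.
void squaredDiffRef(const float *a, const float *b, float *out, std::size_t size)
{
  for (std::size_t i = 0; i < size; ++i)
  {
    const float d = a[i] - b[i];
    out[i] = d * d;
  }
}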
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "StatelessRandomUniformLayer.h" - -#include <cker/operation/StatelessRandomUniform.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -StatelessRandomUniformLayer::StatelessRandomUniformLayer() - : _shape(nullptr), _seed(nullptr), _output(nullptr) -{ - // DO NOTHING -} - -void StatelessRandomUniformLayer::configure(const IPortableTensor *shape, - const IPortableTensor *seed, IPortableTensor *output) -{ - _shape = shape; - _seed = seed; - _output = output; -} - -void StatelessRandomUniformLayer::StatelessRandomUniformFloat32() -{ - nnfw::cker::StatelessRandomUniform( - getTensorShape(_shape), reinterpret_cast<const int *>(_shape->buffer()), - getTensorShape(_seed), reinterpret_cast<const int *>(_seed->buffer()), - getTensorShape(_output), reinterpret_cast<float *>(_output->buffer())); -} - -void StatelessRandomUniformLayer::run() -{ - switch (_output->data_type()) - { - // ToDo : It need to support INT8 and UINT8 also when will be applied quantization. - case OperandType::FLOAT32: - StatelessRandomUniformFloat32(); - break; - default: - throw std::runtime_error{"StatelessRandomUniformLayer: unsupported data type"}; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.h b/runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.h deleted file mode 100644 index ef11d623d..000000000 --- a/runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_STATELESS_RANDOM_UNIFORM_H__ -#define __ONERT_BACKEND_CPU_OPS_STATELESS_RANDOM_UNIFORM_H__ - -#include <backend/IPortableTensor.h> -#include "OperationUtils.h" - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class StatelessRandomUniformLayer : public ::onert::exec::IFunction -{ -public: - StatelessRandomUniformLayer(); - -public: - void configure(const IPortableTensor *shape, const IPortableTensor *seed, - IPortableTensor *output); - - void StatelessRandomUniformFloat32(); - - void run() override; - -private: - const IPortableTensor *_shape; - const IPortableTensor *_seed; - - IPortableTensor *_output; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_STATELESS_RANDOM_UNIFORM_H__ diff --git a/runtime/onert/backend/cpu/ops/StridedSliceLayer.cc b/runtime/onert/backend/cpu/ops/StridedSliceLayer.cc deleted file mode 100644 index f77f4d691..000000000 --- a/runtime/onert/backend/cpu/ops/StridedSliceLayer.cc +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. 
All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "StridedSliceLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/StridedSlice.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -StridedSliceLayer::StridedSliceLayer() - : _input(nullptr), _begin(nullptr), _end(nullptr), _strides(nullptr), _output(nullptr), - _begin_mask(0), _ellipsis_mask(0), _end_mask(0), _new_axis_mask(0), _shrink_axis_mask(0) -{ -} - -template <typename T> void StridedSliceLayer::stridedSliceImpl() -{ - const auto input_shape = getTensorShape(_input); - const auto output_shape = getTensorShape(_output); - auto op_params = nnfw::cker::buildStridedSliceParams( - reinterpret_cast<uint32_t *>(_begin->buffer()), reinterpret_cast<uint32_t *>(_end->buffer()), - reinterpret_cast<uint32_t *>(_strides->buffer()), _begin_mask, _end_mask, _shrink_axis_mask, - input_shape.DimensionsCount()); - - nnfw::cker::checkOutputSize(op_params, input_shape, output_shape, input_shape.DimensionsCount()); - - nnfw::cker::StridedSlice(op_params, input_shape, reinterpret_cast<const T *>(_input->buffer()), - output_shape, reinterpret_cast<T *>(_output->buffer())); -} - -void StridedSliceLayer::configure(const IPortableTensor *input, const IPortableTensor *begin, - const IPortableTensor *end, const IPortableTensor *strides, - IPortableTensor *output, const int32_t begin_mask, - const int32_t end_mask, const int32_t shrink_axis_mask) -{ - _input = input; - _begin = begin; - _end = end; - _strides = strides; - _output = output; - - _begin_mask = begin_mask; - _ellipsis_mask = 0; - _end_mask = end_mask; - _new_axis_mask = 0; - _shrink_axis_mask = shrink_axis_mask; -} - -void StridedSliceLayer::run() -{ - if (_input->data_type() == OperandType::FLOAT32) - { - stridedSliceImpl<float>(); - } - else if (_input->data_type() == OperandType::INT32) - { - stridedSliceImpl<int32_t>(); - } - else if (_input->data_type() == OperandType::INT64) - { - stridedSliceImpl<int64_t>(); - } - else - { - throw std::runtime_error{"StridedSlice: unsupported data type"}; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/StridedSliceLayer.h b/runtime/onert/backend/cpu/ops/StridedSliceLayer.h deleted file mode 100644 index 468408152..000000000 --- a/runtime/onert/backend/cpu/ops/StridedSliceLayer.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
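StridedSliceLayer above builds per-axis begin/end/stride parameters plus bit masks and delegates to the cker kernel. The core indexing idea reduces, per axis, to the following 1-D picture (a sketch assuming a positive stride; the begin/end/shrink-axis masks and negative strides supported by the layer are left out for brevity):

#include <algorithm>
#include <cstdint>
#include <vector>

// Collects in[begin], in[begin+stride], ... strictly before `end`.
// Negative begin/end indices count from the back; both are clamped to [0, size].
std::vector<float> stridedSlice1D(const std::vector<float> &in, int32_t begin, int32_t end,
                                  int32_t stride)
{
  const int32_t n = static_cast<int32_t>(in.size());
  if (begin < 0) begin += n;
  if (end < 0) end += n;
  begin = std::max(0, std::min(begin, n));
  end = std::max(0, std::min(end, n));

  std::vector<float> out;
  for (int32_t i = begin; i < end; i += stride)
    out.push_back(in[i]);
  return out;
}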
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_STRIDEDSLICELAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_STRIDEDSLICELAYER_H__ - -#include <backend/IPortableTensor.h> -#include "OperationUtils.h" - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class StridedSliceLayer : public ::onert::exec::IFunction -{ -public: - StridedSliceLayer(); - -public: - void configure(const IPortableTensor *input, const IPortableTensor *begin, - const IPortableTensor *end, const IPortableTensor *strides, - IPortableTensor *output, const int32_t begin_mask, const int32_t end_mask, - const int32_t shrink_axis_mask); - void run() override; - -private: - template <typename T> void stridedSliceImpl(); - -private: - const IPortableTensor *_input; - const IPortableTensor *_begin; - const IPortableTensor *_end; - const IPortableTensor *_strides; - IPortableTensor *_output; - - int32_t _begin_mask; - int32_t _ellipsis_mask; - int32_t _end_mask; - int32_t _new_axis_mask; - int32_t _shrink_axis_mask; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_STRIDEDSLICELAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/TileLayer.cc b/runtime/onert/backend/cpu/ops/TileLayer.cc deleted file mode 100644 index bfc371972..000000000 --- a/runtime/onert/backend/cpu/ops/TileLayer.cc +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "TileLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/Tile.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -TileLayer::TileLayer() : _input(nullptr), _multipliers(nullptr), _output(nullptr) -{ - // DO NOTHING -} - -void TileLayer::tileFloat32() -{ - TileOneDimension(getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()), - reinterpret_cast<const int *>(_multipliers->buffer()), - reinterpret_cast<float *>(_output->buffer()), 0); -} - -void TileLayer::tileQuant8() -{ - // cker quant8 tile is not implemented yet - throw std::runtime_error{"NYI"}; -} - -void TileLayer::configure(const IPortableTensor *input, const IPortableTensor *multipliers, - IPortableTensor *output) -{ - _input = input; - _multipliers = multipliers; - _output = output; -} - -void TileLayer::run() -{ - if (_input->data_type() == OperandType::FLOAT32) - { - tileFloat32(); - } - else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - tileQuant8(); - } - else - { - throw std::runtime_error{"Tile: unsupported data type"}; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/TileLayer.h b/runtime/onert/backend/cpu/ops/TileLayer.h deleted file mode 100644 index d7b793ecc..000000000 --- a/runtime/onert/backend/cpu/ops/TileLayer.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_TILELAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_TILELAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class TileLayer : public ::onert::exec::IFunction -{ -public: - TileLayer(); - -public: - void tileFloat32(); - - void tileQuant8(); - - void configure(const IPortableTensor *input, const IPortableTensor *_multipliers, - IPortableTensor *output); - - void run() override; - -private: - const IPortableTensor *_input; - const IPortableTensor *_multipliers; - IPortableTensor *_output; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_TILELAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/TransposeLayer.cc b/runtime/onert/backend/cpu/ops/TransposeLayer.cc deleted file mode 100644 index 3362c3396..000000000 --- a/runtime/onert/backend/cpu/ops/TransposeLayer.cc +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
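TileLayer above repeats the input along each dimension according to the multipliers tensor, delegating to a one-dimension-at-a-time helper. A small recursive sketch of the same semantics over a row-major buffer (illustrative only; call it as tileRef(in, dims, multipliers, 0, out) with an empty output vector):

#include <cstddef>
#include <cstdint>
#include <vector>

// Appends the tiled contents of `in` (shape `dims`, row-major) to `out`,
// repeating dimension d `multipliers[d]` times.
void tileRef(const float *in, const std::vector<int32_t> &dims,
             const std::vector<int32_t> &multipliers, std::size_t d, std::vector<float> &out)
{
  if (d == dims.size())
  {
    out.push_back(*in); // scalar base case
    return;
  }
  int64_t inner = 1; // element count of one slice of the remaining inner dimensions
  for (std::size_t i = d + 1; i < dims.size(); ++i) inner *= dims[i];

  for (int32_t m = 0; m < multipliers[d]; ++m)
    for (int32_t i = 0; i < dims[d]; ++i)
      tileRef(in + i * inner, dims, multipliers, d + 1, out);
}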
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "TransposeLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/Transpose.h> -#include <numeric> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -TransposeLayer::TransposeLayer() : _input(nullptr), _perm(nullptr), _output(nullptr) -{ - // DO NOTHING -} - -template <typename T> void TransposeLayer::transpose() -{ - nnfw::cker::TransposeParams param; - assert(_perm->num_dimensions() == 1); - - param.perm_count = _input->num_dimensions(); - if (_perm->dimension(0) == 0) // This means _perm is (n-1...0) - { - const auto begin = param.perm; - const auto end = param.perm + _input->num_dimensions(); - std::iota(begin, end, 0); - std::reverse(begin, end); - } - else - { - assert(param.perm_count == static_cast<int>(_perm->dimension(0))); - for (auto i = 0; i < param.perm_count; i++) - { - param.perm[i] = *(reinterpret_cast<const int32_t *>(_perm->buffer()) + i); - } - } - - nnfw::cker::Transpose(param, getTensorShape(_input), - reinterpret_cast<const T *>(_input->buffer()), getTensorShape(_output), - reinterpret_cast<T *>(_output->buffer())); -} - -void TransposeLayer::transposeQuant8() -{ - if (_input->data_offset() != _output->data_offset()) - { - throw std::runtime_error("TransposeLayer : qassym8 input and output offsets unmatched"); - } - - if (_input->data_scale() != _output->data_scale()) - { - throw std::runtime_error("TransposeLayer : qassym8 input and output scales unmatched"); - } - - transpose<uint8_t>(); -} - -void TransposeLayer::configure(const IPortableTensor *input, const IPortableTensor *perm, - IPortableTensor *output) -{ - _input = input; - _perm = perm; - _output = output; -} - -void TransposeLayer::run() -{ - if (_input->data_type() == OperandType::FLOAT32) - { - transpose<float>(); - } - else if (_input->data_type() == OperandType::INT32) - { - transpose<int32_t>(); - } - else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM) - { - transposeQuant8(); - } - else - { - throw std::runtime_error{"Transpose: unsupported data type"}; - } -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/TransposeLayer.h b/runtime/onert/backend/cpu/ops/TransposeLayer.h deleted file mode 100644 index c8e9f8ae7..000000000 --- a/runtime/onert/backend/cpu/ops/TransposeLayer.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
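Note how TransposeLayer above treats an empty perm tensor (dimension(0) == 0) as "reverse all axes". A standalone sketch of that default plus a generic reference transpose over a row-major buffer (helper names and layout assumptions are illustrative, not the cker Transpose kernel):

#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

// An empty `perm` means "reverse all axes", matching the dimension(0) == 0 branch.
std::vector<int32_t> resolvePerm(const std::vector<int32_t> &perm, int32_t rank)
{
  if (!perm.empty())
    return perm;
  std::vector<int32_t> reversed(rank);
  std::iota(reversed.begin(), reversed.end(), 0);  // 0, 1, ..., rank-1
  std::reverse(reversed.begin(), reversed.end());  // rank-1, ..., 1, 0
  return reversed;
}

// Reference transpose: output axis i takes its coordinate from input axis perm[i].
std::vector<float> transposeRef(const std::vector<float> &in, const std::vector<int32_t> &dims,
                                const std::vector<int32_t> &perm)
{
  const int32_t rank = static_cast<int32_t>(dims.size());
  std::vector<int32_t> out_dims(rank), in_strides(rank, 1);
  for (int32_t i = rank - 2; i >= 0; --i) in_strides[i] = in_strides[i + 1] * dims[i + 1];
  for (int32_t i = 0; i < rank; ++i) out_dims[i] = dims[perm[i]];

  std::vector<float> out(in.size());
  std::vector<int32_t> idx(rank, 0); // multi-index over the output, row-major order
  for (std::size_t o = 0; o < out.size(); ++o)
  {
    int64_t src = 0;
    for (int32_t i = 0; i < rank; ++i) src += int64_t{idx[i]} * in_strides[perm[i]];
    out[o] = in[src];
    for (int32_t i = rank - 1; i >= 0; --i) // advance the output multi-index
    {
      if (++idx[i] < out_dims[i]) break;
      idx[i] = 0;
    }
  }
  return out;
}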
- */ - -#ifndef __ONERT_BACKEND_CPU_OPS_TRANSPOSELAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_TRANSPOSELAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class TransposeLayer : public ::onert::exec::IFunction -{ -public: - TransposeLayer(); - -public: - template <typename T> void transpose(); - - void transposeQuant8(); - - void configure(const IPortableTensor *input, const IPortableTensor *perm, - IPortableTensor *output); - - void run() override; - -private: - const IPortableTensor *_input; - const IPortableTensor *_perm; - IPortableTensor *_output; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_TRANSPOSELAYER_H__ diff --git a/runtime/onert/backend/cpu/ops/UnpackLayer.cc b/runtime/onert/backend/cpu/ops/UnpackLayer.cc deleted file mode 100644 index 428b38588..000000000 --- a/runtime/onert/backend/cpu/ops/UnpackLayer.cc +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "UnpackLayer.h" - -#include "OperationUtils.h" - -#include <cker/operation/Unpack.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -UnpackLayer::UnpackLayer() : _input(nullptr), _outputs(), _axis(0), _num_output(0) -{ - // DO NOTHING -} - -template <typename T> void UnpackLayer::unpackImpl() -{ - nnfw::cker::UnpackParams op_params; - op_params.axis = _axis; - op_params.num_split = _num_output; - - std::vector<nnfw::cker::Shape *> outputDimsPtr; - std::vector<nnfw::cker::Shape> outputDims; - outputDimsPtr.reserve(_num_output); - outputDims.reserve(_num_output); - - for (int32_t i = 0; i < _num_output; i++) - { - outputDims.push_back(getTensorShape(_outputs[i])); - outputDimsPtr.push_back(&outputDims[i]); - } - - std::vector<T *> outputPtrs; - - for (const auto output : _outputs) - { - outputPtrs.emplace_back(reinterpret_cast<T *>(output->buffer())); - } - - nnfw::cker::Unpack<T>(op_params, getTensorShape(_input), reinterpret_cast<T *>(_input->buffer()), - getTensorShape(_outputs[0]), outputPtrs.data()); -} - -void UnpackLayer::configure(const IPortableTensor *input, uint32_t axis, int32_t num, - std::vector<IPortableTensor *> &outputs) -{ - assert(input != nullptr); - assert(outputs.size() > 0); - assert(outputs.size() == (size_t)num); - - _input = input; - _axis = axis; - _num_output = num; - _outputs = outputs; -} - -void UnpackLayer::run() -{ - if (_input->data_type() == OperandType::FLOAT32) - unpackImpl<float>(); - else if (_input->data_type() == OperandType::INT32) - unpackImpl<int32_t>(); - else - throw std::runtime_error{"Unpack: Unsupported data type"}; -} - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert diff --git a/runtime/onert/backend/cpu/ops/UnpackLayer.h b/runtime/onert/backend/cpu/ops/UnpackLayer.h deleted file mode 
100644 index a185b31a0..000000000 --- a/runtime/onert/backend/cpu/ops/UnpackLayer.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __ONERT_BACKEND_CPU_OPS_UNPACKLAYER_H__ -#define __ONERT_BACKEND_CPU_OPS_UNPACKLAYER_H__ - -#include <backend/IPortableTensor.h> - -#include <exec/IFunction.h> - -namespace onert -{ -namespace backend -{ -namespace cpu -{ -namespace ops -{ - -class UnpackLayer : public ::onert::exec::IFunction -{ -public: - UnpackLayer(); - -public: - void configure(const IPortableTensor *input, uint32_t axis, int32_t num_output, - std::vector<IPortableTensor *> &output); - void run() override; - -private: - template <typename T> void unpackImpl(); - -private: - const IPortableTensor *_input; - std::vector<IPortableTensor *> _outputs; - uint32_t _axis; - int32_t _num_output; -}; - -} // namespace ops -} // namespace cpu -} // namespace backend -} // namespace onert - -#endif // __ONERT_BACKEND_CPU_OPS_UNPACKLAYER_H__ |
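Unpack, implemented above, splits the input into dims[axis] slices along `axis` and drops that axis from each output shape; the data movement is the same as an equal Split with one element per slice. A flat row-major sketch under those assumptions (illustrative only, not the cker Unpack kernel):

#include <cstdint>
#include <vector>

// Returns dims[axis] output buffers, each holding one slice taken along `axis`.
std::vector<std::vector<float>> unpackRef(const std::vector<float> &input,
                                          const std::vector<int32_t> &dims, int32_t axis)
{
  int64_t outer = 1, inner = 1;
  for (int32_t i = 0; i < axis; ++i) outer *= dims[i];
  for (size_t i = axis + 1; i < dims.size(); ++i) inner *= dims[i];

  std::vector<std::vector<float>> outputs(dims[axis]);
  for (int64_t o = 0; o < outer; ++o)
    for (int32_t s = 0; s < dims[axis]; ++s)
    {
      const float *src = input.data() + (o * dims[axis] + s) * inner;
      outputs[s].insert(outputs[s].end(), src, src + inner);
    }
  return outputs;
}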