Diffstat (limited to 'runtimes/neurun/backend/cpu/kernel/AddLayer.cc')
-rw-r--r-- | runtimes/neurun/backend/cpu/kernel/AddLayer.cc | 87 |
1 file changed, 87 insertions(+), 0 deletions(-)
diff --git a/runtimes/neurun/backend/cpu/kernel/AddLayer.cc b/runtimes/neurun/backend/cpu/kernel/AddLayer.cc
new file mode 100644
index 000000000..14e2afec8
--- /dev/null
+++ b/runtimes/neurun/backend/cpu/kernel/AddLayer.cc
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AddLayer.h"
+
+#include <cker/operation/Add.h>
+
+#include "OperationUtils.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+namespace kernel
+{
+
+void AddLayer::addFloat32()
+{
+  float output_activation_min, output_activation_max;
+  CalculateActivationRangeFloat(_activation, &output_activation_min, &output_activation_max);
+  nnfw::cker::AddParam op_params;
+  op_params.float_activation_max = output_activation_max;
+  op_params.float_activation_min = output_activation_min;
+
+  nnfw::cker::Add(op_params, convertShapeToCkerShape(_lhsShape), _lhsData.f,
+                  convertShapeToCkerShape(_rhsShape), _rhsData.f,
+                  convertShapeToCkerShape(_outputShape), _outputData.f);
+}
+
+void AddLayer::addQuant8()
+{
+  int32_t output_activation_min, output_activation_max;
+  CalculateActivationRangeUint8(_activation, _outputShape, &output_activation_min,
+                                &output_activation_max);
+  // nnfw::cker::AddParam op_params;
+  // op_params.quantized_activation_max = output_activation_max;
+  // op_params.quantized_activation_min = output_activation_min;
+
+  // cker quant8 add is not implemented yet
+  throw std::runtime_error{"NYI"};
+}
+
+void AddLayer::configure(uint8_t *lhsData, const Shape &lhsShape, uint8_t *rhsData,
+                         const Shape &rhsShape, const model::Activation activation,
+                         uint8_t *outputData, const Shape &outputShape)
+{
+  _lhsData.u8 = lhsData;
+  _lhsShape = lhsShape;
+  _rhsData.u8 = rhsData;
+  _rhsShape = rhsShape;
+  _inputType = lhsShape.type;
+  _activation = activation;
+  _outputData.u8 = outputData;
+  _outputShape = outputShape;
+}
+
+void AddLayer::run()
+{
+  if (_inputType == OperandType::FLOAT32)
+  {
+    addFloat32();
+  }
+  else if (_inputType == OperandType::QUANT8_ASYMM)
+  {
+    addQuant8();
+  }
+}
+
+} // namespace kernel
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
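
For context, below is a minimal usage sketch of the new kernel. It is hypothetical and not part of the commit: it assumes the neurun CPU backend headers are on the include path, that Shape and OperandType come from OperationUtils.h with a `type` and a `dimensions` field (the field name is an assumption), and that `model::Activation::NONE` denotes no fused activation. In the real runtime the backend's kernel generator is expected to perform this configure/run wiring rather than user code.

    // Hypothetical usage sketch -- not part of the commit above.
    // Names follow the usage visible in AddLayer.cc; fields marked
    // "assumed" are guesses about the surrounding neurun headers.
    #include "AddLayer.h"

    #include <vector>

    using namespace neurun::backend::cpu::kernel;

    int main()
    {
      // Two 1x4 FLOAT32 operands and an output buffer of the same shape.
      std::vector<float> lhs{1.f, 2.f, 3.f, 4.f};
      std::vector<float> rhs{10.f, 20.f, 30.f, 40.f};
      std::vector<float> out(4);

      Shape shape;
      shape.type = OperandType::FLOAT32; // AddLayer::run() dispatches on this
      shape.dimensions = {1, 4};         // assumed field name

      AddLayer add;
      add.configure(reinterpret_cast<uint8_t *>(lhs.data()), shape,
                    reinterpret_cast<uint8_t *>(rhs.data()), shape,
                    neurun::model::Activation::NONE, // assumed: no fused activation
                    reinterpret_cast<uint8_t *>(out.data()), shape);
      add.run(); // FLOAT32 inputs -> addFloat32(); out becomes {11, 22, 33, 44}
      return 0;
    }

Note that configure() only records raw `uint8_t *` buffers plus shapes, and run() reinterprets them through the `_lhsData.f` / `_lhsData.u8` union according to `_inputType`, which is why the same entry points serve both the FLOAT32 path and the (still unimplemented) QUANT8_ASYMM path.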