summaryrefslogtreecommitdiff
path: root/runtime/neurun/backend/cpu/kernel/ConcatLayer.cc
diff options
context:
space:
mode:
Diffstat (limited to 'runtime/neurun/backend/cpu/kernel/ConcatLayer.cc')
-rw-r--r--runtime/neurun/backend/cpu/kernel/ConcatLayer.cc137
1 files changed, 137 insertions, 0 deletions
diff --git a/runtime/neurun/backend/cpu/kernel/ConcatLayer.cc b/runtime/neurun/backend/cpu/kernel/ConcatLayer.cc
new file mode 100644
index 000000000..471c9b3bb
--- /dev/null
+++ b/runtime/neurun/backend/cpu/kernel/ConcatLayer.cc
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ConcatLayer.h"
+
+#include "OperationUtils.h"
+
+#include <cker/operation/Concatenation.h>
+
+#include <stdexcept>
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+namespace kernel
+{
+
+// Default-constructs an unconfigured layer: no input/output buffers bound,
+// concatenation axis 0, and element type FLOAT32 until configure() is called.
+ConcatLayer::ConcatLayer()
+    : _inputDataPtrs(), _outputData(), _axis(0), _inputDescriptors(), _outputDescr(),
+      _inputType(OperandType::FLOAT32)
+{
+  // DO NOTHING
+}
+
+// Concatenates all configured FLOAT32 inputs along _axis into _outputData.f
+// using the cker reference kernel.
+void ConcatLayer::concatenationFloat32()
+{
+  uint32_t num_inputs = _inputDescriptors.size();
+
+  nnfw::cker::ConcatenationParams op_params;
+  op_params.axis = _axis;
+  op_params.inputs_count = num_inputs;
+
+  // The kernel takes an array of Shape pointers; the Shape objects themselves
+  // live in inputDims. reserve() guarantees push_back never reallocates, so
+  // the pointers stored in inputDimsPtr stay valid for the kernel call.
+  std::vector<nnfw::cker::Shape *> inputDimsPtr;
+  std::vector<nnfw::cker::Shape> inputDims;
+  inputDimsPtr.reserve(num_inputs);
+  inputDims.reserve(num_inputs);
+
+  for (uint32_t i = 0; i < num_inputs; i++)
+  {
+    inputDims.push_back(convertTensorDescriptorToCkerShape(_inputDescriptors[i]));
+    inputDimsPtr.push_back(&inputDims[i]);
+  }
+
+  // Reinterpret the raw byte pointers as float buffers for the typed kernel.
+  std::vector<const float *> inputFloatPtrs;
+  inputFloatPtrs.reserve(num_inputs); // avoid reallocation during the fill loop
+
+  for (auto ptr : _inputDataPtrs)
+  {
+    inputFloatPtrs.emplace_back(reinterpret_cast<const float *>(ptr));
+  }
+
+  nnfw::cker::Concatenation<float>(op_params, inputDimsPtr.data(), inputFloatPtrs.data(),
+                                   convertTensorDescriptorToCkerShape(_outputDescr), _outputData.f);
+}
+
+// Concatenates all configured QUANT8_ASYMM inputs along _axis into
+// _outputData.u8, passing per-input quantization parameters to the kernel.
+void ConcatLayer::concatenationQuant8()
+{
+  const uint32_t input_count = _inputDescriptors.size();
+
+  // Gather the per-input quantization parameters the uint8 kernel requires.
+  std::vector<int32_t> zero_points(input_count);
+  std::vector<float> scales(input_count);
+  for (uint32_t idx = 0; idx < input_count; ++idx)
+  {
+    zero_points[idx] = _inputDescriptors[idx].offset;
+    scales[idx] = _inputDescriptors[idx].scale;
+  }
+
+  nnfw::cker::ConcatenationParams op_params;
+  op_params.axis = _axis;
+  op_params.inputs_count = input_count;
+  op_params.input_zeropoint = zero_points.data();
+  op_params.input_scale = scales.data();
+  op_params.output_zeropoint = _outputDescr.offset;
+  op_params.output_scale = _outputDescr.scale;
+
+  // Convert every descriptor to a cker Shape; reserve() keeps element
+  // addresses stable so the pointer array remains valid during the call.
+  std::vector<nnfw::cker::Shape> shapes;
+  std::vector<nnfw::cker::Shape *> shape_ptrs;
+  shapes.reserve(input_count);
+  shape_ptrs.reserve(input_count);
+  for (uint32_t idx = 0; idx < input_count; ++idx)
+  {
+    shapes.push_back(convertTensorDescriptorToCkerShape(_inputDescriptors[idx]));
+    shape_ptrs.push_back(&shapes[idx]);
+  }
+
+  nnfw::cker::Concatenation<uint8_t>(op_params, shape_ptrs.data(), _inputDataPtrs.data(),
+                                     convertTensorDescriptorToCkerShape(_outputDescr),
+                                     _outputData.u8);
+}
+
+// Binds the layer to its tensors ahead of run().
+// @param inputDataPtrs    one raw buffer pointer per input (not owned)
+// @param inputDescriptors shape/quantization info per input, same order
+// @param axis             concatenation axis
+// @param outputData       raw pointer to the output buffer (not owned)
+// @param outputDescr      shape/quantization info of the output
+void ConcatLayer::configure(const std::vector<const uint8_t *> &inputDataPtrs,
+                            const std::vector<TensorDescriptor> &inputDescriptors, int32_t axis,
+                            uint8_t *outputData, const TensorDescriptor outputDescr)
+{
+  _inputDataPtrs = inputDataPtrs;
+
+  // Assign (do not append): the previous emplace_back loop accumulated stale
+  // descriptors if configure() was called more than once, while the data
+  // pointers above were replaced — leaving the two vectors out of sync.
+  _inputDescriptors = inputDescriptors;
+
+  // All inputs are expected to share one element type; like the original
+  // loop (which overwrote _inputType on every iteration), keep the last one.
+  if (!_inputDescriptors.empty())
+  {
+    _inputType = _inputDescriptors.back().type;
+  }
+
+  _axis = axis;
+
+  _outputData.u8 = outputData;
+  _outputDescr = outputDescr;
+}
+
+// Dispatches to the kernel matching the element type set by configure().
+// @throws std::runtime_error if the configured type is unsupported.
+void ConcatLayer::run()
+{
+  if (_inputType == OperandType::FLOAT32)
+  {
+    concatenationFloat32();
+  }
+  else if (_inputType == OperandType::QUANT8_ASYMM)
+  {
+    concatenationQuant8();
+  }
+  else
+  {
+    // Previously an unsupported type was silently ignored, leaving the
+    // output buffer untouched; fail loudly instead.
+    throw std::runtime_error{"ConcatLayer: unsupported input type"};
+  }
+}
+
+} // namespace kernel
+} // namespace cpu
+} // namespace backend
+} // namespace neurun