Diffstat (limited to 'runtimes/neurun/src/kernel/cpu/ConcatLayer.cc')
-rw-r--r--  runtimes/neurun/src/kernel/cpu/ConcatLayer.cc  138
1 file changed, 0 insertions, 138 deletions
diff --git a/runtimes/neurun/src/kernel/cpu/ConcatLayer.cc b/runtimes/neurun/src/kernel/cpu/ConcatLayer.cc
deleted file mode 100644
index be093b437..000000000
--- a/runtimes/neurun/src/kernel/cpu/ConcatLayer.cc
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ConcatLayer.h"
-
-#include "tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h"
-#include "kernel/cpu/OperationUtils.h"
-
-namespace neurun
-{
-namespace kernel
-{
-
-namespace cpu
-{
-
-ConcatLayer::ConcatLayer()
- : _inputDataPtrs(), _outputData(nullptr), _axis(0), _inputShapes(), _outputShape(),
- _inputType(OperandType::SCALAR_FLOAT32)
-{
- // DO NOTHING
-}
-
-bool ConcatLayer::concatenationFloat32()
-{
- uint32_t num_inputs = _inputShapes.size();
-
- tflite::ConcatenationParams op_params;
- op_params.axis = _axis;
- op_params.inputs_count = num_inputs;
-
- std::vector<::tflite::RuntimeShape *> inputDimsPtr;
- std::vector<::tflite::RuntimeShape> inputDims;
- inputDimsPtr.reserve(num_inputs);
- inputDims.reserve(num_inputs);
-
- for (uint32_t i = 0; i < num_inputs; i++)
- {
- inputDims.push_back(convertShapeToTFLiteShape(_inputShapes[i]));
- inputDimsPtr.push_back(&inputDims[i]);
- }
-
- std::vector<const float *> inputFloatPtrs;
-
- for (auto ptr : _inputDataPtrs)
- {
- inputFloatPtrs.emplace_back(reinterpret_cast<const float *>(ptr));
- }
-
- ::tflite::optimized_ops::Concatenation<float>(
- op_params, inputDimsPtr.data(), inputFloatPtrs.data(),
- convertShapeToTFLiteShape(_outputShape), reinterpret_cast<float *>(_outputData));
- return true;
-}
-bool ConcatLayer::concatenationQuant8()
-{
- uint32_t num_inputs = _inputShapes.size();
-
- std::vector<int32_t> input_zeropoints(num_inputs);
- std::vector<float> input_scales(num_inputs);
- for (uint32_t i = 0; i < num_inputs; i++)
- {
- input_zeropoints[i] = _inputShapes[i].offset;
- input_scales[i] = _inputShapes[i].scale;
- }
-
- tflite::ConcatenationParams op_params;
- op_params.axis = _axis;
- op_params.inputs_count = num_inputs;
- op_params.input_zeropoint = input_zeropoints.data();
- op_params.input_scale = input_scales.data();
- op_params.output_zeropoint = _outputShape.offset;
- op_params.output_scale = _outputShape.scale;
-
- std::vector<::tflite::RuntimeShape *> inputDimsPtr;
- std::vector<::tflite::RuntimeShape> inputDims;
- inputDimsPtr.reserve(num_inputs);
- inputDims.reserve(num_inputs);
- for (uint32_t i = 0; i < num_inputs; i++)
- {
- inputDims.push_back(convertShapeToTFLiteShape(_inputShapes[i]));
- inputDimsPtr.push_back(&inputDims[i]);
- }
-
- ::tflite::optimized_ops::Concatenation<uint8_t>(
- op_params, inputDimsPtr.data(), _inputDataPtrs.data(),
- convertShapeToTFLiteShape(_outputShape), _outputData);
- return true;
-}
-
-void ConcatLayer::configure(const std::vector<const uint8_t *> &inputDataPtrs,
- const std::vector<Shape> &inputShapes, int32_t axis,
- uint8_t *outputData, const Shape outputShape)
-{
- _inputDataPtrs = inputDataPtrs;
-
- for (auto shape : inputShapes)
- {
- _inputShapes.emplace_back(shape);
- _inputType = shape.type;
- }
-
- _axis = axis;
-
- _outputData = outputData;
- _outputShape = outputShape;
-}
-
-void ConcatLayer::run()
-{
- if (_inputType == OperandType::TENSOR_FLOAT32)
- {
- concatenationFloat32();
- }
- else if (_inputType == OperandType::TENSOR_QUANT8_ASYMM)
- {
- throw std::runtime_error{"ConcatLayer : Not tested for TENSOR_QUANT8_ASYMM"};
- // concatenationQuant8();
- }
-}
-
-} // namespace cpu
-} // namespace kernel
-} // namespace neurun
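For reference, the removed ConcatLayer delegated the actual data movement to
tflite::optimized_ops::Concatenation. The following is a minimal, standalone C++
sketch of the same float32 concatenation semantics (concatenating tensors along
one axis in row-major order); the Shape struct and concatFloat32 helper are
illustrative simplifications, not neurun or TFLite APIs.

// Illustrative sketch only: reference float32 concatenation along `axis`.
// Shape and concatFloat32 are hypothetical, simplified stand-ins.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct Shape
{
  std::vector<int32_t> dims; // e.g. {N, H, W, C}
};

// Concatenate float tensors along `axis`. Each input must match the output
// shape in every dimension except `axis`.
void concatFloat32(const std::vector<const float *> &inputs, const std::vector<Shape> &inShapes,
                   int32_t axis, float *output, const Shape &outShape)
{
  // outer = product of dims before `axis`, inner = product of dims after `axis`
  int64_t outer = 1, inner = 1;
  for (int32_t d = 0; d < axis; ++d)
    outer *= outShape.dims[d];
  for (size_t d = static_cast<size_t>(axis) + 1; d < outShape.dims.size(); ++d)
    inner *= outShape.dims[d];

  float *out_ptr = output;
  for (int64_t o = 0; o < outer; ++o)
  {
    // For each outer slice, copy the corresponding slice of every input in order.
    for (size_t i = 0; i < inputs.size(); ++i)
    {
      const int64_t copy = inShapes[i].dims[axis] * inner;
      const float *in_ptr = inputs[i] + o * copy;
      std::copy(in_ptr, in_ptr + copy, out_ptr);
      out_ptr += copy;
    }
  }
}

int main()
{
  // Two 1x2 tensors concatenated along axis 1 -> one 1x4 tensor.
  std::vector<float> a{1.f, 2.f}, b{3.f, 4.f}, out(4);
  concatFloat32({a.data(), b.data()}, {{{1, 2}}, {{1, 2}}}, 1, out.data(), {{1, 4}});
  for (float v : out)
    std::cout << v << ' '; // prints: 1 2 3 4
}

The removed kernel followed the same configure/run split seen above: buffers,
shapes, and the axis were stored in configure(), and run() dispatched on the
operand type (float32 supported, quant8 path present but disabled).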