Diffstat (limited to 'runtimes/neurun/backend/acl_neon/ShapeFixer.cc')
-rw-r--r-- runtimes/neurun/backend/acl_neon/ShapeFixer.cc | 332
1 file changed, 332 insertions(+), 0 deletions(-)
diff --git a/runtimes/neurun/backend/acl_neon/ShapeFixer.cc b/runtimes/neurun/backend/acl_neon/ShapeFixer.cc
new file mode 100644
index 000000000..e7cdbea4c
--- /dev/null
+++ b/runtimes/neurun/backend/acl_neon/ShapeFixer.cc
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ShapeFixer.h"
+
+#include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>
+#include <arm_compute/runtime/NEON/functions/NEArithmeticAddition.h>
+#include <arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h>
+#include <arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h>
+#include <arm_compute/runtime/NEON/functions/NEPoolingLayer.h>
+#include <arm_compute/runtime/NEON/functions/NEActivationLayer.h>
+#include <arm_compute/runtime/NEON/functions/NEConvolutionLayer.h>
+#include <arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h>
+#include <arm_compute/runtime/NEON/functions/NEReshapeLayer.h>
+#include <arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h>
+#include <arm_compute/runtime/NEON/functions/NEFullyConnectedReshapingLayer.h>
+
+#include <Convert.h>
+#include <Swizzle.h>
+
+#include "kernel/ConcatLayer.h"
+#include "util/Padding.h"
+#include "model/Index.h"
+#include "model/DataType.h"
+#include "model/InternalType.h"
+#include "compiler/IExecutionBuilder.h"
+#include "exec/NopFunction.h"
+#include "util/logging.h"
+#include "util/Utils.h"
+
+using ::neurun::compiler::IExecutionBuilder;
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_neon
+{
+
+using ::neurun::backend::acl_common::asAclFunction;
+
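+// ShapeFixer visits every operation before kernel generation and adjusts
+// operand shapes for the ACL NEON backend: it disables the tensor builder's
+// dimension correction where an operand's original rank must be preserved,
+// and extends operand ranks in place so binary operations can broadcast.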
+ShapeFixer::ShapeFixer(const neurun::model::Operands &ctx,
+ const std::shared_ptr<TensorBuilder> &tensor_builder)
+ : _ctx(ctx), _tensor_builder(tensor_builder)
+{
+ assert(tensor_builder);
+}
+
+void ShapeFixer::visit(const model::operation::AbsNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::ArgMaxNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::Conv2DNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::DepthwiseConv2DNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::DequantizeNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::MaxPool2DNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::MeanNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::AvgPool2DNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::ConcatNode &node)
+{
+ const auto ofm_index{node.getOutputs().at(0)};
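+ // Disable dimension correction so the output and every input keep their
+ // original ranks when the backend tensors are built.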
+ _tensor_builder->dimCorrection(ofm_index, false);
+ for (const auto &input_index : node.getInputs())
+ _tensor_builder->dimCorrection(input_index, false);
+}
+
+void ShapeFixer::visit(const model::operation::ExpNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::FloorNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::FullyConnectedNode &node)
+{
+ using model::operation::FullyConnectedNode;
+ const auto input_index{node.getInputs().at(FullyConnectedNode::Input::INPUT)};
+ const auto input_rank = _ctx.at(input_index).shape().rank();
+ // TODO Handle the case where the input's rank is 3; it is currently
+ // unsupported and rejected by the assert below.
+ assert(input_rank != 3);
+ // A rank-4 input will be reshaped into rank-2, so keep its original dimensions
+ if (input_rank == 4)
+ _tensor_builder->dimCorrection(input_index, false);
+}
+
+void ShapeFixer::visit(const model::operation::L2NormalizationNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::L2Pool2DNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::LocalResponseNormalizationNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::LogicalAndNode &node)
+{
+ const auto input0_index{node.getInputs().at(model::operation::LogicalAndNode::Input::INPUT0)};
+ const auto input1_index{node.getInputs().at(model::operation::LogicalAndNode::Input::INPUT1)};
+
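+ // When the two input shapes differ, extend the lower-rank shape to the
+ // common (maximum) rank so the two inputs can be broadcast against each
+ // other. The same pattern is applied to every binary operation below.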
+ if (!(_ctx.at(input0_index).shape() == _ctx.at(input1_index).shape()))
+ {
+ const auto broadcast_rank =
+ std::max(_ctx.at(input0_index).shape().rank(), _ctx.at(input1_index).shape().rank());
+
+ // TODO Remove this const_cast later; for example, _ctx may need to become
+ // non-const, or a node that extends the shape may be inserted in front of this operation.
+ const_cast<::neurun::model::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank);
+ const_cast<::neurun::model::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank);
+ }
+}
+
+void ShapeFixer::visit(const model::operation::LogicalNotNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::LogicalOrNode &node)
+{
+ const auto input0_index{node.getInputs().at(model::operation::LogicalOrNode::Input::INPUT0)};
+ const auto input1_index{node.getInputs().at(model::operation::LogicalOrNode::Input::INPUT1)};
+
+ if (!(_ctx.at(input0_index).shape() == _ctx.at(input1_index).shape()))
+ {
+ const auto broadcast_rank =
+ std::max(_ctx.at(input0_index).shape().rank(), _ctx.at(input1_index).shape().rank());
+
+ // TODO Remove this const_cast later; for example, _ctx may need to become
+ // non-const, or a node that extends the shape may be inserted in front of this operation.
+ const_cast<::neurun::model::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank);
+ const_cast<::neurun::model::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank);
+ }
+}
+
+void ShapeFixer::visit(const model::operation::LogisticNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::LSTMNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::PadNode &node)
+{
+ const auto input_index{node.getInputs().at(model::operation::PadNode::Input::INPUT)};
+ const auto output_index{node.getOutputs().at(0)};
+ _tensor_builder->dimCorrection(input_index, false);
+ _tensor_builder->dimCorrection(output_index, false);
+}
+
+void ShapeFixer::visit(const model::operation::MulNode &node)
+{
+ const auto lhs_index{node.getInputs().at(model::operation::MulNode::Input::LHS)};
+ const auto rhs_index{node.getInputs().at(model::operation::MulNode::Input::RHS)};
+
+ if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
+ {
+ const auto broadcast_rank =
+ std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank());
+
+ // TODO Remove this const_cast later; for example, _ctx may need to become
+ // non-const, or a node that extends the shape may be inserted in front of this operation.
+ const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank);
+ const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank);
+ }
+}
+
+void ShapeFixer::visit(const model::operation::NegNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::PReLUNode &node)
+{
+ const auto ifm_index{node.getInputs().at(model::operation::PReLUNode::Input::INPUT)};
+ const auto alpha_index{node.getInputs().at(model::operation::PReLUNode::Input::ALPHA)};
+
+ if (!(_ctx.at(ifm_index).shape() == _ctx.at(alpha_index).shape()))
+ {
+ const auto broadcast_rank =
+ std::max(_ctx.at(ifm_index).shape().rank(), _ctx.at(alpha_index).shape().rank());
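+ // TODO Remove this const_cast later; for example, _ctx may need to become
+ // non-const, or a node that extends the shape may be inserted in front of this operation.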
+ const_cast<::neurun::model::Shape &>(_ctx.at(ifm_index).shape()).extendRank(broadcast_rank);
+ const_cast<::neurun::model::Shape &>(_ctx.at(alpha_index).shape()).extendRank(broadcast_rank);
+ }
+}
+
+void ShapeFixer::visit(const model::operation::ReduceSumNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::ReLUNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::ReLU1Node &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::ReLU6Node &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::ReshapeNode &node)
+{
+ const auto output_index{node.getOutputs().at(0)};
+ const auto input_index{node.getInputs().at(model::operation::ReshapeNode::Input::INPUT)};
+
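+ // Reshape operates on the raw shapes, so keep the original dimensions of
+ // both the input and the output.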
+ _tensor_builder->dimCorrection(input_index, false);
+ _tensor_builder->dimCorrection(output_index, false);
+}
+
+void ShapeFixer::visit(const model::operation::ResizeBilinearNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::RNNNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::ComparisonNode &node)
+{
+ const auto input0_index{node.getInputs().at(model::operation::ComparisonNode::Input::INPUT0)};
+ const auto input1_index{node.getInputs().at(model::operation::ComparisonNode::Input::INPUT1)};
+
+ if (!(_ctx.at(input0_index).shape() == _ctx.at(input1_index).shape()))
+ {
+ const auto broadcast_rank =
+ std::max(_ctx.at(input0_index).shape().rank(), _ctx.at(input1_index).shape().rank());
+
+ // TODO Remove this const_cast later; for example, _ctx may need to become
+ // non-const, or a node that extends the shape may be inserted in front of this operation.
+ const_cast<::neurun::model::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank);
+ const_cast<::neurun::model::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank);
+ }
+}
+
+void ShapeFixer::visit(const model::operation::RSQRTNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::SqueezeNode &node)
+{
+ const auto output_index{node.getOutputs().at(0)};
+ const auto input_index{node.getInputs().at(model::operation::SqueezeNode::Input::INPUT)};
+ _tensor_builder->dimCorrection(input_index, false);
+ _tensor_builder->dimCorrection(output_index, false);
+}
+
+void ShapeFixer::visit(const model::operation::TanhNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::StridedSliceNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::SoftmaxNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::SplitNode &node)
+{
+ const auto input_index{node.getInputs().at(model::operation::SplitNode::Input::INPUT)};
+ _tensor_builder->dimCorrection(input_index, false);
+ for (const auto &output : node.getOutputs())
+ _tensor_builder->dimCorrection(output, false);
+}
+
+void ShapeFixer::visit(const model::operation::SQRTNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::SquaredDifferenceNode &node)
+{
+ const auto lhs_index{node.getInputs().at(model::operation::SquaredDifferenceNode::Input::LHS)};
+ const auto rhs_index{node.getInputs().at(model::operation::SquaredDifferenceNode::Input::RHS)};
+
+ if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
+ {
+ const auto broadcast_rank =
+ std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank());
+
+ // TODO Remove this const_cast later; for example, _ctx may need to become
+ // non-const, or a node that extends the shape may be inserted in front of this operation.
+ const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank);
+ const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank);
+ }
+}
+
+void ShapeFixer::visit(const model::operation::SubNode &node)
+{
+ const auto lhs_index{node.getInputs().at(model::operation::SubNode::Input::LHS)};
+ const auto rhs_index{node.getInputs().at(model::operation::SubNode::Input::RHS)};
+
+ if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
+ {
+ const auto broadcast_rank =
+ std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank());
+ // TODO Remove this const_cast later; for example, _ctx may need to become
+ // non-const, or a node that extends the shape may be inserted in front of this operation.
+ const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank);
+ const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank);
+ }
+}
+
+void ShapeFixer::visit(const model::operation::TransposeConvNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::TransposeNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::UnpackNode &node)
+{
+ const auto input_index{node.getInputs().at(model::operation::UnpackNode::Input::INPUT)};
+ _tensor_builder->dimCorrection(input_index, false);
+ for (const auto &output_index : node.getOutputs())
+ _tensor_builder->dimCorrection(output_index, false);
+}
+
+void ShapeFixer::visit(const model::operation::AddNode &node)
+{
+ const auto lhs_index{node.getInputs().at(model::operation::AddNode::Input::LHS)};
+ const auto rhs_index{node.getInputs().at(model::operation::AddNode::Input::RHS)};
+
+ if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
+ {
+ const auto broadcast_rank =
+ std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank());
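+ // TODO Remove this const_cast later; for example, _ctx may need to become
+ // non-const, or a node that extends the shape may be inserted in front of this operation.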
+ const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank);
+ const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank);
+ }
+}
+
+void ShapeFixer::visit(const model::operation::DivNode &node)
+{
+ const auto lhs_index{node.getInputs().at(model::operation::DivNode::Input::LHS)};
+ const auto rhs_index{node.getInputs().at(model::operation::DivNode::Input::RHS)};
+
+ if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
+ {
+ const auto broadcast_rank =
+ std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank());
+
+ // TODO Remove this const_cast later; for example, _ctx may need to become
+ // non-const, or a node that extends the shape may be inserted in front of this operation.
+ const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank);
+ const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank);
+ }
+}
+
+} // namespace acl_neon
+} // namespace backend
+} // namespace neurun