path: root/runtime/neurun/backend/acl_neon/ConstantInitializer.cc
Diffstat (limited to 'runtime/neurun/backend/acl_neon/ConstantInitializer.cc')
-rw-r--r--  runtime/neurun/backend/acl_neon/ConstantInitializer.cc  246
1 file changed, 0 insertions, 246 deletions
diff --git a/runtime/neurun/backend/acl_neon/ConstantInitializer.cc b/runtime/neurun/backend/acl_neon/ConstantInitializer.cc
deleted file mode 100644
index 9a74bda29..000000000
--- a/runtime/neurun/backend/acl_neon/ConstantInitializer.cc
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ConstantInitializer.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace acl_neon
-{
-
-ConstantInitializer::ConstantInitializer(const ir::Operands &operands,
- const std::shared_ptr<TensorBuilder> &tensor_builder)
- : _operands{operands}, _tensor_builder{tensor_builder}
-{
- // DO NOTHING
-}
-
-void ConstantInitializer::visit(const ir::operation::BatchToSpaceND &node)
-{
- const auto &block_size_index = node.getInputs().at(ir::operation::BatchToSpaceND::BLOCK_SIZE);
- const auto &block_size_obj = _operands.at(block_size_index);
-
- if (block_size_obj.isConstant())
- {
- _init_map[block_size_index] = [](const ir::Operand &model_obj, backend::operand::ITensor &obj) {
- const auto &shape = model_obj.shape();
- const auto base = reinterpret_cast<const int32_t *>(model_obj.data().base());
- assert(model_obj.shape().rank() == 1);
- obj.access([&](::neurun::backend::operand::ITensor &tensor) {
- for (size_t i = 0; i < shape.num_elements(); ++i)
- {
- const int32_t value = base[shape.num_elements() - i - 1];
- int32_t *into = reinterpret_cast<int32_t *>(tensor.buffer() +
- tensor.calcOffset({static_cast<int32_t>(i)}));
- *into = value;
- }
- });
- };
- }
-}
-
-void ConstantInitializer::visit(const ir::operation::Conv2D &node)
-{
- const auto &kernel_index = node.getInputs().at(ir::operation::Conv2D::KERNEL);
- const auto &kernel_obj = _operands.at(kernel_index);
- registerPermuteInitializer(kernel_index, kernel_obj);
-
- const auto &bias_index = node.getInputs().at(ir::operation::Conv2D::BIAS);
- const auto &bias_obj = _operands.at(bias_index);
- registerCopyInitializer(bias_index, bias_obj);
-}
-
-void ConstantInitializer::visit(const ir::operation::DepthwiseConv2D &node)
-{
- const auto &kernel_index = node.getInputs().at(ir::operation::DepthwiseConv2D::KERNEL);
- const auto &kernel_obj = _operands.at(kernel_index);
- registerPermuteInitializer(kernel_index, kernel_obj);
-
- const auto &bias_index = node.getInputs().at(ir::operation::DepthwiseConv2D::BIAS);
- const auto &bias_obj = _operands.at(bias_index);
- registerCopyInitializer(bias_index, bias_obj);
-}
-
-void ConstantInitializer::visit(const ir::operation::FullyConnected &node)
-{
- const auto &weight_index = node.getInputs().at(ir::operation::FullyConnected::WEIGHT);
- const auto &weight_obj = _operands.at(weight_index);
- registerCopyInitializer(weight_index, weight_obj);
-
- const auto &bias_index = node.getInputs().at(ir::operation::FullyConnected::BIAS);
- const auto &bias_obj = _operands.at(bias_index);
- registerCopyInitializer(bias_index, bias_obj);
-}
-
-void ConstantInitializer::visit(const ir::operation::LSTM &node)
-{
- const auto &input_to_input_weights_index =
- node.getInputs().at(ir::operation::LSTM::INPUT_TO_INPUT_WEIGHTS);
- const auto &input_to_input_weights_obj = _operands.at(input_to_input_weights_index);
- registerCopyInitializer(input_to_input_weights_index, input_to_input_weights_obj);
-
- const auto &input_to_forget_weights_index =
- node.getInputs().at(ir::operation::LSTM::INPUT_TO_FORGET_WEIGHTS);
- const auto &input_to_forget_weights_obj = _operands.at(input_to_forget_weights_index);
- registerCopyInitializer(input_to_forget_weights_index, input_to_forget_weights_obj);
-
- const auto &input_to_cell_weights_index =
- node.getInputs().at(ir::operation::LSTM::INPUT_TO_CELL_WEIGHTS);
- const auto &input_to_cell_weights_obj = _operands.at(input_to_cell_weights_index);
- registerCopyInitializer(input_to_cell_weights_index, input_to_cell_weights_obj);
-
- const auto &input_to_output_weights_index =
- node.getInputs().at(ir::operation::LSTM::INPUT_TO_OUTPUT_WEIGHTS);
- const auto &input_to_output_weights_obj = _operands.at(input_to_output_weights_index);
- registerCopyInitializer(input_to_output_weights_index, input_to_output_weights_obj);
-
- const auto &recurrent_to_input_weights_index =
- node.getInputs().at(ir::operation::LSTM::RECURRENT_TO_INPUT_WEIGHTS);
- const auto &recurrent_to_input_weights_obj = _operands.at(recurrent_to_input_weights_index);
- registerCopyInitializer(recurrent_to_input_weights_index, recurrent_to_input_weights_obj);
-
- const auto &recurrent_to_forget_weights_index =
- node.getInputs().at(ir::operation::LSTM::RECURRENT_TO_FORGET_WEIGHTS);
- const auto &recurrent_to_forget_weights_obj = _operands.at(recurrent_to_forget_weights_index);
- registerCopyInitializer(recurrent_to_forget_weights_index, recurrent_to_forget_weights_obj);
-
- const auto &recurrent_to_cell_weights_index =
- node.getInputs().at(ir::operation::LSTM::RECURRENT_TO_CELL_WEIGHTS);
- const auto &recurrent_to_cell_weights_obj = _operands.at(recurrent_to_cell_weights_index);
- registerCopyInitializer(recurrent_to_cell_weights_index, recurrent_to_cell_weights_obj);
-
- const auto &recurrent_to_output_weights_index =
- node.getInputs().at(ir::operation::LSTM::RECURRENT_TO_OUTPUT_WEIGHTS);
- const auto &recurrent_to_output_weights_obj = _operands.at(recurrent_to_output_weights_index);
- registerCopyInitializer(recurrent_to_output_weights_index, recurrent_to_output_weights_obj);
-
- const auto &cell_to_input_weights_index =
- node.getInputs().at(ir::operation::LSTM::CELL_TO_INPUT_WEIGHTS);
- const auto &cell_to_input_weights_obj = _operands.at(cell_to_input_weights_index);
- registerCopyInitializer(cell_to_input_weights_index, cell_to_input_weights_obj);
-
- const auto &cell_to_forget_weights_index =
- node.getInputs().at(ir::operation::LSTM::CELL_TO_FORGET_WEIGHTS);
- const auto &cell_to_forget_weights_obj = _operands.at(cell_to_forget_weights_index);
- registerCopyInitializer(cell_to_forget_weights_index, cell_to_forget_weights_obj);
-
- const auto &cell_to_output_weights_index =
- node.getInputs().at(ir::operation::LSTM::CELL_TO_OUTPUT_WEIGHTS);
- const auto &cell_to_output_weights_obj = _operands.at(cell_to_output_weights_index);
- registerCopyInitializer(cell_to_output_weights_index, cell_to_output_weights_obj);
-
- const auto &input_gate_bias_index = node.getInputs().at(ir::operation::LSTM::INPUT_GATE_BIAS);
- const auto &input_gate_bias_obj = _operands.at(input_gate_bias_index);
- registerCopyInitializer(input_gate_bias_index, input_gate_bias_obj);
-
- const auto &forget_gate_bias_index = node.getInputs().at(ir::operation::LSTM::FORGET_GATE_BIAS);
- const auto &forget_gate_bias_obj = _operands.at(forget_gate_bias_index);
- registerCopyInitializer(forget_gate_bias_index, forget_gate_bias_obj);
-
- const auto &output_gate_bias_index = node.getInputs().at(ir::operation::LSTM::OUTPUT_GATE_BIAS);
- const auto &output_gate_bias_obj = _operands.at(output_gate_bias_index);
- registerCopyInitializer(output_gate_bias_index, output_gate_bias_obj);
-
- const auto &projection_weights_index =
- node.getInputs().at(ir::operation::LSTM::PROJECTION_WEIGHTS);
- const auto &projection_weights_obj = _operands.at(projection_weights_index);
- registerCopyInitializer(projection_weights_index, projection_weights_obj);
-
- const auto &projection_bias_index = node.getInputs().at(ir::operation::LSTM::PROJECTION_BIAS);
- const auto &projection_bias_obj = _operands.at(projection_bias_index);
- registerCopyInitializer(projection_bias_index, projection_bias_obj);
-}
-
-void ConstantInitializer::visit(const ir::operation::RNN &node)
-{
- const auto &weights_index = node.getInputs().at(ir::operation::RNN::WEIGHTS);
- const auto &weights_obj = _operands.at(weights_index);
- registerCopyInitializer(weights_index, weights_obj);
-
- const auto &recurrent_weights_index = node.getInputs().at(ir::operation::RNN::RECURRENT_WEIGHTS);
- const auto &recurrent_weights_obj = _operands.at(recurrent_weights_index);
- registerCopyInitializer(recurrent_weights_index, recurrent_weights_obj);
-
- const auto &bias_index = node.getInputs().at(ir::operation::RNN::BIAS);
- const auto &bias_obj = _operands.at(bias_index);
- registerCopyInitializer(bias_index, bias_obj);
-}
-
-void ConstantInitializer::visit(const ir::operation::SpaceToBatchND &node)
-{
- const auto &block_size_index = node.getInputs().at(ir::operation::SpaceToBatchND::BLOCK_SIZE);
- const auto &block_size_obj = _operands.at(block_size_index);
-
- if (block_size_obj.isConstant())
- {
- _init_map[block_size_index] = [](const ir::Operand &model_obj, backend::operand::ITensor &obj) {
- const auto &shape = model_obj.shape();
- const auto base = reinterpret_cast<const int32_t *>(model_obj.data().base());
- assert(model_obj.shape().rank() == 1);
- obj.access([&](::neurun::backend::operand::ITensor &tensor) {
- for (size_t i = 0; i < shape.num_elements(); ++i)
- {
- const int32_t value = base[shape.num_elements() - i - 1];
- int32_t *into = reinterpret_cast<int32_t *>(tensor.buffer() +
- tensor.calcOffset({static_cast<int32_t>(i)}));
- *into = value;
- }
- });
- };
- }
-
- const auto &paddings_index = node.getInputs().at(ir::operation::SpaceToBatchND::PADDINGS);
- const auto &paddings_obj = _operands.at(paddings_index);
- if (paddings_obj.isConstant())
- {
- _init_map[paddings_index] = [](const ir::Operand &model_obj, backend::operand::ITensor &obj) {
- const auto &shape = model_obj.shape();
- const auto base = reinterpret_cast<const int32_t *>(model_obj.data().base());
- assert(model_obj.shape().rank() == 2);
- assert(shape.dim(0) == 2);
- assert(shape.dim(1) == 2);
- obj.access([&](::neurun::backend::operand::ITensor &tensor) {
- for (auto i = 0; i < shape.dim(0); ++i)
- {
- for (auto j = 0; j < shape.dim(1); ++j)
- {
- const int32_t value = base[i * 2 + j];
- int32_t *into = reinterpret_cast<int32_t *>(
-                // The coordinates of NETensor are different from the coordinates of CLTensor in
- // this operand.
- // NEON : {j, reversed i}
- // CL : {reversed i, j}
- tensor.buffer() + tensor.calcOffset({j, shape.dim(0) - i - 1}));
- *into = value;
- }
- }
- });
- };
- }
-}
-
-void ConstantInitializer::visit(const ir::operation::TransposeConv &node)
-{
- const auto &kernel_index = node.getInputs().at(ir::operation::TransposeConv::KERNEL);
- const auto &kernel_obj = _operands.at(kernel_index);
- registerPermuteInitializer(kernel_index, kernel_obj);
-}
-
-} // namespace acl_neon
-} // namespace backend
-} // namespace neurun
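
For context, the two hand-written initializers in this deleted file (for BatchToSpaceND and SpaceToBatchND) reorder constant data while copying it into the backend tensor: the rank-1 block-size operand is copied in reverse element order, and each element (i, j) of the 2x2 paddings operand is written at coordinates {j, reversed i}. The standalone sketch below reproduces only that index arithmetic in plain C++; the helper names and the std::vector/std::array buffers are illustrative stand-ins, not part of the deleted file or of the neurun/ACL API.

#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Rank-1 block-size constant: destination element i takes source element
// (n - 1 - i), mirroring `base[shape.num_elements() - i - 1]` in the lambdas above.
std::vector<int32_t> reverse_rank1(const int32_t *base, std::size_t n)
{
  std::vector<int32_t> out(n);
  for (std::size_t i = 0; i < n; ++i)
    out[i] = base[n - 1 - i];
  return out;
}

// 2x2 paddings constant: source element (i, j) lands at coordinates
// {j, reversed i}, matching `tensor.calcOffset({j, shape.dim(0) - i - 1})`.
std::array<std::array<int32_t, 2>, 2> remap_paddings(const int32_t *base)
{
  std::array<std::array<int32_t, 2>, 2> out{};
  for (int i = 0; i < 2; ++i)
    for (int j = 0; j < 2; ++j)
      out[j][2 - i - 1] = base[i * 2 + j];
  return out;
}

int main()
{
  const int32_t block_size[] = {2, 3};
  assert(reverse_rank1(block_size, 2) == (std::vector<int32_t>{3, 2}));

  const int32_t paddings[] = {1, 2, 3, 4}; // rows {1, 2} and {3, 4}
  const auto remapped = remap_paddings(paddings);
  assert(remapped[0][0] == 3 && remapped[0][1] == 1);
  assert(remapped[1][0] == 4 && remapped[1][1] == 2);
  return 0;
}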