summaryrefslogtreecommitdiff
path: root/runtimes/neurun/src/linear/Linear.cc
diff options
context:
space:
mode:
Diffstat (limited to 'runtimes/neurun/src/linear/Linear.cc')
-rw-r--r--runtimes/neurun/src/linear/Linear.cc199
1 files changed, 0 insertions, 199 deletions
diff --git a/runtimes/neurun/src/linear/Linear.cc b/runtimes/neurun/src/linear/Linear.cc
deleted file mode 100644
index 6452bbd49..000000000
--- a/runtimes/neurun/src/linear/Linear.cc
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <algorithm>
-
-#include "Linear.h"
-
-#include "graph/Graph.h"
-
-#include "graph/operation/LowerInfo.h"
-#include "backend/interface/IStageGenerator.h"
-#include "backend/interface/IConfig.h"
-#include "compiler/SubTensorInfo.h"
-#include "compiler/TensorInfo.h"
-
-#include "util/logging.h"
-
-namespace neurun
-{
-namespace linear
-{
-
-// Build the linear execution order for `graph`: a post-order DFS, reversed,
-// yields a topological sort. Each entry in _operations pairs the operation
-// node with the LowerInfo (backend assignment) recorded on the graph.
-Linear::Linear(const graph::Graph &graph) : _graph(graph)
-{
- // Linearize with topological sort
- //
- // Topological sort algorithm
- // 1. Iterate with DFS
- // 2. Append the node to vector when DFS for the node finishes(post order)
- // 3. Reverse the order of nodes
-
- graph::Graph::PostDfsConstIterator().iterate(
- graph, [&](const model::operation::Index &index, const model::operation::Node &node) {
- // NOTE(review): lower_info is stored by pointer; assumes it outlives
- // this Linear object (owned by the graph) — confirm ownership.
- const auto lower_info = graph.getLowerInfo(index);
- _operations.emplace_back(&node, lower_info);
- });
-
- std::reverse(std::begin(_operations), std::end(_operations));
-}
-
-// Visit every operation node in linearized (topological) order.
-// NOTE(review): std::move(visitor) is applied on every iteration; this is
-// only safe if Node::accept does not actually move-from the visitor — it
-// appears to be a cast to satisfy the rvalue-reference overload. Confirm
-// against NodeVisitor's accept signature.
-void Linear::accept(model::operation::NodeVisitor &&visitor) const
-{
- // NOTE(review): `const auto op` copies each Element per iteration;
- // `const auto &` would avoid the copy (Element looks like a small
- // pointer pair, so cost is minor).
- for (const auto op : _operations)
- {
- op.node->accept(std::move(visitor));
- }
-}
-
-// Plan tensor lifetimes for every operand in the graph:
-//   1. Register each operand's (sub)tensor info with the tensor builder of
-//      every backend that defines it.
-//   2. Simulate the linearized execution, notifying each builder of an
-//      operand's first use (allocate) and last use (deallocate).
-// Constants and model inputs/outputs get an extra use count so they are
-// never deallocated during the simulated run.
-// Returns the set of tensor builders that received at least one registration.
-backend::TensorBuilderSet Linear::planTensors()
-{
- using ITensorBuilderPtr = std::shared_ptr<backend::ITensorBuilder>;
- using FnOnTensorBuilder =
- std::function<void(const model::operand::Index &ind, ITensorBuilderPtr)>;
-
- const auto &operands = _graph.operands();
- // Helper: apply `fn` once per tensor builder of every backend that
- // defines operand `ind`.
- auto iterTensorBuilders = [&operands](const model::operand::Index &ind, FnOnTensorBuilder fn) {
- const auto &obj = operands.at(ind);
- for (auto backend : obj.lower_info()->def_backends())
- {
- auto tensor_builder = backend->tensor_builder();
- fn(ind, tensor_builder);
- }
- };
-
- backend::TensorBuilderSet tensor_builders;
-
- // Remaining-use counter per operand; reaching 0 triggers notifyLastUse.
- std::unordered_map<model::operand::Index, uint32_t> uses_map;
- std::vector<model::operand::Index> constants;
-
- // Pass 1: initialize use counts and register tensor info with backends.
- _graph.operands().iterate(
- [&](const model::operand::Index &ind, const model::operand::Object &obj) {
- uses_map[ind] = obj.getUses().size();
-
- // If a tensor is a constant, increase the use of the tensor.
- // It makes the tensor not be dealloced.
- if (obj.getUsage() == model::operand::OperandUsage::CONSTANT)
- {
- constants.push_back(ind);
- uses_map[ind]++;
- }
-
- for (auto backend : obj.lower_info()->def_backends())
- {
- bool isSubTensor = false;
- auto tensor_builder = backend->tensor_builder();
-
- // An operand with a parent is carved out of its parent's storage
- // (sub-tensor) when the backend supports sub-tensor allocation.
- if (backend->config()->SupportSubTensorAlloc())
- {
- const auto parentInfo = obj.parent_info();
- if (parentInfo != nullptr)
- {
- isSubTensor = true;
- }
- }
-
- if (isSubTensor)
- {
- const compiler::SubTensorInfo info(obj);
- tensor_builder->registerSubTensorInfo(ind, info);
- }
- else
- {
- const auto info = compiler::TensorInfo(obj.shape(), obj.typeInfo());
- tensor_builder->registerTensorInfo(ind, info);
- }
-
- // Prepare tensor builders to be returned
- tensor_builders.insert(tensor_builder);
- }
- });
-
- // If a tensor is model output, increase the use of the tensor.
- // This aim is same to above one.
- for (const auto &ind : _graph.getOutputs())
- {
- uses_map[ind]++;
- }
-
- // Allocate constant operands first
- VERBOSE(LINEAR) << "TENSORS as CONSTANT" << std::endl;
- for (const auto &ind : constants)
- {
- iterTensorBuilders(ind, [](const model::operand::Index &ind, ITensorBuilderPtr tensor_builder) {
- tensor_builder->notifyFirstUse(ind);
- });
- }
-
- // Allocate Model's inputs
- VERBOSE(LINEAR) << "TENSORS as MODEL INPUT" << std::endl;
- for (const auto &ind : _graph.getInputs())
- {
- iterTensorBuilders(ind, [](const model::operand::Index &ind, ITensorBuilderPtr tensor_builder) {
- tensor_builder->notifyFirstUse(ind);
- });
- }
-
- // Pass 2: walk the linearized operations, firing first-use on defined
- // outputs and last-use on inputs whose remaining-use count hits zero.
- // At each operation,
- // 1. Scan USE of inputs. Decrease the USE and deallocate if the USE is 0
- // 2. Scan DEF of outputs. If the DEF, allocate it
- VERBOSE(LINEAR) << "TENSORS" << std::endl;
- for (const auto op : _operations)
- {
- for (const auto &ind : op.node->getOutputs())
- {
- const auto &obj = operands.at(ind);
- if (obj.getDef().size())
- {
- iterTensorBuilders(ind,
- [](const model::operand::Index &ind, ITensorBuilderPtr tensor_builder) {
- tensor_builder->notifyFirstUse(ind);
- });
- }
- }
-
- for (const auto &ind : op.node->getInputs())
- {
- // NOTE(review): operator[] default-inserts 0 for an unseen index,
- // which would underflow on the next decrement; assumes every input
- // was counted in pass 1 — confirm.
- uses_map[ind]--;
- if (uses_map[ind] == 0)
- {
- iterTensorBuilders(ind,
- [](const model::operand::Index &ind, ITensorBuilderPtr tensor_builder) {
- tensor_builder->notifyLastUse(ind);
- });
- }
- }
- }
-
- // Now, model outputs should be not deallocated
- assert(std::all_of(_graph.getOutputs().begin(), _graph.getOutputs().end(),
- [&uses_map](const model::operand::Index &ind) { return uses_map[ind] > 0; }));
-
- // Set subtensor information
- // Todo: move this phase outside as optimization phase
- return tensor_builders;
-}
-
-// Apply `fn` to each Element (node + lower_info) in linearized execution
-// order. Read-only traversal; the callback must not assume ownership.
-void Linear::iterate(const std::function<void(const Element &element)> &fn) const
-{
- // NOTE(review): `const auto op` copies the Element each iteration;
- // `const auto &` would avoid the copy.
- for (const auto op : _operations)
- {
- fn(op);
- }
-}
-
-} // namespace linear
-} // namespace neurun