summaryrefslogtreecommitdiff
path: root/runtimes/neurun/core/src/graph/Graph.cc
diff options
context:
space:
mode:
authorChunseok Lee <chunseok.lee@samsung.com>2020-03-05 15:10:09 +0900
committerChunseok Lee <chunseok.lee@samsung.com>2020-03-05 15:22:53 +0900
commitd91a039e0eda6fd70dcd22672b8ce1817c1ca50e (patch)
tree62668ec548cf31fadbbf4e99522999ad13434a25 /runtimes/neurun/core/src/graph/Graph.cc
parentbd11b24234d7d43dfe05a81c520aa01ffad06e42 (diff)
downloadnnfw-d91a039e0eda6fd70dcd22672b8ce1817c1ca50e.tar.gz
nnfw-d91a039e0eda6fd70dcd22672b8ce1817c1ca50e.tar.bz2
nnfw-d91a039e0eda6fd70dcd22672b8ce1817c1ca50e.zip
catch up to tizen_5.5 and remove unness dir
- update to tizen_5.5 - remove dirs
Diffstat (limited to 'runtimes/neurun/core/src/graph/Graph.cc')
-rw-r--r--runtimes/neurun/core/src/graph/Graph.cc589
1 files changed, 589 insertions, 0 deletions
diff --git a/runtimes/neurun/core/src/graph/Graph.cc b/runtimes/neurun/core/src/graph/Graph.cc
new file mode 100644
index 000000000..4264b1a8a
--- /dev/null
+++ b/runtimes/neurun/core/src/graph/Graph.cc
@@ -0,0 +1,589 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "graph/Graph.h"
+
+#include <algorithm>
+#include <bitset>
+
+#include "util/logging.h"
+#include "verifier/Verifier.h"
+#include "cpp14/memory.h"
+#include "compiler/Linear.h"
+#include "graph/operation/LowerInfo.h"
+#include "graph/operand/LowerInfo.h"
+#include "graph/operand/PermuteFactor.h"
+#include "operand/Shape4DConvert.h"
+#include "compiler/BackendResolver.h"
+#include "backend/IConfig.h"
+#include "pass/PermutationInsertionPass.h"
+#include "pass/PermutationEliminationPass.h"
+
+namespace neurun
+{
+namespace graph
+{
+
+Graph::Graph(std::unique_ptr<model::Model> &&model) : _model{std::move(model)}
+{
+ // DO NOTHING
+}
+
// Defaulted out-of-line; presumably defined here (not in the header) so members may
// use types that are only forward-declared in Graph.h -- NOTE(review): confirm against Graph.h
Graph::~Graph(void) = default;
+
+model::OperandIndex Graph::addOperand(const model::Shape &shape, const model::TypeInfo &type)
+{
+ return _model->operands.emplace(shape, type);
+}
+
+model::OperationIndex Graph::addOperation(std::unique_ptr<model::Operation> &&node)
+{
+ assert(isBuildingPhase());
+ return _model->operations.push(std::move(node));
+}
+
+void Graph::setOperandValue(const model::OperandIndex &ind, std::unique_ptr<model::Data> &&data)
+{
+ assert(isBuildingPhase());
+ assert(_model->operands.exist(ind));
+ _model->operands.at(ind).data(std::move(data));
+}
+
+void Graph::addInput(const model::OperandIndex &ind)
+{
+ assert(isBuildingPhase());
+ _model->inputs.append(ind);
+}
+
+void Graph::addOutput(const model::OperandIndex &ind)
+{
+ assert(isBuildingPhase());
+ _model->outputs.append(ind);
+}
+
+void Graph::finishBuilding(void)
+{
+ assert(isBuildingPhase());
+ _phase = Phase::MODEL;
+
+ // Initialize operand use-def
+ initializeUseDef();
+
+ // Call graph verifications for the MODEL phase
+ {
+ assert(verifier::DAGChecker().verify(*this));
+ assert(verifier::EdgeConsistencyChecker().verify(*this));
+ }
+}
+
/**
 * @brief Lower the model for compilation.
 *
 * Partitions operations into subgraphs of nodes that share a backend and
 * layout, attaches LowerInfo (backend + layout permute factors) to every
 * subgraph and operand, and runs PermutationInsertionPass to reconcile
 * mismatching factors.
 *
 * Precondition: the graph is in the MODEL phase (finishBuilding() was called)
 * and a backend resolver has been set.
 */
void Graph::lower(void)
{
  assert(_phase == Phase::MODEL);

  _subgraphs = nnfw::cpp14::make_unique<model::Subgraphs>();
  // In profiling mode each subgraph is forced to hold exactly one node (see below)
  bool is_profiling = util::getConfigBool(util::config::PROFILING_MODE);

  // Lower
  {
    // operand::LowerInfo holder; moved into _lower_info_map at the end of this scope
    model::OperandIndexMap<std::unique_ptr<operand::LowerInfo>> operands_lower_info;

    _model->operands.iterate([&](const model::OperandIndex &index, const model::Operand &object) {
      operands_lower_info[index] =
          nnfw::cpp14::make_unique<operand::LowerInfo>(graph::operand::asShape4D(object.shape()));
    });

    _lower_info_map = nnfw::cpp14::make_unique<LowerInfoMap>();

    // Are they mergeable?
    // 1. the same backend id and layout?
    // 2. if 1 is true, the subg and a node are connected?
    auto mergeable = [&](const model::SubgraphIndex &subg_index,
                         const model::OperationIndex &node_index, model::Layout layout) {
      const auto &subg = _subgraphs->at(subg_index);
      const auto &node = _model->operations.at(node_index);

      // The same backend id and layout?
      {
        const auto subg_backend_layout = getLowerInfo(subg_index)->layout();
        const auto &subg_backend_id = getLowerInfo(subg_index)->backend()->config()->id();
        const auto &node_backend_id = _backend_resolver->getBackend(node_index)->config()->id();
        VERBOSE(Lower) << "SUBG#" << subg_index.value() << " { " << subg_backend_id << "("
                       << model::to_string(subg_backend_layout) << ") } "
                       << " NODE#" << node_index.value() << " (" << node.getName() << ") { "
                       << node_backend_id << "(" << model::to_string(layout) << ") } " << std::endl;
        if (subg_backend_id != node_backend_id || subg_backend_layout != layout)
          return false;
      }

      // Connected?
      // an input of one node is an output of the other node? or vice-versa?
      {
        const auto &node_inputs = node.getInputs();
        const auto &node_outputs = node.getOutputs();

        // subg's operations are in order so that we just check the first and the last
        std::vector<model::Element> subg_ops{subg.operations()[0]};
        if (subg.operations().size() > 1)
          subg_ops.emplace_back(subg.operations()[subg.operations().size() - 1]);

        for (const auto &elem : subg_ops)
        {
          const auto &n_index = elem.index;
          const auto &n = *elem.node;

          // node's output == subg's input?
          const auto &n_inputs = n.getInputs();
          for (auto input : n_inputs)
          {
            if (node_outputs.contains(input))
            {
              VERBOSE(Lower) << "SUBG#" << subg_index.value() << " 's NODE#" << n_index.value()
                             << "(" << n.getName() << ") is connected to NODE#"
                             << node_index.value() << "(" << node.getName() << ")" << std::endl;
              return true;
            }
          }

          // node's input == subg's output?
          const auto &n_outputs = n.getOutputs();
          for (auto output : n_outputs)
          {
            if (node_inputs.contains(output))
            {
              VERBOSE(Lower) << "SUBG#" << subg_index.value() << " 's NODE#" << n_index.value()
                             << " (" << n.getName() << ") is connected to NODE#"
                             << node_index.value() << std::endl;
              return true;
            }
          }
        }

        VERBOSE(Lower) << "SUBG#" << subg_index.value() << " is not connected to NODE#"
                       << node_index.value() << "(" << node.getName() << ")" << std::endl;
      }

      return false;
    };

    // Create a fresh subgraph with one operation, and append it to subgraphs
    auto append_fresh_single_op_subgraph = [&](const model::OperationIndex &node_index,
                                               const model::Operation &node, model::Layout layout) {
      // Create a fresh subgraph
      auto subg = nnfw::cpp14::make_unique<model::Subgraph>(layout);

      // Add an operation
      subg->appendOperation(node_index, node);

      // Update input/output
      subg->setOutputs(node.getOutputs());
      subg->setInputs(node.getInputs());

      return _subgraphs->emplace(std::move(subg));
    };

    // Current "open" subgraph being grown; nullptr forces a fresh subgraph
    model::Subgraph *subg = nullptr;
    model::SubgraphIndex subg_index;

    // Make subgraphs while checking whether a node can be merged into a subgraph.
    // NOTE: The below method appends nodes while making one subgraph if needed. If something better
    //       ways, happy to update this code.
    Graph::PostDfsConstIterator().iterate(*this, [&](const model::OperationIndex &node_index,
                                                     const model::Operation &node) {
      // LowerInfo for in/output operands
      auto backend = _backend_resolver->getBackend(node_index);
      // TODO How to get layout of this node from IR
      auto frontend_layout = model::Layout::NHWC;
      auto backend_layout = frontend_layout;
      const std::string acl_layout_str = util::getConfigString(util::config::ACL_LAYOUT);
      if (acl_layout_str == "NHWC")
      {
        backend_layout = model::Layout::NHWC;
      }
      else if (acl_layout_str == "NCHW")
      {
        backend_layout = model::Layout::NCHW;
      }

      // CPU supports only NHWC now
      if (backend->config()->id() == "cpu")
      {
        backend_layout = model::Layout::NHWC;
      }

      // Record this node's backend/layout as a use factor of its inputs
      for (auto operand : node.getInputs())
      {
        auto &&lower_info = operands_lower_info.at(operand);
        lower_info->addUsePermuteFactor(operand::PermuteFactor{backend, backend_layout});
      }
      // ... and as a def factor of its outputs
      for (auto operand : node.getOutputs())
      {
        auto &&lower_info = operands_lower_info.at(operand);
        lower_info->addDefPermuteFactor(operand::PermuteFactor{backend, backend_layout});
      }
      /* for profiling each subgraph must contain just one node,
         so that we can measure a node separately */
      if (!subg || is_profiling || !mergeable(subg_index, node_index, backend_layout))
      {
        auto new_subg_index = append_fresh_single_op_subgraph(node_index, node, frontend_layout);

        // Subgraph LowerInfo
        setLowerInfo(new_subg_index, nnfw::cpp14::make_unique<graph::operation::LowerInfo>(
                                         backend, backend_layout));

        subg_index = new_subg_index;
        subg = &(_subgraphs->at(new_subg_index));

        VERBOSE(Lower) << "SUBG#" << subg_index.value() << " is created for "
                       << "NODE#" << node_index.value() << "(" << node.getName() << ")"
                       << std::endl;
      }
      else
      {
        subg->appendOperation(node_index, node);
        subg->setInputs(node.getInputs());

        VERBOSE(Lower) << "SUBG#" << subg_index.value() << " merges "
                       << "NODE#" << node_index.value() << "(" << node.getName() << ")"
                       << std::endl;
      }

      // Decide whether the current subgraph must be closed after this node
      bool finish = false;
      {
        size_t prev_op_cnt = 0;
        for (auto input : node.getInputs())
        {
          // only valid_inputs
          const auto &operand = _model->operands.at(input);
          if (operand.isConstant())
            continue;

          // This operand is input of operation, not weight or bias
          if (operand.getDef().list().size() > 0)
            ++prev_op_cnt;

          // Test the node is Concat or BeginningBranch
          // About (1)isConcat and (2)isBeginningBranch
          // (1) Current node has multiple inputs as concat?
          //   - Does current node have two or more than previous operation?
          //
          //  [CONV] [CONV] [CONV] [MAX_POOL]
          //    |      |      |       |
          //   [0]    [1]    [2]     [3]
          //    \      |      |      /
          //     [ C O N C A T ]  # current node
          //
          // (2) Current node is on the separated branch at the beginning?
          //   - Does current node's input operand's uses have two or more than?
          //
          //   [CONV]
          //     |
          //    [0]----.
          //     |     |
          //   [CONV] [CONV]  # current node
          //     |     |
          //    [1]   [2]
          //     \     /
          //     [CONCAT]
          if (prev_op_cnt > 1 || operand.getUses().list().size() > 1)
          {
            finish = true;
            break;
          }
        }
      }

      if (finish)
        subg = nullptr;
    });

    // Operations were appended during a post-DFS walk; reverse each subgraph's
    // operation list so it runs in forward (topological) order
    _subgraphs->iterate([&](const model::SubgraphIndex &, model::Subgraph &subg) {
      assert(subg.operations().size() > 0);
      std::reverse(std::begin(subg.operations()), std::end(subg.operations()));
    });

    _subgraphs->dump("merged and sorted operations without permutation");

// NOTE This is desired way to handle model input and outputs however getDefaultBackend() is
// cpu backend dependent for now we cannot use it.
#if 0
  // Add def backend to model input/output operand as default backend
  for (auto index : getInputs())
  {
    auto &&lower_info = operands_lower_info.at(index);
    lower_info->addDefBackend(_backend_resolver->getDefaultBackend());
  }

  for (auto index : getOutputs())
  {
    auto &&lower_info = operands_lower_info.at(index);
    lower_info->addUseBackend(_backend_resolver->getDefaultBackend());
  }
#endif

    // Add DefFactor constants same as UseFactor
    // NOTE This assumes a constant operand is used by only one operation
    _model->operations.iterate([&](const model::OperationIndex &, model::Operation &node) {
      // LowerInfo for input operands
      for (auto operand : node.getInputs())
      {
        auto &&lower_info = operands_lower_info.at(operand);
        if (lower_info->def_factors().empty())
        {
          // NOTE Handling model inputs here is not ideal. See above NOTE comment.
          // If it is a model input, not a constant
          if (_model->inputs.contains(operand))
          {
            // If one or more elements then any PermuteFactor is OK so pick first one
            if (!lower_info->use_factors().empty())
            {
              lower_info->addDefPermuteFactor(*lower_info->use_factors().begin());
            }
          }
          // If it is a constant
          else
          {
            lower_info->addDefPermuteFactor(lower_info->use_factors().getOnlyElement());
          }
        }
      }
    });

    // Set LowerInfo for each operand from the operand::LowerInfo holder
    _model->operands.iterate([&](const model::OperandIndex &index, model::Operand &object) {
      setLowerInfo(index, std::move(operands_lower_info[index]));

      // Dump operand LowerInfo
      // TODO Extract this dumping procedure to be reusable
      if (!getLowerInfo(index)->def_factors().empty() ||
          !getLowerInfo(index)->use_factors().empty())
      {
        auto factors_to_string = [](const operand::PermuteFactorSet &factors) {
          std::string str;
          for (auto factor : factors)
          {
            str += factor.backend()->config()->id();
            str += "(" + model::to_string(factor.layout()) + ")";
            str += " ";
          }
          return "{ " + str + "}";
        };

        auto operation_index_to_string = [](const model::OperationIndexList &operations) {
          std::string str;
          for (auto op : operations.list())
          {
            str += std::to_string(op.value());
            str += " ";
          }
          return "{ " + str + "}";
        };

        const auto lower_info = getLowerInfo(index);
        const auto &shape = object.shape();
        const auto &lower_shape = lower_info->shape();
        std::string def_ops = operation_index_to_string(object.getDef());
        std::string use_ops = operation_index_to_string(object.getUses());
        std::string def_layouts = factors_to_string(lower_info->def_factors());
        std::string use_layouts = factors_to_string(lower_info->use_factors());
        VERBOSE(Lower) << "* Operand #" << index.value() << " LowerInfo" << std::endl;
        VERBOSE(Lower) << " - Shape : { " << shape.dim(0) << " "
                       << (shape.rank() > 1 ? shape.dim(1) : 0) << " "
                       << (shape.rank() > 2 ? shape.dim(2) : 0) << " "
                       << (shape.rank() > 3 ? shape.dim(3) : 0) << " "
                       << "}" << std::endl;
        VERBOSE(Lower) << " - Def Operations : " << def_ops << std::endl;
        VERBOSE(Lower) << " - Use Operations : " << use_ops << std::endl;
        VERBOSE(Lower) << " - Lower Info" << std::endl;
        VERBOSE(Lower) << " - 4D Shape (NHWC) : { " << lower_shape.n() << " " << lower_shape.h()
                       << " " << lower_shape.w() << " " << lower_shape.c() << " "
                       << "}" << std::endl;
        VERBOSE(Lower) << " - Def Backends : " << def_layouts << std::endl;
        VERBOSE(Lower) << " - Use Backends : " << use_layouts << std::endl;
      }
    });
  }

  // Run PermutationInsertionPass
  {
    pass::PermutationInsertionPass pi_pass(*this);
    pi_pass.run();
    // Implemented code no longer works.
    // pass::PermutationEliminationPass pe_pass(*this);
    // pe_pass.run();

    // TODO merge perm subgraphs if possible
    _subgraphs->dump("merged and sorted operations with permutation");
  }

  // Graph verifications for the LOWERED phase (assert-only, skipped under NDEBUG)
  {
    assert(verifier::DAGChecker().verify(*this));
    assert(verifier::EdgeConsistencyChecker().verify(*this));
  }
}
+
+std::unique_ptr<compiler::Linear> Graph::linearize(void)
+{
+ assert(_phase == Phase::MODEL);
+
+ auto linear = nnfw::cpp14::make_unique<compiler::Linear>(
+ shareModel(), releaseSubgraphs(), releaseLowerInfo(), releaseBackendResolver());
+
+ // TODO Move the operations and operands to linear object
+ return linear;
+}
+
+void Graph::initializeUseDef()
+{
+ operations().iterate(
+ [&](const model::OperationIndex &index, const model::Operation &node) -> void {
+ auto outputs = node.getOutputs();
+ for (auto output : outputs)
+ {
+ operands().at(output).appendDef(index);
+ }
+
+ auto inputs = node.getInputs();
+ for (auto input : inputs)
+ {
+ operands().at(input).appendUse(index);
+ }
+ });
+}
+
+const operation::LowerInfo *Graph::getLowerInfo(const model::SubgraphIndex &subg_index) const
+{
+ if (!_lower_info_map)
+ return nullptr;
+ auto itr = _lower_info_map->operation.find(subg_index);
+ if (itr == _lower_info_map->operation.end())
+ return nullptr;
+ return itr->second.get();
+}
+
+void Graph::setLowerInfo(const model::SubgraphIndex &subg_index,
+ std::unique_ptr<operation::LowerInfo> &&lower_info)
+{
+ assert(_lower_info_map);
+ _lower_info_map->operation.insert(std::make_pair(subg_index, std::move(lower_info)));
+}
+
+const operand::LowerInfo *Graph::getLowerInfo(const model::OperandIndex &index) const
+{
+ if (!_lower_info_map)
+ return nullptr;
+ auto itr = _lower_info_map->operand.find(index);
+ if (itr == _lower_info_map->operand.end())
+ return nullptr;
+ return itr->second.get();
+}
+
+operand::LowerInfo *Graph::getLowerInfo(const model::OperandIndex &index)
+{
+ if (!_lower_info_map)
+ return nullptr;
+ auto itr = _lower_info_map->operand.find(index);
+ if (itr == _lower_info_map->operand.end())
+ return nullptr;
+ return itr->second.get();
+}
+
+void Graph::setLowerInfo(const model::OperandIndex &index,
+ std::unique_ptr<operand::LowerInfo> &&lower_info)
+{
+ assert(_lower_info_map);
+ _lower_info_map->operand.insert(std::make_pair(index, std::move(lower_info)));
+}
+
+} // namespace graph
+} // namespace neurun
+
+namespace neurun
+{
+namespace graph
+{
+
// Explicit template instantiations so the iterator implementations below can
// live in this source file instead of the header. Both the const (true) and
// non-const (false) variants are instantiated.

template class Graph::DefaultIterator<true>;
template class Graph::DefaultIterator<false>;

template class Graph::PostDfsIterator<true>;
template class Graph::PostDfsIterator<false>;
+
+//
+// Graph::DefaultIterator
+//
+
+template <bool is_const>
+void Graph::DefaultIterator<is_const>::iterate(GraphRef graph, const IterFn &fn) const
+{
+ graph.operations().iterate(
+ [&](const model::OperationIndex &index, NodeRef node) -> void { fn(index, node); });
+}
+
+//
+// Graph::PostDfsIterator
+//
+
+template <bool is_const>
+void Graph::PostDfsIterator<is_const>::iterate(GraphRef graph, const IterFn &fn) const
+{
+ assert(!graph.isBuildingPhase()); // Restrict iteration condition
+
+ model::OperationIndexMap<bool> visited;
+ graph.operations().iterate(
+ [&](const model::OperationIndex &index, NodeRef) { visited[index] = false; });
+
+ std::function<void(const model::OperationIndex &, NodeRef)> dfs_recursive =
+ [&](const model::OperationIndex &index, NodeRef node) -> void {
+ if (visited[index])
+ return;
+ visited[index] = true;
+
+ for (auto output : node.getOutputs())
+ {
+ const auto &operand = graph.operands().at(output);
+ for (const auto &use : operand.getUses().list())
+ {
+ dfs_recursive(use, graph.operations().at(use));
+ }
+ }
+
+ fn(index, node);
+ };
+
+ graph.operations().iterate(dfs_recursive);
+
+ // All of the operations(nodes) must have been visited.
+ assert(
+ std::all_of(visited.begin(), visited.end(),
+ [](const std::pair<const model::OperationIndex, bool> &v) { return v.second; }));
+}
+
+void Graph::setBackendResolver(std::unique_ptr<compiler::BackendResolver> &&br)
+{
+ _backend_resolver = std::move(br);
+}
+
+std::unique_ptr<compiler::BackendResolver> Graph::releaseBackendResolver()
+{
+ return std::move(_backend_resolver);
+}
+
+} // namespace graph
+} // namespace neurun