path: root/runtime/neurun/core/src/ir
Diffstat (limited to 'runtime/neurun/core/src/ir')
-rw-r--r--  runtime/neurun/core/src/ir/Graph.cc | 551
-rw-r--r--  runtime/neurun/core/src/ir/GraphIterator.cc | 84
-rw-r--r--  runtime/neurun/core/src/ir/GraphIterator.h | 74
-rw-r--r--  runtime/neurun/core/src/ir/LayoutSet.cc | 66
-rw-r--r--  runtime/neurun/core/src/ir/LayoutSet.h | 58
-rw-r--r--  runtime/neurun/core/src/ir/OpCode.cc | 37
-rw-r--r--  runtime/neurun/core/src/ir/OpSequence.cc | 83
-rw-r--r--  runtime/neurun/core/src/ir/Operand.cc | 70
-rw-r--r--  runtime/neurun/core/src/ir/OperandIndexSequence.cc | 58
-rw-r--r--  runtime/neurun/core/src/ir/Operation.cc | 55
-rw-r--r--  runtime/neurun/core/src/ir/OperationIndexList.cc | 37
-rw-r--r--  runtime/neurun/core/src/ir/Shape.cc | 85
-rw-r--r--  runtime/neurun/core/src/ir/Subgraphs.cc | 87
-rw-r--r--  runtime/neurun/core/src/ir/TypeInfo.cc | 47
-rw-r--r--  runtime/neurun/core/src/ir/dumper/Dumper.cc | 633
-rw-r--r--  runtime/neurun/core/src/ir/dumper/Dumper.h | 102
-rw-r--r--  runtime/neurun/core/src/ir/operand/Shape4DConvert.h | 57
-rw-r--r--  runtime/neurun/core/src/ir/operation/Abs.cc | 39
-rw-r--r--  runtime/neurun/core/src/ir/operation/Add.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/ArgMax.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/AvgPool2D.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/BatchToSpaceND.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/Cast.cc | 39
-rw-r--r--  runtime/neurun/core/src/ir/operation/Comparison.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/Concat.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/Conv2D.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/Custom.cc | 46
-rw-r--r--  runtime/neurun/core/src/ir/operation/DepthToSpace.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/DepthwiseConv2D.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/Dequantize.cc | 39
-rw-r--r--  runtime/neurun/core/src/ir/operation/Div.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/EmbeddingLookup.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/Exp.cc | 39
-rw-r--r--  runtime/neurun/core/src/ir/operation/Floor.cc | 39
-rw-r--r--  runtime/neurun/core/src/ir/operation/FullyConnected.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/Gather.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/HashtableLookup.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/InstanceNorm.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/L2Normalization.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/L2Pool2D.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/LSTM.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/LocalResponseNormalization.cc | 41
-rw-r--r--  runtime/neurun/core/src/ir/operation/LogicalAnd.cc | 39
-rw-r--r--  runtime/neurun/core/src/ir/operation/LogicalNot.cc | 39
-rw-r--r--  runtime/neurun/core/src/ir/operation/LogicalOr.cc | 39
-rw-r--r--  runtime/neurun/core/src/ir/operation/Logistic.cc | 39
-rw-r--r--  runtime/neurun/core/src/ir/operation/LowerInfo.cc | 34
-rw-r--r--  runtime/neurun/core/src/ir/operation/Max.cc | 39
-rw-r--r--  runtime/neurun/core/src/ir/operation/MaxPool2D.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/Mean.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/Min.cc | 39
-rw-r--r--  runtime/neurun/core/src/ir/operation/Mul.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/Neg.cc | 39
-rw-r--r--  runtime/neurun/core/src/ir/operation/OneHot.cc | 37
-rw-r--r--  runtime/neurun/core/src/ir/operation/PReLU.cc | 39
-rw-r--r--  runtime/neurun/core/src/ir/operation/Pack.cc | 33
-rw-r--r--  runtime/neurun/core/src/ir/operation/Pad.cc | 38
-rw-r--r--  runtime/neurun/core/src/ir/operation/Permute.cc | 44
-rw-r--r--  runtime/neurun/core/src/ir/operation/RNN.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/RSQRT.cc | 39
-rw-r--r--  runtime/neurun/core/src/ir/operation/ReLU.cc | 39
-rw-r--r--  runtime/neurun/core/src/ir/operation/ReLU1.cc | 39
-rw-r--r--  runtime/neurun/core/src/ir/operation/ReLU6.cc | 39
-rw-r--r--  runtime/neurun/core/src/ir/operation/ReduceMax.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/ReduceMin.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/ReduceSum.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/Reshape.cc | 39
-rw-r--r--  runtime/neurun/core/src/ir/operation/ResizeBilinear.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/SQRT.cc | 39
-rw-r--r--  runtime/neurun/core/src/ir/operation/Slice.cc | 37
-rw-r--r--  runtime/neurun/core/src/ir/operation/Softmax.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/SpaceToBatchND.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/SpaceToDepth.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/Split.cc | 33
-rw-r--r--  runtime/neurun/core/src/ir/operation/SquaredDifference.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/Squeeze.cc | 37
-rw-r--r--  runtime/neurun/core/src/ir/operation/StridedSlice.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/Sub.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/Tanh.cc | 39
-rw-r--r--  runtime/neurun/core/src/ir/operation/TopKV2.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/Transpose.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/TransposeConv.cc | 40
-rw-r--r--  runtime/neurun/core/src/ir/operation/Unpack.cc | 33
-rw-r--r--  runtime/neurun/core/src/ir/pass/ConstantInsertionPass.cc | 104
-rw-r--r--  runtime/neurun/core/src/ir/pass/ConstantInsertionPass.h | 75
-rw-r--r--  runtime/neurun/core/src/ir/pass/OperandPass.cc | 36
-rw-r--r--  runtime/neurun/core/src/ir/pass/OperandPass.h | 53
-rw-r--r--  runtime/neurun/core/src/ir/pass/OperationPass.cc | 38
-rw-r--r--  runtime/neurun/core/src/ir/pass/OperationPass.h | 76
-rw-r--r--  runtime/neurun/core/src/ir/pass/Pass.h | 55
-rw-r--r--  runtime/neurun/core/src/ir/pass/PermutationEliminationPass.cc | 195
-rw-r--r--  runtime/neurun/core/src/ir/pass/PermutationEliminationPass.h | 86
-rw-r--r--  runtime/neurun/core/src/ir/pass/PermutationInsertionPass.cc | 209
-rw-r--r--  runtime/neurun/core/src/ir/pass/PermutationInsertionPass.h | 59
-rw-r--r--  runtime/neurun/core/src/ir/pass/PermutationOperationPass.cc | 230
-rw-r--r--  runtime/neurun/core/src/ir/pass/PermutationOperationPass.h | 54
-rw-r--r--  runtime/neurun/core/src/ir/verifier/Verifier.cc | 96
-rw-r--r--  runtime/neurun/core/src/ir/verifier/Verifier.h | 68
98 files changed, 6211 insertions, 0 deletions
diff --git a/runtime/neurun/core/src/ir/Graph.cc b/runtime/neurun/core/src/ir/Graph.cc
new file mode 100644
index 000000000..a84ebb68b
--- /dev/null
+++ b/runtime/neurun/core/src/ir/Graph.cc
@@ -0,0 +1,551 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/Graph.h"
+
+#include <algorithm>
+#include <bitset>
+#include <sstream>
+
+#include "util/logging.h"
+#include "verifier/Verifier.h"
+#include "cpp14/memory.h"
+#include "ir/operation/LowerInfo.h"
+#include "ir/operand/LowerInfo.h"
+#include "ir/operand/PermuteFactor.h"
+#include "ir/GraphIterator.h"
+#include "operand/Shape4DConvert.h"
+#include "compiler/BackendResolver.h"
+#include "backend/IConfig.h"
+#include "pass/ConstantInsertionPass.h"
+#include "pass/PermutationInsertionPass.h"
+#include "pass/PermutationEliminationPass.h"
+#include "pass/PermutationOperationPass.h"
+
+namespace neurun
+{
+namespace ir
+{
+
+Graph::Graph() = default;
+
+Graph::~Graph(void) = default;
+
+OperandIndex Graph::addOperand(const Shape &shape, const TypeInfo &type)
+{
+ return _operands.emplace(shape, type);
+}
+
+OperationIndex Graph::addOperation(std::unique_ptr<Operation> &&node)
+{
+ assert(isBuildingPhase());
+ return _operations.push(std::move(node));
+}
+
+void Graph::setOperandValue(const OperandIndex &ind, std::unique_ptr<Data> &&data)
+{
+ assert(isBuildingPhase());
+ assert(_operands.exist(ind));
+ _operands.at(ind).data(std::move(data));
+}
+
+void Graph::addInput(const OperandIndex &ind)
+{
+ assert(isBuildingPhase());
+ _inputs.append(ind);
+}
+
+void Graph::addOutput(const OperandIndex &ind)
+{
+ assert(isBuildingPhase());
+ _outputs.append(ind);
+}
+
+void Graph::finishBuilding(void)
+{
+ assert(isBuildingPhase());
+ _phase = Phase::MODEL;
+
+ // Initialize operand use-def
+ initializeUseDef();
+
+ // Call graph verifications for the MODEL phase
+ {
+ assert(verifier::DAGChecker().verify(*this));
+ assert(verifier::EdgeConsistencyChecker().verify(*this));
+ }
+}
+
+void Graph::lower(void)
+{
+ assert(_phase == Phase::MODEL);
+
+ _op_seqs = nnfw::cpp14::make_unique<Subgraphs>();
+
+ // Lower
+ {
+ // operand::LowerInfo holder
+ OperandIndexMap<std::unique_ptr<operand::LowerInfo>> operands_lower_info;
+
+ _operands.iterate([&](const OperandIndex &index, const Operand &object) {
+ operands_lower_info[index] =
+ nnfw::cpp14::make_unique<operand::LowerInfo>(operand::asShape4D(object.shape()));
+ });
+
+ _lower_info_map = nnfw::cpp14::make_unique<LowerInfoMap>();
+
+ // Make subgraphs while checking whether a node can be merged into an op_seq.
+ makeSubgraphs(operands_lower_info);
+
+ _op_seqs->iterate([&](const SubgraphIndex &, OpSequence &subg) {
+ assert(subg.operations().size() > 0);
+ std::reverse(std::begin(subg.operations()), std::end(subg.operations()));
+ });
+
+ _op_seqs->dump("merged and sorted operations without permutation");
+
+ pass::ConstantInsertionPass ci_pass(*this);
+ ci_pass.run();
+
+ // Set LowerInfo for each operand from the operand::LowerInfo holder
+ manipulateLowerInfo(operands_lower_info);
+
+ dumpLowerInfo();
+ }
+
+ // Run Permutation Passes
+ {
+ pass::PermutationOperationPass po_pass(*this);
+ po_pass.run();
+
+ pass::PermutationInsertionPass pi_pass(*this);
+ pi_pass.run();
+ // The implemented code no longer works, so the pass is disabled for now.
+ // pass::PermutationEliminationPass pe_pass(*this);
+ // pe_pass.run();
+
+ // TODO merge perm subgraphs if possible
+ _op_seqs->dump("merged and sorted operations with permutation");
+ }
+
+ // Graph verifications for the LOWERED phase
+ {
+ assert(verifier::DAGChecker().verify(*this));
+ assert(verifier::EdgeConsistencyChecker().verify(*this));
+ }
+}
+
+void Graph::initializeUseDef()
+{
+ operations().iterate([&](const OperationIndex &index, const Operation &node) -> void {
+ auto outputs = node.getOutputs();
+ for (auto output : outputs)
+ {
+ operands().at(output).appendDef(index);
+ }
+
+ auto inputs = node.getInputs();
+ for (auto input : inputs)
+ {
+ operands().at(input).appendUse(index);
+ }
+ });
+}
+
+const operation::LowerInfo *Graph::getLowerInfo(const SubgraphIndex &subg_index) const
+{
+ if (!_lower_info_map)
+ return nullptr;
+ auto itr = _lower_info_map->operation.find(subg_index);
+ if (itr == _lower_info_map->operation.end())
+ return nullptr;
+ return itr->second.get();
+}
+
+void Graph::setLowerInfo(const SubgraphIndex &subg_index,
+ std::unique_ptr<operation::LowerInfo> &&lower_info)
+{
+ assert(_lower_info_map);
+ _lower_info_map->operation.insert(std::make_pair(subg_index, std::move(lower_info)));
+}
+
+void Graph::removeLowerInfo(const SubgraphIndex &subg_index)
+{
+ auto &subg_lower_info = _lower_info_map->operation;
+ assert(subg_lower_info.find(subg_index) != subg_lower_info.end());
+ for (auto it = subg_lower_info.begin(); it != subg_lower_info.end(); ++it)
+ {
+ if (it->first == subg_index)
+ {
+ subg_lower_info.erase(it);
+ break;
+ }
+ }
+}
+
+const operand::LowerInfo *Graph::getLowerInfo(const OperandIndex &index) const
+{
+ if (!_lower_info_map)
+ return nullptr;
+ auto itr = _lower_info_map->operand.find(index);
+ if (itr == _lower_info_map->operand.end())
+ return nullptr;
+ return itr->second.get();
+}
+
+operand::LowerInfo *Graph::getLowerInfo(const OperandIndex &index)
+{
+ if (!_lower_info_map)
+ return nullptr;
+ auto itr = _lower_info_map->operand.find(index);
+ if (itr == _lower_info_map->operand.end())
+ return nullptr;
+ return itr->second.get();
+}
+
+void Graph::setLowerInfo(const OperandIndex &index,
+ std::unique_ptr<operand::LowerInfo> &&lower_info)
+{
+ assert(_lower_info_map);
+ _lower_info_map->operand.insert(std::make_pair(index, std::move(lower_info)));
+}
+
+void Graph::removeLowerInfo(const OperandIndex &index) { _lower_info_map->operand.erase(index); }
+
+void Graph::makeSubgraphs(OperandIndexMap<std::unique_ptr<operand::LowerInfo>> &operands_lower_info)
+{
+ // If SUBG_MAX_NODE == 0, there is no limit on the number of nodes in an op_seq
+ const int subg_max_node = util::getConfigInt(util::config::SUBG_MAX_NODE);
+ assert(subg_max_node >= 0);
+
+ bool is_profiling = util::getConfigBool(util::config::PROFILING_MODE);
+ OpSequence *subg = nullptr;
+ SubgraphIndex subg_index;
+
+ // NOTE: The method below appends nodes, creating a new op_seq when needed. If there is a
+ // better way, feel free to update this code.
+ PostDfsConstIterator{}.iterate(*this, [&](const OperationIndex &node_index,
+ const Operation &node) {
+ // LowerInfo for in/output operands
+ auto backend = _backend_resolver->getBackend(node_index);
+
+ // TODO How to get frontend layout of this node from IR
+ auto frontend_layout = Layout::NHWC;
+ auto backend_layout = frontend_layout;
+
+ // The layout of each backend should be set elsewhere
+ // TODO Move the per-backend layout setting to another place
+ // TODO Remove the lookup of the backend id
+ if (backend->config()->id() == "acl_cl" || backend->config()->id() == "acl_neon")
+ {
+ const std::string acl_layout_str = util::getConfigString(util::config::ACL_LAYOUT);
+ if (acl_layout_str == "NHWC")
+ {
+ backend_layout = Layout::NHWC;
+ }
+ else if (acl_layout_str == "NCHW")
+ {
+ backend_layout = Layout::NCHW;
+ }
+ }
+ else if (backend->config()->id() == "srcn")
+ {
+ const std::string ncnn_layout_str = util::getConfigString(util::config::NCNN_LAYOUT);
+ if (ncnn_layout_str == "NHWC")
+ {
+ backend_layout = Layout::NHWC;
+ }
+ else if (ncnn_layout_str == "NCHW")
+ {
+ backend_layout = Layout::NCHW;
+ }
+ }
+ else if (backend->config()->id() == "cpu")
+ {
+ backend_layout = Layout::NHWC;
+ }
+
+ for (auto operand : node.getInputs())
+ {
+ auto &&lower_info = operands_lower_info.at(operand);
+ lower_info->addUsePermuteFactor(operand::PermuteFactor{backend, backend_layout});
+ }
+ for (auto operand : node.getOutputs())
+ {
+ auto &&lower_info = operands_lower_info.at(operand);
+ lower_info->addDefPermuteFactor(operand::PermuteFactor{backend, backend_layout});
+ }
+
+ bool new_subg =
+ (subg == nullptr ||
+ (subg_max_node != 0 && subg->operations().size() >= static_cast<size_t>(subg_max_node)));
+
+ // For profiling, each op_seq must contain just one node,
+ // so that we can measure each node separately
+ if (new_subg || is_profiling || !mergeable(subg_index, node_index, backend_layout))
+ {
+ auto new_subg_index = appendFreshSingleOpSubgraph(node_index, node, frontend_layout);
+
+ // OpSequence LowerInfo
+ setLowerInfo(new_subg_index,
+ nnfw::cpp14::make_unique<operation::LowerInfo>(backend, backend_layout));
+
+ subg_index = new_subg_index;
+ subg = &(_op_seqs->at(new_subg_index));
+
+ VERBOSE(Lower) << "SUBG#" << subg_index.value() << " is created for "
+ << "NODE#" << node_index.value() << "(" << node.name() << ")" << std::endl;
+ }
+ else
+ {
+ subg->appendOperation(node_index, node);
+ subg->setInputs(node.getInputs());
+
+ VERBOSE(Lower) << "SUBG#" << subg_index.value() << " merges "
+ << "NODE#" << node_index.value() << "(" << node.name() << ")" << std::endl;
+ }
+ });
+}
+
+void Graph::manipulateLowerInfo(
+ OperandIndexMap<std::unique_ptr<operand::LowerInfo>> &operands_lower_info)
+{
+ const auto default_backend = backend::BackendManager::get().getDefault();
+ for (auto index : _inputs)
+ {
+ // Pick just any one from the uses, here the first one is chosen
+ // For the other uses, Permute operations will be inserted later
+ auto &&lower_info = operands_lower_info.at(index);
+ assert(lower_info->use_factors().size() > 0);
+ lower_info->addDefPermuteFactor(*lower_info->use_factors().begin());
+ }
+ for (auto index : _outputs)
+ {
+ auto &&lower_info = operands_lower_info.at(index);
+ if (_operands.at(index).isConstant())
+ {
+ lower_info->addDefPermuteFactor(operand::PermuteFactor{
+ default_backend,
+ Layout::NHWC // TODO Get frontend layout of this node from IR
+ });
+ }
+ }
+
+ // Set LowerInfo for each operand from the operand::LowerInfo holder
+ _operands.iterate([&](const OperandIndex &index, Operand &) {
+ setLowerInfo(index, std::move(operands_lower_info[index]));
+ });
+}
+
+void Graph::dumpLowerInfo()
+{
+ if (::neurun::util::logging::ctx.enabled() == false)
+ return;
+
+ std::map<uint32_t, std::string> dumps;
+
+ _operands.iterate([&](const OperandIndex &index, Operand &object) {
+ std::stringstream sstream;
+ if (!getLowerInfo(index)->def_factors().empty() || !getLowerInfo(index)->use_factors().empty())
+ {
+ auto factors_to_string = [](const operand::PermuteFactorSet &factors) {
+ std::string str;
+ for (auto factor : factors)
+ {
+ str += factor.backend()->config()->id();
+ str += "(" + to_string(factor.layout()) + ")";
+ str += " ";
+ }
+ return "{ " + str + "}";
+ };
+
+ auto operation_index_to_string = [](const OperationIndexList &operations) {
+ std::string str;
+ for (auto op : operations.list())
+ {
+ str += std::to_string(op.value());
+ str += " ";
+ }
+ return "{ " + str + "}";
+ };
+
+ const auto lower_info = getLowerInfo(index);
+ const auto &shape = object.shape();
+ const auto &lower_shape = lower_info->shape();
+ std::string def_ops = operation_index_to_string(object.getDef());
+ std::string use_ops = operation_index_to_string(object.getUses());
+ std::string def_layouts = factors_to_string(lower_info->def_factors());
+ std::string use_layouts = factors_to_string(lower_info->use_factors());
+ sstream << "Operand #" << index.value() << " LowerInfo" << std::endl;
+ sstream << " - Shape : { " << (shape.rank() > 0 ? shape.dim(0) : 0) << " "
+ << (shape.rank() > 1 ? shape.dim(1) : 0) << " "
+ << (shape.rank() > 2 ? shape.dim(2) : 0) << " "
+ << (shape.rank() > 3 ? shape.dim(3) : 0) << " "
+ << "}" << std::endl;
+ sstream << " - Def Operations : " << def_ops << std::endl;
+ sstream << " - Use Operations : " << use_ops << std::endl;
+ sstream << " - Lower Info" << std::endl;
+ sstream << " - 4D Shape (NHWC) : { " << lower_shape.n() << " " << lower_shape.h() << " "
+ << lower_shape.w() << " " << lower_shape.c() << " "
+ << "}" << std::endl;
+ sstream << " - Def Backends : " << def_layouts << std::endl;
+ sstream << " - Use Backends : " << use_layouts << std::endl;
+ }
+ dumps.emplace(index.value(), sstream.str());
+ });
+
+ for (const auto &e : dumps)
+ {
+ if (!e.second.empty())
+ {
+ VERBOSE(Lower) << e.second;
+ }
+ }
+}
+
+bool Graph::mergeable(const SubgraphIndex &subg_index, const OperationIndex &node_index,
+ Layout layout)
+{
+ // Are they mergeable?
+ // 1. Do they have the same backend id and layout?
+ // 2. Is the op_seq or the node branched?
+ // 3. If 1 holds, are the op_seq and the node connected?
+ const auto &subg = _op_seqs->at(subg_index);
+ const auto &node = _operations.at(node_index);
+
+ // The same backend id and layout?
+ {
+ const auto subg_backend_layout = getLowerInfo(subg_index)->layout();
+ const auto &subg_backend_id = getLowerInfo(subg_index)->backend()->config()->id();
+ const auto &node_backend_id = _backend_resolver->getBackend(node_index)->config()->id();
+ VERBOSE(Lower) << "SUBG#" << subg_index.value() << " { " << subg_backend_id << "("
+ << to_string(subg_backend_layout) << ") } "
+ << " NODE#" << node_index.value() << " (" << node.name() << ") { "
+ << node_backend_id << "(" << to_string(layout) << ") } " << std::endl;
+ if (subg_backend_id != node_backend_id || subg_backend_layout != layout)
+ return false;
+ }
+
+ // Branched?
+ {
+ std::unordered_set<OperationIndex> branched_set;
+
+ // Check for branching up
+ const auto &inputs = subg.getInputs();
+ for (const auto &input : inputs)
+ {
+ const auto &input_obj = _operands.at(input);
+ for (const auto &def : input_obj.getDef().list())
+ {
+ branched_set.insert(def);
+ if (branched_set.size() > 1)
+ {
+ return false;
+ }
+ }
+ }
+ branched_set.clear();
+
+ // Check for branching down
+ const auto &outputs = node.getOutputs();
+ for (const auto &output : outputs)
+ {
+ const auto &output_obj = _operands.at(output);
+ for (const auto &use : output_obj.getUses().list())
+ {
+ branched_set.insert(use);
+ if (branched_set.size() > 1)
+ {
+ return false;
+ }
+ }
+ }
+ }
+
+ // Connected?
+ // an input of one node is an output of the other node? or vice-versa?
+ {
+ const auto &node_inputs = node.getInputs();
+ const auto &node_outputs = node.getOutputs();
+
+ // subg's operations are kept in order, so it is enough to check the first and the last
+ std::vector<Element> subg_ops{subg.operations()[0]};
+ if (subg.operations().size() > 1)
+ subg_ops.emplace_back(subg.operations()[subg.operations().size() - 1]);
+
+ for (const auto &elem : subg_ops)
+ {
+ const auto &n_index = elem.index;
+ const auto &n = *elem.node;
+
+ // node's output == subg's input?
+ const auto &n_inputs = n.getInputs();
+ for (auto input : n_inputs)
+ {
+ if (node_outputs.contains(input))
+ {
+ VERBOSE(Lower) << "SUBG#" << subg_index.value() << " 's NODE#" << n_index.value() << "("
+ << n.name() << ") is connected to NODE#" << node_index.value() << "("
+ << node.name() << ")" << std::endl;
+ return true;
+ }
+ }
+
+ // node's input == subg's output?
+ const auto &n_outputs = n.getOutputs();
+ for (auto output : n_outputs)
+ {
+ if (node_inputs.contains(output))
+ {
+ VERBOSE(Lower) << "SUBG#" << subg_index.value() << " 's NODE#" << n_index.value() << " ("
+ << n.name() << ") is connected to NODE#" << node_index.value()
+ << std::endl;
+ return true;
+ }
+ }
+ }
+
+ VERBOSE(Lower) << "SUBG#" << subg_index.value() << " is not connected to NODE#"
+ << node_index.value() << "(" << node.name() << ")" << std::endl;
+ }
+
+ return false;
+}
+
+SubgraphIndex Graph::appendFreshSingleOpSubgraph(const OperationIndex &node_index,
+ const Operation &node, Layout layout)
+{
+ // Create a fresh op_seq with one operation, and append it to subgraphs
+ // Create a fresh op_seq
+ auto subg = nnfw::cpp14::make_unique<OpSequence>(layout);
+
+ // Add an operation
+ subg->appendOperation(node_index, node);
+
+ // Update input/output
+ subg->setOutputs(node.getOutputs());
+ subg->setInputs(node.getInputs());
+
+ return _op_seqs->emplace(std::move(subg));
+}
+
+void Graph::setBackendResolver(std::unique_ptr<compiler::BackendResolver> &&br)
+{
+ _backend_resolver = std::move(br);
+}
+
+} // namespace ir
+} // namespace neurun
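For orientation, a minimal build-and-lower sketch using only the Graph methods introduced above; the Shape/TypeInfo constructor forms and the concrete Operation subclasses are assumptions, since they are declared in headers outside this file:

  using namespace neurun::ir;

  Graph graph;
  // Register operands (shapes/types here are illustrative placeholders)
  OperandIndex in = graph.addOperand(Shape{1, 224, 224, 3}, TypeInfo{DataType::FLOAT32});
  OperandIndex out = graph.addOperand(Shape{1, 1000}, TypeInfo{DataType::FLOAT32});
  graph.addInput(in);
  graph.addOutput(out);
  // graph.addOperation(...);          // append concrete Operation subclasses here
  graph.finishBuilding();              // initializes use-def info and runs the MODEL-phase verifiers
  graph.setBackendResolver(/* ... */); // a compiler::BackendResolver is required before lowering
  graph.lower();                       // makes op_seqs, assigns LowerInfo, runs the permutation passes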
diff --git a/runtime/neurun/core/src/ir/GraphIterator.cc b/runtime/neurun/core/src/ir/GraphIterator.cc
new file mode 100644
index 000000000..ce20787ae
--- /dev/null
+++ b/runtime/neurun/core/src/ir/GraphIterator.cc
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GraphIterator.h"
+
+#include "ir/OperationIndexMap.h"
+#include "ir/Graph.h"
+
+namespace neurun
+{
+namespace ir
+{
+
+// Explicit instantiations to have implementation in the source file.
+
+template class DefaultIterator<true>;
+template class DefaultIterator<false>;
+
+template class PostDfsIterator<true>;
+template class PostDfsIterator<false>;
+
+//
+// Graph::DefaultIterator
+//
+
+template <bool is_const>
+void DefaultIterator<is_const>::iterate(GraphRef graph, const IterFn &fn) const
+{
+ graph.operations().iterate(
+ [&](const OperationIndex &index, NodeRef node) -> void { fn(index, node); });
+}
+
+//
+// Graph::PostDfsIterator
+//
+
+template <bool is_const>
+void PostDfsIterator<is_const>::iterate(GraphRef graph, const IterFn &fn) const
+{
+ assert(!graph.isBuildingPhase()); // Restrict iteration condition
+
+ OperationIndexMap<bool> visited;
+ graph.operations().iterate([&](const OperationIndex &index, NodeRef) { visited[index] = false; });
+
+ std::function<void(const OperationIndex &, NodeRef)> dfs_recursive =
+ [&](const OperationIndex &index, NodeRef node) -> void {
+ if (visited[index])
+ return;
+ visited[index] = true;
+
+ for (auto output : node.getOutputs())
+ {
+ const auto &operand = graph.operands().at(output);
+ for (const auto &use : operand.getUses().list())
+ {
+ dfs_recursive(use, graph.operations().at(use));
+ }
+ }
+
+ fn(index, node);
+ };
+
+ graph.operations().iterate(dfs_recursive);
+
+ // All of the operations(nodes) must have been visited.
+ assert(std::all_of(visited.begin(), visited.end(),
+ [](const std::pair<const OperationIndex, bool> &v) { return v.second; }));
+}
+
+} // namespace ir
+} // namespace neurun
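The post-DFS iterator above is the one Graph::lower drives: when the callback fires for a node, every operation that consumes that node's outputs has already been visited (which is why Graph::lower reverses each op_seq afterwards). A minimal usage sketch:

  PostDfsConstIterator{}.iterate(graph, [](const OperationIndex &index, const Operation &node) {
    // visited in post-DFS order: consumers before producers
  });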
diff --git a/runtime/neurun/core/src/ir/GraphIterator.h b/runtime/neurun/core/src/ir/GraphIterator.h
new file mode 100644
index 000000000..a5bf1c323
--- /dev/null
+++ b/runtime/neurun/core/src/ir/GraphIterator.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_IR_GRAPH_ITERATOR_H__
+#define __NEURUN_IR_GRAPH_ITERATOR_H__
+
+#include <type_traits>
+
+#include "ir/Index.h"
+
+namespace neurun
+{
+namespace ir
+{
+
+class Graph;
+class Operation;
+
+template <bool is_const> class Iterator
+{
+public:
+ using GraphRef = typename std::conditional<is_const, const Graph &, Graph &>::type;
+ using IndexRef = const OperationIndex &;
+ using NodeRef = typename std::conditional<is_const, const Operation &, Operation &>::type;
+ using IterFn = std::function<void(IndexRef, NodeRef)>;
+
+public:
+ virtual ~Iterator() = default;
+ virtual void iterate(GraphRef graph, const IterFn &fn) const = 0;
+};
+
+template <bool is_const = false> class DefaultIterator final : public Iterator<is_const>
+{
+public:
+ using GraphRef = typename Iterator<is_const>::GraphRef;
+ using IndexRef = typename Iterator<is_const>::IndexRef;
+ using NodeRef = typename Iterator<is_const>::NodeRef;
+ using IterFn = typename Iterator<is_const>::IterFn;
+
+public:
+ void iterate(GraphRef graph, const IterFn &fn) const;
+};
+using DefaultConstIterator = DefaultIterator<true>;
+
+template <bool is_const = false> class PostDfsIterator final : public Iterator<is_const>
+{
+public:
+ using GraphRef = typename Iterator<is_const>::GraphRef;
+ using IndexRef = typename Iterator<is_const>::IndexRef;
+ using NodeRef = typename Iterator<is_const>::NodeRef;
+ using IterFn = typename Iterator<is_const>::IterFn;
+
+public:
+ void iterate(GraphRef graph, const IterFn &fn) const;
+};
+using PostDfsConstIterator = PostDfsIterator<true>;
+
+} // namespace ir
+} // namespace neurun
+
+#endif // __NEURUN_IR_GRAPH_ITERATOR_H__
diff --git a/runtime/neurun/core/src/ir/LayoutSet.cc b/runtime/neurun/core/src/ir/LayoutSet.cc
new file mode 100644
index 000000000..025ba45dc
--- /dev/null
+++ b/runtime/neurun/core/src/ir/LayoutSet.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LayoutSet.h"
+
+namespace neurun
+{
+namespace ir
+{
+
+LayoutSet::LayoutSet(std::initializer_list<Layout> layouts)
+{
+ for (auto layout : layouts)
+ {
+ _set.insert(layout);
+ }
+}
+
+LayoutSet LayoutSet::operator|(const LayoutSet &other) const
+{
+ auto ret = *this;
+ for (auto layout : other)
+ {
+ ret.add(layout);
+ }
+ return ret;
+}
+
+LayoutSet LayoutSet::operator&(const LayoutSet &other) const
+{
+ LayoutSet ret;
+ for (auto layout : other)
+ {
+ if (contains(layout))
+ {
+ ret.add(layout);
+ }
+ }
+ return ret;
+}
+
+LayoutSet LayoutSet::operator-(const LayoutSet &other) const
+{
+ auto ret = *this;
+ for (auto layout : other)
+ {
+ ret.remove(layout);
+ }
+ return ret;
+}
+
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/LayoutSet.h b/runtime/neurun/core/src/ir/LayoutSet.h
new file mode 100644
index 000000000..e38ef3ce2
--- /dev/null
+++ b/runtime/neurun/core/src/ir/LayoutSet.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_IR_LAYOUT_SET_H__
+#define __NEURUN_IR_LAYOUT_SET_H__
+
+#include <initializer_list>
+#include <unordered_set>
+
+#include "ir/Layout.h"
+
+namespace neurun
+{
+namespace ir
+{
+
+class LayoutSet
+{
+public:
+ LayoutSet() = default;
+ LayoutSet(std::initializer_list<Layout> layouts);
+
+public:
+ void add(const Layout &layout) { _set.insert(layout); }
+ void remove(const Layout &layout) { _set.erase(layout); }
+ uint32_t size() const { return static_cast<uint32_t>(_set.size()); }
+ bool contains(const Layout &layout) const { return _set.find(layout) != _set.end(); }
+
+public:
+ LayoutSet operator|(const LayoutSet &other) const; // Union
+ LayoutSet operator&(const LayoutSet &other) const; // Intersect
+ LayoutSet operator-(const LayoutSet &other) const; // Minus
+
+public:
+ std::unordered_set<Layout>::const_iterator begin() const { return _set.begin(); }
+ std::unordered_set<Layout>::const_iterator end() const { return _set.end(); }
+
+private:
+ std::unordered_set<Layout> _set;
+};
+
+} // namespace ir
+} // namespace neurun
+
+#endif // __NEURUN_IR_LAYOUT_SET_H__
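A short usage sketch of the set algebra declared above:

  LayoutSet a{Layout::NHWC, Layout::NCHW};
  LayoutSet b{Layout::NHWC};
  LayoutSet u = a | b; // union     -> { NHWC, NCHW }
  LayoutSet i = a & b; // intersect -> { NHWC }
  LayoutSet d = a - b; // minus     -> { NCHW }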
diff --git a/runtime/neurun/core/src/ir/OpCode.cc b/runtime/neurun/core/src/ir/OpCode.cc
new file mode 100644
index 000000000..e6552a275
--- /dev/null
+++ b/runtime/neurun/core/src/ir/OpCode.cc
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/OpCode.h"
+
+#include <unordered_map>
+
+namespace neurun
+{
+namespace ir
+{
+
+const char *toString(OpCode opcode)
+{
+ static const std::unordered_map<OpCode, const char *> map{{OpCode::Invalid, "Invalid"},
+#define OP(Name) {OpCode::Name, #Name},
+#include "ir/Operations.lst"
+#undef OP
+ {OpCode::COUNT, "COUNT"}};
+ return map.at(opcode);
+}
+
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/OpSequence.cc b/runtime/neurun/core/src/ir/OpSequence.cc
new file mode 100644
index 000000000..13a6cbe27
--- /dev/null
+++ b/runtime/neurun/core/src/ir/OpSequence.cc
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/OpSequence.h"
+#include "ir/OperationVisitor.h"
+#include <sstream>
+
+namespace neurun
+{
+namespace ir
+{
+
+OpSequence::OpSequence(Layout layout) : _layout{layout}
+{
+ // DO NOTHING
+}
+
+void OpSequence::accept(OperationVisitor &v) const { v.visit(*this); }
+
+// TODO: Impl Dumper instead of this method
+std::string OpSequence::getStr() const
+{
+ // " OpSequence IN(xx,xx,xx) -> { op0, op1, op2 } -> OUT(yy,yy,yy)"
+ std::stringstream ss;
+ ss << " OpSequence IN(";
+ for (const auto &index : getInputs())
+ {
+ ss << " " << index.value();
+ }
+ ss << " ) -> {";
+ for (const auto &elem : _operations)
+ {
+ ss << " " << elem.index.value() << "(" << elem.node->name() << ")";
+ }
+ ss << " } -> OUT(";
+ for (const auto &index : getOutputs())
+ {
+ ss << " " << index.value();
+ }
+ ss << " )";
+ return ss.str();
+}
+
+void OpSequence::remove(const OperationIndex &index)
+{
+ assert(exist(index));
+ for (auto it = _operations.cbegin(); it != _operations.cend(); ++it)
+ {
+ if (it->index == index)
+ {
+ _operations.erase(it);
+ break;
+ }
+ }
+}
+
+bool OpSequence::exist(const OperationIndex &index) const
+{
+ for (const auto &element : _operations)
+ {
+ if (element.index == index)
+ {
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/Operand.cc b/runtime/neurun/core/src/ir/Operand.cc
new file mode 100644
index 000000000..335dd17b9
--- /dev/null
+++ b/runtime/neurun/core/src/ir/Operand.cc
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/Operand.h"
+
+namespace neurun
+{
+namespace ir
+{
+
+size_t Operand::operandSize(void) const
+{
+ const uint32_t ranks = shape().rank();
+ int32_t elements = 1;
+
+ for (uint32_t rank = 0; rank < ranks; rank++)
+ {
+ elements *= shape().dim(rank);
+ }
+
+ DataType type = typeInfo().type();
+ size_t element_size = sizeOfDataType(type);
+
+ // Value of type is matched with OperandCode enum in NeuralNetworks.h
+ return element_size * elements;
+}
+
+void Operand::appendUse(const OperationIndex &idx) { _uses.append(idx); }
+
+void Operand::removeUse(const OperationIndex &idx) { _uses.remove(idx); }
+
+void Operand::appendDef(const OperationIndex &idx)
+{
+ assert(!isConstant());
+ assert(_def.size() == 0);
+
+ _def.append(idx);
+}
+
+void Operand::removeDef(const OperationIndex &idx)
+{
+ assert(_def.contains(idx));
+
+ _def.remove(idx);
+}
+
+void Operand::parent_info(std::unique_ptr<operand::ParentInfo> &&parent_info)
+{
+ _parent_info = std::move(parent_info);
+}
+
+const operand::ParentInfo *Operand::parent_info() const { return _parent_info.get(); }
+
+operand::ParentInfo *Operand::parent_info() { return _parent_info.get(); }
+
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/OperandIndexSequence.cc b/runtime/neurun/core/src/ir/OperandIndexSequence.cc
new file mode 100644
index 000000000..302444125
--- /dev/null
+++ b/runtime/neurun/core/src/ir/OperandIndexSequence.cc
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/OperandIndexSequence.h"
+
+#include <algorithm>
+
+namespace neurun
+{
+namespace ir
+{
+
+OperandIndexSequence::OperandIndexSequence(std::initializer_list<OperandIndex> list) : _set(list)
+{
+ // DO NOTHING
+}
+
+OperandIndexSequence::OperandIndexSequence(std::initializer_list<int32_t> list)
+{
+ for (auto val : list)
+ {
+ _set.emplace_back(static_cast<uint32_t>(val));
+ }
+}
+
+OperandIndexSequence::OperandIndexSequence(std::initializer_list<uint32_t> list)
+{
+ for (auto val : list)
+ {
+ _set.emplace_back(val);
+ }
+}
+
+bool OperandIndexSequence::contains(const OperandIndex &index) const
+{
+ return std::find(_set.begin(), _set.end(), index) != _set.end();
+}
+
+void OperandIndexSequence::replace(const OperandIndex &from, const OperandIndex &to)
+{
+ std::replace(_set.begin(), _set.end(), from, to);
+}
+
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/Operation.cc b/runtime/neurun/core/src/ir/Operation.cc
new file mode 100644
index 000000000..3e4b606f2
--- /dev/null
+++ b/runtime/neurun/core/src/ir/Operation.cc
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/Operation.h"
+
+#include <cassert>
+
+namespace neurun
+{
+namespace ir
+{
+
+Operation::Operation(OperandConstraint input_constr, const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs)
+ : _input_constr{input_constr}, _inputs{inputs}, _outputs{outputs}
+{
+}
+
+Operation::Operation(OperandConstraint input_constr) : _input_constr{input_constr} {}
+
+Operation::~Operation() = default;
+
+void Operation::setInputs(const OperandIndexSequence &indexes)
+{
+ assert(_input_constr.check(indexes.size()));
+ _inputs = indexes;
+}
+
+void Operation::setOutputs(const OperandIndexSequence &indexes) { _outputs = indexes; }
+
+void Operation::replaceInput(const OperandIndex &from, const OperandIndex &to)
+{
+ _inputs.replace(from, to);
+}
+
+void Operation::replaceOutput(const OperandIndex &from, const OperandIndex &to)
+{
+ _outputs.replace(from, to);
+}
+
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/OperationIndexList.cc b/runtime/neurun/core/src/ir/OperationIndexList.cc
new file mode 100644
index 000000000..261cc5ce6
--- /dev/null
+++ b/runtime/neurun/core/src/ir/OperationIndexList.cc
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/OperationIndexList.h"
+
+#include <algorithm>
+
+namespace neurun
+{
+namespace ir
+{
+
+OperationIndexList::OperationIndexList(std::initializer_list<OperationIndex> list) : _list(list)
+{
+ // DO NOTHING
+}
+
+bool OperationIndexList::contains(const OperationIndex &index) const
+{
+ return std::find(_list.begin(), _list.end(), index) != _list.end();
+}
+
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/Shape.cc b/runtime/neurun/core/src/ir/Shape.cc
new file mode 100644
index 000000000..2679f83c6
--- /dev/null
+++ b/runtime/neurun/core/src/ir/Shape.cc
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/Shape.h"
+#include "util/Utils.h"
+
+#include <cassert>
+#include <functional>
+#include <numeric>
+
+namespace neurun
+{
+namespace ir
+{
+
+FeatureShape Shape::asFeature(Layout layout) const
+{
+ assert(rank() == 4);
+
+ if (layout == Layout::NHWC)
+ {
+ // Feature Map in NHWC layout
+ // - Dimension(0) -> Batch
+ // - Dimension(1) -> Height
+ // - Dimension(2) -> Width
+ // - Dimension(3) -> Depth
+ const auto batch = dim(0);
+ const auto depth = dim(3);
+ const auto height = dim(1);
+ const auto width = dim(2);
+
+ return {batch, depth, height, width};
+ }
+ else if (layout == Layout::NCHW)
+ {
+ // Feature Map in NCHW layout
+ // - Dimension(0) -> Batch
+ // - Dimension(1) -> Depth
+ // - Dimension(2) -> Height
+ // - Dimension(3) -> Width
+ const auto batch = dim(0);
+ const auto depth = dim(1);
+ const auto height = dim(2);
+ const auto width = dim(3);
+
+ return {batch, depth, height, width};
+ }
+ else
+ {
+ throw std::runtime_error("Wrong Layout");
+ }
+}
+
+// Extended dimensions are filled with 1.
+void Shape::extendRank(int to_rank)
+{
+ assert(to_rank - rank() >= 0);
+ _dimensions.insert(_dimensions.cbegin(), to_rank - rank(), 1);
+}
+
+uint64_t Shape::num_elements() const
+{
+ // All dimensions must be non-negative
+ assert(std::all_of(_dimensions.begin(), _dimensions.end(),
+ [](const int32_t &v) { return (v >= 0); }));
+
+ return std::accumulate(_dimensions.cbegin(), _dimensions.cend(), UINT64_C(1),
+ std::multiplies<uint64_t>());
+}
+
+} // namespace ir
+} // namespace neurun
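As an illustration of extendRank and num_elements (the Shape constructor form is an assumption; it is declared in ir/Shape.h, which is not part of this diff):

  Shape s{3, 4};                 // rank-2 shape, assuming an initializer-list constructor
  s.extendRank(4);               // leading dimensions are filled with 1 -> {1, 1, 3, 4}
  uint64_t n = s.num_elements(); // 1 * 1 * 3 * 4 = 12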
diff --git a/runtime/neurun/core/src/ir/Subgraphs.cc b/runtime/neurun/core/src/ir/Subgraphs.cc
new file mode 100644
index 000000000..780fc8c28
--- /dev/null
+++ b/runtime/neurun/core/src/ir/Subgraphs.cc
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/Subgraphs.h"
+#include "util/logging.h"
+#include "cpp14/memory.h"
+
+#include <cassert>
+#include <string>
+
+namespace neurun
+{
+namespace ir
+{
+
+SubgraphIndex Subgraphs::emplace(const OperationIndex &index, const Operation &node, Layout layout)
+{
+ std::unique_ptr<OpSequence> subg = nnfw::cpp14::make_unique<OpSequence>(layout);
+ subg->appendOperation(index, node);
+ return push(std::move(subg));
+}
+
+SubgraphIndex Subgraphs::emplace(std::unique_ptr<OpSequence> &&subg)
+{
+ return push(std::move(subg));
+}
+
+bool Subgraphs::containsOperation(const OperationIndex &operation_index) const
+{
+ return findOperation(operation_index).valid();
+}
+
+SubgraphIndex Subgraphs::getOperation(const OperationIndex &operation_index) const
+{
+ SubgraphIndex ret = findOperation(operation_index);
+ assert(ret.valid());
+ return ret;
+}
+
+// TODO: Extract this into external helper function
+void Subgraphs::dump(const std::string &msg) const
+{
+ VERBOSE(Subgraphs) << "Subgraphs(" << msg << ")" << std::endl;
+ iterate([&](const SubgraphIndex &idx, const OpSequence &subg) {
+ VERBOSE(Subgraphs) << idx.value() << "] " << subg.getStr() << std::endl;
+ });
+}
+
+void Subgraphs::removeFromSubgraph(const OperationIndex &operation_index)
+{
+ const auto subg_index = findOperation(operation_index);
+ auto &subg = at(subg_index);
+ subg.remove(operation_index);
+ if (subg.size() == 0)
+ {
+ remove(subg_index);
+ }
+}
+
+SubgraphIndex Subgraphs::findOperation(const OperationIndex &operation_index) const
+{
+ SubgraphIndex ret;
+ iterate([&](const SubgraphIndex &index, const OpSequence &object) {
+ for (const auto &elem : object.operations())
+ {
+ if (elem.index == operation_index)
+ ret = index;
+ }
+ });
+ return ret;
+}
+
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/TypeInfo.cc b/runtime/neurun/core/src/ir/TypeInfo.cc
new file mode 100644
index 000000000..280146b51
--- /dev/null
+++ b/runtime/neurun/core/src/ir/TypeInfo.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/TypeInfo.h"
+
+namespace neurun
+{
+namespace ir
+{
+
+bool operator==(const TypeInfo &lhs, const TypeInfo &rhs)
+{
+ if (lhs.type() != rhs.type())
+ {
+ return false;
+ }
+
+ if (lhs.offset() != rhs.offset())
+ {
+ return false;
+ }
+
+ if (lhs.scale() != rhs.scale())
+ {
+ return false;
+ }
+
+ return true;
+}
+
+bool operator!=(const TypeInfo &lhs, const TypeInfo &rhs) { return !(lhs == rhs); }
+
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/dumper/Dumper.cc b/runtime/neurun/core/src/ir/dumper/Dumper.cc
new file mode 100644
index 000000000..ddfd1a47a
--- /dev/null
+++ b/runtime/neurun/core/src/ir/dumper/Dumper.cc
@@ -0,0 +1,633 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Dumper.h"
+
+#include <string>
+
+#include "util/logging.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace dumper
+{
+
+using namespace operation;
+
+void Dumper::visit(const Abs &node)
+{
+ VERBOSE(LIR) << "* Abs" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Abs::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Add &node)
+{
+ VERBOSE(LIR) << "* Add" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Add::Input::LHS).value() << ", "
+ << node.getInputs().at(Add::Input::RHS).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const ArgMax &node)
+{
+ VERBOSE(LIR) << "* ArgMax" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(ArgMax::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const AvgPool2D &node)
+{
+ VERBOSE(LIR) << "* AvgPool2D(Implicit)" << std::endl;
+ VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(AvgPool2D::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Cast &node)
+{
+ VERBOSE(LIR) << "* Cast" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Cast::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Comparison &node)
+{
+ VERBOSE(LIR) << "* Comparison" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Comparison::Input::INPUT0).value()
+ << ", " << node.getInputs().at(Comparison::Input::INPUT1).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Concat &node)
+{
+ VERBOSE(LIR) << "* Concat" << std::endl;
+ std::string inputs;
+ for (auto i : node.getInputs())
+ {
+ inputs += std::to_string(i.value()) + ",";
+ }
+ VERBOSE(LIR) << " - Inputs : IFM(" << inputs << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Conv2D &node)
+{
+ std::string padding_type =
+ node.param().padding.type == PaddingType::EXPLICIT ? "Explicit" : "Implicit";
+ VERBOSE(LIR) << "* Conv2D(" << padding_type << ")" << std::endl;
+ VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(Conv2D::Input::INPUT).value()
+ << ") Kernel(" << node.getInputs().at(Conv2D::Input::KERNEL).value() << ") Bias("
+ << node.getInputs().at(Conv2D::Input::BIAS).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const DepthToSpace &node)
+{
+ VERBOSE(LIR) << "* DepthToSpace" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(DepthToSpace::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const DepthwiseConv2D &node)
+{
+ std::string padding_type =
+ node.param().padding.type == PaddingType::EXPLICIT ? "Explicit" : "Implicit";
+ VERBOSE(LIR) << "* DepthwiseConv2D(" << padding_type << ")" << std::endl;
+ VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(DepthwiseConv2D::Input::INPUT).value()
+ << ") Kernel(" << node.getInputs().at(DepthwiseConv2D::Input::KERNEL).value()
+ << ") Bias(" << node.getInputs().at(DepthwiseConv2D::Input::BIAS).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Dequantize &node)
+{
+ VERBOSE(LIR) << "* Dequantize" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Dequantize::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Div &node)
+{
+ VERBOSE(LIR) << "* Div" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Div::Input::LHS).value() << ", "
+ << node.getInputs().at(Div::Input::RHS).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const EmbeddingLookup &node)
+{
+ VERBOSE(LIR) << "* EmbeddingLookup" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Lookups("
+ << node.getInputs().at(EmbeddingLookup::Input::LOOKUPS).value() << ") VALUES("
+ << node.getInputs().at(EmbeddingLookup::Input::VALUES).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Exp &node)
+{
+ VERBOSE(LIR) << "* Exp" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Exp::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Floor &node)
+{
+ VERBOSE(LIR) << "* Floor" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Floor::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const FullyConnected &node)
+{
+ VERBOSE(LIR) << "* FullyConnected" << std::endl;
+ VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(FullyConnected::Input::INPUT).value()
+ << ") Weight(" << node.getInputs().at(FullyConnected::Input::WEIGHT).value()
+ << ") Bias(" << node.getInputs().at(FullyConnected::Input::BIAS).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Gather &node)
+{
+ VERBOSE(LIR) << "* Gather" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Gather::Input::INPUT).value()
+ << ") Indices(" << node.getInputs().at(Gather::Input::INDICES).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const HashtableLookup &node)
+{
+ VERBOSE(LIR) << "* HashTableLookup" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Lookups("
+ << node.getInputs().at(HashtableLookup::Input::LOOKUPS).value() << ") Keys("
+ << node.getInputs().at(HashtableLookup::Input::KEYS).value() << ") Values("
+ << node.getInputs().at(HashtableLookup::Input::VALUES).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Outputs : Output("
+ << node.getOutputs().at(HashtableLookup::Output::OUTPUT).value() << ") Hits("
+ << node.getOutputs().at(HashtableLookup::Output::HITS).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const InstanceNorm &node)
+{
+ VERBOSE(LIR) << "* InstanceNorm" << std::endl;
+ VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(InstanceNorm::Input::INPUT).value()
+ << ") Gamma(" << node.getInputs().at(InstanceNorm::Input::GAMMA).value() << ") Beta("
+ << node.getInputs().at(InstanceNorm::Input::BETA).value() << ") Epsilon("
+ << node.param().epsilon << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const L2Normalization &node)
+{
+ VERBOSE(LIR) << "* L2Normalization" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input("
+ << node.getInputs().at(L2Normalization::Input::INPUT).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const L2Pool2D &node)
+{
+ VERBOSE(LIR) << "* L2Pool2D" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(L2Pool2D::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const LocalResponseNormalization &node)
+{
+ VERBOSE(LIR) << "* LocalResponseNormalization" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input("
+ << node.getInputs().at(LocalResponseNormalization::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const LSTM &node)
+{
+ VERBOSE(LIR) << "* LSTM" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(LSTM::Input::INPUT).value()
+ << ") Input To Input Weights("
+ << node.getInputs().at(LSTM::Input::INPUT_TO_INPUT_WEIGHTS).value()
+ << ") Input To Forget Weights("
+ << node.getInputs().at(LSTM::Input::INPUT_TO_FORGET_WEIGHTS).value()
+ << ") Input To Cell Weights("
+ << node.getInputs().at(LSTM::Input::INPUT_TO_CELL_WEIGHTS).value()
+ << ") Input To Output Weights("
+ << node.getInputs().at(LSTM::Input::INPUT_TO_OUTPUT_WEIGHTS).value()
+ << ") Recurrent To Input Weights("
+ << node.getInputs().at(LSTM::Input::RECURRENT_TO_INPUT_WEIGHTS).value()
+ << ") Recurrent To Forget Weights("
+ << node.getInputs().at(LSTM::Input::RECURRENT_TO_FORGET_WEIGHTS).value()
+ << ") Recurrent To Cell Weights("
+ << node.getInputs().at(LSTM::Input::RECURRENT_TO_CELL_WEIGHTS).value()
+ << ") Recurrent To Output Weights("
+ << node.getInputs().at(LSTM::Input::RECURRENT_TO_OUTPUT_WEIGHTS).value()
+ << ") Cell To Input Weights("
+ << node.getInputs().at(LSTM::Input::CELL_TO_INPUT_WEIGHTS).value()
+ << ") Cell To Forget Weights("
+ << node.getInputs().at(LSTM::Input::CELL_TO_FORGET_WEIGHTS).value()
+ << ") Cell To OUTPUT Weights("
+ << node.getInputs().at(LSTM::Input::CELL_TO_OUTPUT_WEIGHTS).value()
+ << ") Input Gate Bias(" << node.getInputs().at(LSTM::Input::INPUT_GATE_BIAS).value()
+ << ") Forget Gate Bias("
+ << node.getInputs().at(LSTM::Input::FORGET_GATE_BIAS).value() << ") Cell Bias("
+ << node.getInputs().at(LSTM::Input::CELL_BIAS).value() << ") Output Gate Bias("
+ << node.getInputs().at(LSTM::Input::OUTPUT_GATE_BIAS).value()
+ << ") Projection Weights("
+ << node.getInputs().at(LSTM::Input::PROJECTION_WEIGHTS).value()
+ << ") Projection Bias(" << node.getInputs().at(LSTM::Input::PROJECTION_BIAS).value()
+ << ") Output State In(" << node.getInputs().at(LSTM::Input::OUTPUT_STATE_IN).value()
+ << ") Cell State In(" << node.getInputs().at(LSTM::Input::CELL_STATE_IN).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Scratch Buffer("
+ << node.getOutputs().at(LSTM::Output::SCRATCH_BUFFER).value()
+ << ") Output State Out("
+ << node.getOutputs().at(LSTM::Output::OUTPUT_STATE_OUT).value() << ") Cell State Out("
+ << node.getOutputs().at(LSTM::Output::CELL_STATE_OUT).value() << ") Output("
+ << node.getOutputs().at(LSTM::Output::OUTPUT).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const LogicalAnd &node)
+{
+ VERBOSE(LIR) << "* LogicalAnd" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(LogicalAnd::Input::INPUT0).value()
+ << ", " << node.getInputs().at(LogicalAnd::Input::INPUT1).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const LogicalNot &node)
+{
+ VERBOSE(LIR) << "* LogicalNot" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(LogicalNot::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const LogicalOr &node)
+{
+ VERBOSE(LIR) << "* LogicalOr" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(LogicalOr::Input::INPUT0).value()
+ << ", " << node.getInputs().at(LogicalOr::Input::INPUT1).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Logistic &node)
+{
+ VERBOSE(LIR) << "* Logistic" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Logistic::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const MaxPool2D &node)
+{
+ std::string padding_type =
+ node.param().padding.type == PaddingType::EXPLICIT ? "Explicit" : "Implicit";
+ VERBOSE(LIR) << "* MaxPool2D(" << padding_type << ")" << std::endl;
+ VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(MaxPool2D::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Mean &node)
+{
+ VERBOSE(LIR) << "* Mean" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Mean::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Mul &node)
+{
+ VERBOSE(LIR) << "* Mul" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Mul::Input::LHS).value() << ", "
+ << node.getInputs().at(Mul::Input::RHS).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Neg &node)
+{
+ VERBOSE(LIR) << "* Neg" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Neg::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Pack &node)
+{
+ VERBOSE(LIR) << "* Pack" << std::endl;
+ std::string inputs;
+ const auto &input_indices = node.getInputs();
+ for (auto it = std::begin(input_indices); it != std::end(input_indices); ++it)
+ {
+ inputs += std::to_string(it->value());
+ if (std::next(it) != std::end(input_indices))
+ inputs += ", ";
+ }
+ VERBOSE(LIR) << " - Inputs : Inputs(" << inputs << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Permute &node)
+{
+ std::string permute_type = "Unknown";
+ switch (node.getPermuteType())
+ {
+ case Permute::Type::COPY:
+ permute_type = "Copy";
+ break;
+ case Permute::Type::NHWC_TO_NCHW:
+ permute_type = "NHWC to NCHW";
+ break;
+ case Permute::Type::NCHW_TO_NHWC:
+ permute_type = "NCHW to NHWC";
+ break;
+ }
+
+ VERBOSE(LIR) << "* Permute(" + permute_type + ")" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(0).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const PReLU &node)
+{
+ VERBOSE(LIR) << "* PReLU" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(PReLU::Input::INPUT).value()
+ << ") Alpha(" << node.getInputs().at(PReLU::Input::ALPHA).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const ReduceMax &node)
+{
+ VERBOSE(LIR) << "* ReduceMax" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(ReduceMax::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const ReduceMin &node)
+{
+ VERBOSE(LIR) << "* ReduceMin" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(ReduceMin::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const ReduceSum &node)
+{
+ VERBOSE(LIR) << "* ReduceSum" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(ReduceSum::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const ReLU &node)
+{
+ VERBOSE(LIR) << "* ReLU" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(ReLU::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const ReLU1 &node)
+{
+ VERBOSE(LIR) << "* ReLU1" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(ReLU1::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const ReLU6 &node)
+{
+ VERBOSE(LIR) << "* ReLU6" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(ReLU6::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Reshape &node)
+{
+ VERBOSE(LIR) << "* Reshape" << std::endl;
+ // TODO The shape operand index should be node.getInputs().at(1).value(), but it is not valid yet
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Reshape::Input::INPUT).value()
+ << ") Shape("
+ << "?"
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const ResizeBilinear &node)
+{
+ VERBOSE(LIR) << "* ResizeBilinear" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(ResizeBilinear::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const RNN &node)
+{
+ VERBOSE(LIR) << "* RNN" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(RNN::Input::INPUT).value()
+ << ") Weights" << node.getInputs().at(RNN::Input::WEIGHTS).value()
+ << ") Recurrent Weights"
+ << node.getInputs().at(RNN::Input::RECURRENT_WEIGHTS).value() << ") Bias"
+ << node.getInputs().at(RNN::Input::BIAS).value() << ") Hidden State"
+ << node.getInputs().at(RNN::Input::HIDDEN_STATE_IN).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(RNN::Output::OUTPUT).value()
+ << ") Hidden State" << node.getInputs().at(RNN::Output::HIDDEN_STATE_OUT).value()
+ << ")" << std::endl;
+}
+
+void Dumper::visit(const RSQRT &node)
+{
+ VERBOSE(LIR) << "* RSQRT" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(RSQRT::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Softmax &node)
+{
+ VERBOSE(LIR) << "* Softmax" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Softmax::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const SpaceToDepth &node)
+{
+ VERBOSE(LIR) << "* SpaceToDepth" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(SpaceToDepth::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Split &node)
+{
+ VERBOSE(LIR) << "* Split" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Split::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const SQRT &node)
+{
+ VERBOSE(LIR) << "* SQRT" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(SQRT::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const SquaredDifference &node)
+{
+ VERBOSE(LIR) << "* SquaredDifference" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input("
+ << node.getInputs().at(SquaredDifference::Input::LHS).value() << ", "
+ << node.getInputs().at(SquaredDifference::Input::RHS).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Squeeze &node)
+{
+ VERBOSE(LIR) << "* Squeeze" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Squeeze::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Slice &node)
+{
+ VERBOSE(LIR) << "* Slice" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Slice::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const StridedSlice &node)
+{
+ VERBOSE(LIR) << "* StridedSlice" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(StridedSlice::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Sub &node)
+{
+ VERBOSE(LIR) << "* Sub" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Sub::Input::LHS).value() << ", "
+ << node.getInputs().at(Sub::Input::RHS).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Tanh &node)
+{
+ VERBOSE(LIR) << "* TanH" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Tanh::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const TopKV2 &node)
+{
+ VERBOSE(LIR) << "* TopKV2" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(TopKV2::Input::INPUT).value() << ")"
+ << std::endl;
+ VERBOSE(LIR) << " - Outputs : Values("
+ << node.getOutputs().at(TopKV2::Output::OUTPUT_VALUES).value() << ") Indices("
+ << node.getOutputs().at(TopKV2::Output::OUTPUT_INDICES).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const TransposeConv &node)
+{
+ std::string padding_type =
+ node.param().padding.type == PaddingType::EXPLICIT ? "Explicit" : "Implicit";
+ VERBOSE(LIR) << "* TransposeConv(" << padding_type << ")" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Output Shape("
+ << node.getInputs().at(TransposeConv::Input::OUTPUT_SHAPE).value() << ") Kernel("
+ << node.getInputs().at(TransposeConv::Input::KERNEL).value() << ") IFM("
+ << node.getInputs().at(TransposeConv::Input::INPUT).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Transpose &node)
+{
+ VERBOSE(LIR) << "* Transpose" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Transpose::Input::INPUT).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Unpack &node)
+{
+ VERBOSE(LIR) << "* Unpack" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Unpack::Input::INPUT).value() << ")"
+ << std::endl;
+ std::string outputs;
+ const auto &output_indices = node.getOutputs();
+ for (auto it = std::begin(output_indices); it != std::end(output_indices); ++it)
+ {
+ outputs += std::to_string(it->value());
+ if (std::next(it) != std::end(output_indices))
+ outputs += ", ";
+ }
+ VERBOSE(LIR) << " - Outputs : Outputs(" << outputs << ")" << std::endl;
+}
+
+void Dumper::visit(const Min &node)
+{
+ VERBOSE(LIR) << "* Min" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Min::Input::LHS).value() << ", "
+ << node.getInputs().at(Min::Input::RHS).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Max &node)
+{
+ VERBOSE(LIR) << "* Max" << std::endl;
+ VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(Max::Input::LHS).value() << ", "
+ << node.getInputs().at(Max::Input::RHS).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const OneHot &node)
+{
+ VERBOSE(LIR) << "* OneHot" << std::endl;
+ VERBOSE(LIR) << " - Inputs : "
+ << "Indices(" << node.getInputs().at(OneHot::Input::INDICES).value() << ") "
+ << "Depth(" << node.getInputs().at(OneHot::Input::DEPTH).value() << ") "
+ << "OnValue(" << node.getInputs().at(OneHot::Input::ON_VALUE).value() << ") "
+ << "OffValue(" << node.getInputs().at(OneHot::Input::OFF_VALUE).value() << ") "
+ << "Axis(" << node.param().axis << ") " << std::endl;
+ VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+} // namespace dumper
+} // namespace ir
+} // namespace neurun
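
Note: every visit() above is reached through the visitor double dispatch defined by the operation classes later in this change: each operation's accept() calls v.visit(*this), and the dumper prints one VERBOSE(LIR) block per node. The sketch below shows how a whole graph might be dumped; it assumes the Graph::operations()/Operations::iterate helpers used elsewhere in this runtime, so treat the exact signatures as an assumption rather than part of this patch.

// Sketch only (not part of this patch): dump every operation of a graph.
// Assumption: ir::Graph::operations() and Operations::iterate(...) behave as
// in the rest of the neurun runtime.
#include "ir/Graph.h"
#include "Dumper.h"

void dumpAllOperations(const neurun::ir::Graph &graph)
{
  neurun::ir::dumper::Dumper dumper;
  graph.operations().iterate(
      [&](const neurun::ir::OperationIndex &, const neurun::ir::Operation &op) {
        op.accept(dumper); // double dispatch selects the matching visit() overload
      });
}
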
diff --git a/runtime/neurun/core/src/ir/dumper/Dumper.h b/runtime/neurun/core/src/ir/dumper/Dumper.h
new file mode 100644
index 000000000..458f1c81f
--- /dev/null
+++ b/runtime/neurun/core/src/ir/dumper/Dumper.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_DUMPER_H__
+#define __NEURUN_GRAPH_DUMPER_H__
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace dumper
+{
+
+class Dumper : public OperationVisitor
+{
+public:
+ Dumper() = default;
+
+public:
+ void visit(const operation::Abs &) override;
+ void visit(const operation::Add &node) override;
+ void visit(const operation::ArgMax &) override;
+ void visit(const operation::AvgPool2D &node) override;
+ void visit(const operation::Cast &) override;
+ void visit(const operation::Comparison &) override;
+ void visit(const operation::Concat &node) override;
+ void visit(const operation::Conv2D &node) override;
+ void visit(const operation::DepthToSpace &) override;
+ void visit(const operation::DepthwiseConv2D &node) override;
+ void visit(const operation::Dequantize &) override;
+ void visit(const operation::Div &) override;
+ void visit(const operation::EmbeddingLookup &) override;
+ void visit(const operation::Exp &) override;
+ void visit(const operation::Floor &) override;
+ void visit(const operation::FullyConnected &node) override;
+ void visit(const operation::Gather &) override;
+ void visit(const operation::HashtableLookup &) override;
+ void visit(const operation::InstanceNorm &) override;
+ void visit(const operation::L2Normalization &) override;
+ void visit(const operation::L2Pool2D &) override;
+ void visit(const operation::LocalResponseNormalization &) override;
+ void visit(const operation::LogicalAnd &) override;
+ void visit(const operation::LogicalNot &) override;
+ void visit(const operation::LogicalOr &) override;
+ void visit(const operation::Logistic &) override;
+ void visit(const operation::LSTM &) override;
+ void visit(const operation::MaxPool2D &node) override;
+ void visit(const operation::Mean &) override;
+ void visit(const operation::Mul &) override;
+ void visit(const operation::Neg &) override;
+ void visit(const operation::Pack &) override;
+ void visit(const operation::Permute &node) override;
+ void visit(const operation::PReLU &) override;
+ void visit(const operation::ReduceMax &) override;
+ void visit(const operation::ReduceMin &) override;
+ void visit(const operation::ReduceSum &) override;
+ void visit(const operation::ReLU &) override;
+ void visit(const operation::ReLU1 &) override;
+ void visit(const operation::ReLU6 &) override;
+ void visit(const operation::Reshape &node) override;
+ void visit(const operation::ResizeBilinear &) override;
+ void visit(const operation::RNN &) override;
+ void visit(const operation::RSQRT &) override;
+ void visit(const operation::Softmax &node) override;
+ void visit(const operation::SpaceToDepth &) override;
+ void visit(const operation::Split &) override;
+ void visit(const operation::SQRT &) override;
+ void visit(const operation::SquaredDifference &) override;
+ void visit(const operation::Squeeze &) override;
+ void visit(const operation::Slice &) override;
+ void visit(const operation::StridedSlice &) override;
+ void visit(const operation::Sub &) override;
+ void visit(const operation::Tanh &) override;
+ void visit(const operation::TopKV2 &) override;
+ void visit(const operation::TransposeConv &) override;
+ void visit(const operation::Transpose &) override;
+ void visit(const operation::Unpack &) override;
+ void visit(const operation::Min &) override;
+ void visit(const operation::Max &) override;
+ void visit(const operation::OneHot &) override;
+};
+
+} // namespace dumper
+} // namespace ir
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_DUMPER_H__
diff --git a/runtime/neurun/core/src/ir/operand/Shape4DConvert.h b/runtime/neurun/core/src/ir/operand/Shape4DConvert.h
new file mode 100644
index 000000000..feffee89f
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operand/Shape4DConvert.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERAND_SHAPE4D_CONVERT_H__
+#define __NEURUN_GRAPH_OPERAND_SHAPE4D_CONVERT_H__
+
+#include "ir/operand/LowerInfo.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operand
+{
+
+inline LowerInfo::Shape4D asShape4D(const Shape &shape)
+{
+ switch (shape.rank())
+ {
+ case 0u:
+ return LowerInfo::Shape4D(1, 1, 1, 1);
+
+ case 1u:
+ return LowerInfo::Shape4D(1, 1, 1, shape.dim(0));
+
+ case 2u:
+ return LowerInfo::Shape4D(1, 1, shape.dim(0), shape.dim(1));
+
+ case 3u:
+ return LowerInfo::Shape4D(1, shape.dim(0), shape.dim(1), shape.dim(2));
+
+ case 4u:
+ return LowerInfo::Shape4D(shape.dim(0), shape.dim(1), shape.dim(2), shape.dim(3));
+
+ default:
+ throw "Unsupported rank > 4";
+ }
+}
+
+} // namespace operand
+} // namespace ir
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERAND_SHAPE4D_CONVERT_H__
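
For reference, asShape4D() simply left-pads the missing leading dimensions with 1 so that any operand of rank 0 to 4 can be carried around as a fixed Shape4D; higher ranks are rejected. A small usage sketch (the initializer-list Shape construction is an assumption, written to match how shapes are built elsewhere in the IR):

// Sketch only: rank-2 {3, 5} maps to Shape4D(1, 1, 3, 5); a scalar maps to (1, 1, 1, 1).
neurun::ir::Shape mat{3, 5};                      // assumed initializer-list ctor
auto as4d = neurun::ir::operand::asShape4D(mat);  // -> (1, 1, 3, 5)
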
diff --git a/runtime/neurun/core/src/ir/operation/Abs.cc b/runtime/neurun/core/src/ir/operation/Abs.cc
new file mode 100644
index 000000000..9506f83d2
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Abs.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Abs.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Abs::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Abs::Abs(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
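
The remaining operation translation units in this change follow the same pattern as Abs.cc: accept() performs the visitor double dispatch, and the constructor forwards an OperandConstraint describing how many inputs the node takes (createExact(1u) for unary ops, createExact(2u) for binary ops, createAtLeast(1u) for variadic ops such as Concat). A construction sketch, with hypothetical operand indices and assuming OperandIndexSequence can be built from an index list as it is elsewhere in the IR:

// Sketch only: operand 0 is a hypothetical input tensor, operand 1 the output.
using namespace neurun::ir;
operation::Abs abs_node{OperandIndexSequence{0}, OperandIndexSequence{1}};
// The createExact(1u) constraint is checked against the input sequence;
// abs_node.accept(visitor) then dispatches to visit(const operation::Abs &).
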
diff --git a/runtime/neurun/core/src/ir/operation/Add.cc b/runtime/neurun/core/src/ir/operation/Add.cc
new file mode 100644
index 000000000..a7c40c37a
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Add.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Add.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Add::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Add::Add(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(2u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/ArgMax.cc b/runtime/neurun/core/src/ir/operation/ArgMax.cc
new file mode 100644
index 000000000..200abc7dd
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/ArgMax.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/ArgMax.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void ArgMax::accept(OperationVisitor &v) const { v.visit(*this); }
+
+ArgMax::ArgMax(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/AvgPool2D.cc b/runtime/neurun/core/src/ir/operation/AvgPool2D.cc
new file mode 100644
index 000000000..21ec052eb
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/AvgPool2D.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/AvgPool2D.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void AvgPool2D::accept(OperationVisitor &v) const { v.visit(*this); }
+
+AvgPool2D::AvgPool2D(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/BatchToSpaceND.cc b/runtime/neurun/core/src/ir/operation/BatchToSpaceND.cc
new file mode 100644
index 000000000..042144c12
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/BatchToSpaceND.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/BatchToSpaceND.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void BatchToSpaceND::accept(OperationVisitor &v) const { v.visit(*this); }
+
+BatchToSpaceND::BatchToSpaceND(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(2u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Cast.cc b/runtime/neurun/core/src/ir/operation/Cast.cc
new file mode 100644
index 000000000..095225eca
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Cast.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Cast.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Cast::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Cast::Cast(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Comparison.cc b/runtime/neurun/core/src/ir/operation/Comparison.cc
new file mode 100644
index 000000000..995d56764
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Comparison.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Comparison.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Comparison::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Comparison::Comparison(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(2u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Concat.cc b/runtime/neurun/core/src/ir/operation/Concat.cc
new file mode 100644
index 000000000..1772da1fc
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Concat.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Concat.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Concat::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Concat::Concat(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createAtLeast(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Conv2D.cc b/runtime/neurun/core/src/ir/operation/Conv2D.cc
new file mode 100644
index 000000000..505e916a9
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Conv2D.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Conv2D.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Conv2D::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Conv2D::Conv2D(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(3u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Custom.cc b/runtime/neurun/core/src/ir/operation/Custom.cc
new file mode 100644
index 000000000..67f36d588
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Custom.cc
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Custom.h"
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Custom::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Custom::Custom(OperandConstraint input_constr, const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs, std::string id, const Userdata &userdata)
+ : Operation{input_constr, inputs, outputs}, _id(std::move(id)), _userdata(userdata)
+{
+}
+
+const std::string &Custom::id() const { return _id; }
+
+const Custom::Userdata &Custom::userdata() const { return _userdata; }
+
+Custom::~Custom() { delete[] _userdata.data; }
+
+std::string Custom::name() const { return id(); }
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/DepthToSpace.cc b/runtime/neurun/core/src/ir/operation/DepthToSpace.cc
new file mode 100644
index 000000000..fd1d1f1aa
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/DepthToSpace.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/DepthToSpace.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void DepthToSpace::accept(OperationVisitor &v) const { v.visit(*this); }
+
+DepthToSpace::DepthToSpace(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/DepthwiseConv2D.cc b/runtime/neurun/core/src/ir/operation/DepthwiseConv2D.cc
new file mode 100644
index 000000000..ed76594a3
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/DepthwiseConv2D.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/DepthwiseConv2D.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void DepthwiseConv2D::accept(OperationVisitor &v) const { v.visit(*this); }
+
+DepthwiseConv2D::DepthwiseConv2D(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs, const Param &param)
+ : Operation{OperandConstraint::createExact(3u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Dequantize.cc b/runtime/neurun/core/src/ir/operation/Dequantize.cc
new file mode 100644
index 000000000..e99a59cb7
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Dequantize.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Dequantize.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Dequantize::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Dequantize::Dequantize(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Div.cc b/runtime/neurun/core/src/ir/operation/Div.cc
new file mode 100644
index 000000000..484406ff3
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Div.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Div.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Div::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Div::Div(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(2u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/EmbeddingLookup.cc b/runtime/neurun/core/src/ir/operation/EmbeddingLookup.cc
new file mode 100644
index 000000000..206e6bfaa
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/EmbeddingLookup.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/EmbeddingLookup.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void EmbeddingLookup::accept(OperationVisitor &v) const { v.visit(*this); }
+
+EmbeddingLookup::EmbeddingLookup(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(2u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Exp.cc b/runtime/neurun/core/src/ir/operation/Exp.cc
new file mode 100644
index 000000000..3c0e0cf9b
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Exp.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Exp.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Exp::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Exp::Exp(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Floor.cc b/runtime/neurun/core/src/ir/operation/Floor.cc
new file mode 100644
index 000000000..75373cd41
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Floor.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Floor.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Floor::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Floor::Floor(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/FullyConnected.cc b/runtime/neurun/core/src/ir/operation/FullyConnected.cc
new file mode 100644
index 000000000..9560c0593
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/FullyConnected.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/FullyConnected.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void FullyConnected::accept(OperationVisitor &v) const { v.visit(*this); }
+
+FullyConnected::FullyConnected(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs, const Param &param)
+ : Operation{OperandConstraint::createExact(3u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Gather.cc b/runtime/neurun/core/src/ir/operation/Gather.cc
new file mode 100644
index 000000000..f98cef9ae
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Gather.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Gather.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Gather::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Gather::Gather(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(2u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/HashtableLookup.cc b/runtime/neurun/core/src/ir/operation/HashtableLookup.cc
new file mode 100644
index 000000000..ecb9d3195
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/HashtableLookup.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/HashtableLookup.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void HashtableLookup::accept(OperationVisitor &v) const { v.visit(*this); }
+
+HashtableLookup::HashtableLookup(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(3u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/InstanceNorm.cc b/runtime/neurun/core/src/ir/operation/InstanceNorm.cc
new file mode 100644
index 000000000..69e47abd4
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/InstanceNorm.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/InstanceNorm.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void InstanceNorm::accept(OperationVisitor &v) const { v.visit(*this); }
+
+InstanceNorm::InstanceNorm(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(3u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/L2Normalization.cc b/runtime/neurun/core/src/ir/operation/L2Normalization.cc
new file mode 100644
index 000000000..67085989e
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/L2Normalization.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/L2Normalization.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void L2Normalization::accept(OperationVisitor &v) const { v.visit(*this); }
+
+L2Normalization::L2Normalization(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs, const Param &param)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/L2Pool2D.cc b/runtime/neurun/core/src/ir/operation/L2Pool2D.cc
new file mode 100644
index 000000000..0815cb5ab
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/L2Pool2D.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/L2Pool2D.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void L2Pool2D::accept(OperationVisitor &v) const { v.visit(*this); }
+
+L2Pool2D::L2Pool2D(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/LSTM.cc b/runtime/neurun/core/src/ir/operation/LSTM.cc
new file mode 100644
index 000000000..58e2aa32e
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/LSTM.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/LSTM.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void LSTM::accept(OperationVisitor &v) const { v.visit(*this); }
+
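+// The exact-23 constraint mirrors the NN API LSTM operation, whose inputs cover the input
+// tensor, the input-to-gate and recurrent-to-gate weights, peephole weights, gate biases,
+// projection weights/bias, the previous output/cell state tensors, and the activation and
+// clipping scalars, each passed as a separate operand.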
+LSTM::LSTM(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(23u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/LocalResponseNormalization.cc b/runtime/neurun/core/src/ir/operation/LocalResponseNormalization.cc
new file mode 100644
index 000000000..dcba7f1cb
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/LocalResponseNormalization.cc
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/LocalResponseNormalization.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void LocalResponseNormalization::accept(OperationVisitor &v) const { v.visit(*this); }
+
+LocalResponseNormalization::LocalResponseNormalization(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/LogicalAnd.cc b/runtime/neurun/core/src/ir/operation/LogicalAnd.cc
new file mode 100644
index 000000000..51f4f0ee0
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/LogicalAnd.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/LogicalAnd.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void LogicalAnd::accept(OperationVisitor &v) const { v.visit(*this); }
+
+LogicalAnd::LogicalAnd(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(2u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/LogicalNot.cc b/runtime/neurun/core/src/ir/operation/LogicalNot.cc
new file mode 100644
index 000000000..48c25142a
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/LogicalNot.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/LogicalNot.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void LogicalNot::accept(OperationVisitor &v) const { v.visit(*this); }
+
+LogicalNot::LogicalNot(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/LogicalOr.cc b/runtime/neurun/core/src/ir/operation/LogicalOr.cc
new file mode 100644
index 000000000..663b7deb5
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/LogicalOr.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/LogicalOr.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void LogicalOr::accept(OperationVisitor &v) const { v.visit(*this); }
+
+LogicalOr::LogicalOr(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(2u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Logistic.cc b/runtime/neurun/core/src/ir/operation/Logistic.cc
new file mode 100644
index 000000000..3ed2f3453
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Logistic.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Logistic.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Logistic::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Logistic::Logistic(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/LowerInfo.cc b/runtime/neurun/core/src/ir/operation/LowerInfo.cc
new file mode 100644
index 000000000..6133be3f8
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/LowerInfo.cc
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/LowerInfo.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+LowerInfo::LowerInfo(const backend::Backend *backend, Layout layout)
+ : _permute_factor{backend, layout}
+{
+ // DO NOTHING
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Max.cc b/runtime/neurun/core/src/ir/operation/Max.cc
new file mode 100644
index 000000000..be4bdd365
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Max.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Max.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Max::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Max::Max(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(2u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/MaxPool2D.cc b/runtime/neurun/core/src/ir/operation/MaxPool2D.cc
new file mode 100644
index 000000000..8f1b70cd6
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/MaxPool2D.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/MaxPool2D.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void MaxPool2D::accept(OperationVisitor &v) const { v.visit(*this); }
+
+MaxPool2D::MaxPool2D(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Mean.cc b/runtime/neurun/core/src/ir/operation/Mean.cc
new file mode 100644
index 000000000..016b5dd85
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Mean.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Mean.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Mean::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Mean::Mean(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Min.cc b/runtime/neurun/core/src/ir/operation/Min.cc
new file mode 100644
index 000000000..a864405dc
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Min.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Min.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Min::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Min::Min(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(2u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Mul.cc b/runtime/neurun/core/src/ir/operation/Mul.cc
new file mode 100644
index 000000000..0b2d67a9d
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Mul.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Mul.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Mul::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Mul::Mul(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(2u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Neg.cc b/runtime/neurun/core/src/ir/operation/Neg.cc
new file mode 100644
index 000000000..65922d57c
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Neg.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Neg.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Neg::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Neg::Neg(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/OneHot.cc b/runtime/neurun/core/src/ir/operation/OneHot.cc
new file mode 100644
index 000000000..0ba3c9d60
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/OneHot.cc
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/OneHot.h"
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void OneHot::accept(OperationVisitor &v) const { v.visit(*this); }
+
+OneHot::OneHot(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(4u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/PReLU.cc b/runtime/neurun/core/src/ir/operation/PReLU.cc
new file mode 100644
index 000000000..b8555ccbd
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/PReLU.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/PReLU.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void PReLU::accept(OperationVisitor &v) const { v.visit(*this); }
+
+PReLU::PReLU(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(2u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Pack.cc b/runtime/neurun/core/src/ir/operation/Pack.cc
new file mode 100644
index 000000000..412c744ea
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Pack.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "ir/operation/Pack.h"
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+void Pack::accept(OperationVisitor &v) const { v.visit(*this); }
+Pack::Pack(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createAtLeast(3u), inputs, outputs}, _param{param}
+{
+}
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Pad.cc b/runtime/neurun/core/src/ir/operation/Pad.cc
new file mode 100644
index 000000000..a08be12a6
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Pad.cc
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Pad.h"
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Pad::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Pad::Pad(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(2u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Permute.cc b/runtime/neurun/core/src/ir/operation/Permute.cc
new file mode 100644
index 000000000..ec3d969c8
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Permute.cc
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Permute.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Permute::accept(OperationVisitor &v) const { v.visit(*this); }
+
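+// Note: unlike most operations, Permute is built from a single input/output operand pair plus
+// the source/destination backend contexts; the base Operation is constructed without index
+// sequences, so the input and output are wired explicitly via setInputs()/setOutputs() below.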
+Permute::Permute(const OperandIndex &input, const OperandIndex &output,
+ const backend::BackendContext *input_backend_ctx,
+ const backend::BackendContext *output_backend_ctx, Type type, DataType data_type)
+ : Operation{OperandConstraint::createExact(1u)}, _param{input_backend_ctx, output_backend_ctx},
+ _type{type}, _dataType{data_type}
+{
+ setInputs({input});
+ setOutputs({output});
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/RNN.cc b/runtime/neurun/core/src/ir/operation/RNN.cc
new file mode 100644
index 000000000..8db5cbceb
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/RNN.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/RNN.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void RNN::accept(OperationVisitor &v) const { v.visit(*this); }
+
+RNN::RNN(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(5u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/RSQRT.cc b/runtime/neurun/core/src/ir/operation/RSQRT.cc
new file mode 100644
index 000000000..ec13b20ec
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/RSQRT.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/RSQRT.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void RSQRT::accept(OperationVisitor &v) const { v.visit(*this); }
+
+RSQRT::RSQRT(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/ReLU.cc b/runtime/neurun/core/src/ir/operation/ReLU.cc
new file mode 100644
index 000000000..6b3f7e72d
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/ReLU.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/ReLU.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void ReLU::accept(OperationVisitor &v) const { v.visit(*this); }
+
+ReLU::ReLU(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/ReLU1.cc b/runtime/neurun/core/src/ir/operation/ReLU1.cc
new file mode 100644
index 000000000..d7b4e1b11
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/ReLU1.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/ReLU1.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void ReLU1::accept(OperationVisitor &v) const { v.visit(*this); }
+
+ReLU1::ReLU1(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/ReLU6.cc b/runtime/neurun/core/src/ir/operation/ReLU6.cc
new file mode 100644
index 000000000..245eb923f
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/ReLU6.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/ReLU6.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void ReLU6::accept(OperationVisitor &v) const { v.visit(*this); }
+
+ReLU6::ReLU6(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/ReduceMax.cc b/runtime/neurun/core/src/ir/operation/ReduceMax.cc
new file mode 100644
index 000000000..b7ef2c5a9
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/ReduceMax.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/ReduceMax.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void ReduceMax::accept(OperationVisitor &v) const { v.visit(*this); }
+
+ReduceMax::ReduceMax(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/ReduceMin.cc b/runtime/neurun/core/src/ir/operation/ReduceMin.cc
new file mode 100644
index 000000000..84d7e0cc5
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/ReduceMin.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/ReduceMin.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void ReduceMin::accept(OperationVisitor &v) const { v.visit(*this); }
+
+ReduceMin::ReduceMin(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/ReduceSum.cc b/runtime/neurun/core/src/ir/operation/ReduceSum.cc
new file mode 100644
index 000000000..7e3b19bd6
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/ReduceSum.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/ReduceSum.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void ReduceSum::accept(OperationVisitor &v) const { v.visit(*this); }
+
+ReduceSum::ReduceSum(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Reshape.cc b/runtime/neurun/core/src/ir/operation/Reshape.cc
new file mode 100644
index 000000000..bae37e12f
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Reshape.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Reshape.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Reshape::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Reshape::Reshape(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/ResizeBilinear.cc b/runtime/neurun/core/src/ir/operation/ResizeBilinear.cc
new file mode 100644
index 000000000..55ae4815d
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/ResizeBilinear.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/ResizeBilinear.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void ResizeBilinear::accept(OperationVisitor &v) const { v.visit(*this); }
+
+ResizeBilinear::ResizeBilinear(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs, const Param &param)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/SQRT.cc b/runtime/neurun/core/src/ir/operation/SQRT.cc
new file mode 100644
index 000000000..6c6daa3a0
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/SQRT.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/SQRT.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void SQRT::accept(OperationVisitor &v) const { v.visit(*this); }
+
+SQRT::SQRT(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Slice.cc b/runtime/neurun/core/src/ir/operation/Slice.cc
new file mode 100644
index 000000000..88014d1e4
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Slice.cc
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Slice.h"
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Slice::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Slice::Slice(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(3u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Softmax.cc b/runtime/neurun/core/src/ir/operation/Softmax.cc
new file mode 100644
index 000000000..6b3a6b164
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Softmax.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Softmax.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Softmax::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Softmax::Softmax(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/SpaceToBatchND.cc b/runtime/neurun/core/src/ir/operation/SpaceToBatchND.cc
new file mode 100644
index 000000000..a07453504
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/SpaceToBatchND.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/SpaceToBatchND.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void SpaceToBatchND::accept(OperationVisitor &v) const { v.visit(*this); }
+
+SpaceToBatchND::SpaceToBatchND(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(3u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/SpaceToDepth.cc b/runtime/neurun/core/src/ir/operation/SpaceToDepth.cc
new file mode 100644
index 000000000..ca16bd92f
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/SpaceToDepth.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/SpaceToDepth.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void SpaceToDepth::accept(OperationVisitor &v) const { v.visit(*this); }
+
+SpaceToDepth::SpaceToDepth(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Split.cc b/runtime/neurun/core/src/ir/operation/Split.cc
new file mode 100644
index 000000000..a4b15a9b2
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Split.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "ir/operation/Split.h"
+#include <cassert>
+#include "ir/OperationVisitor.h"
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+void Split::accept(OperationVisitor &v) const { v.visit(*this); }
+Split::Split(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/SquaredDifference.cc b/runtime/neurun/core/src/ir/operation/SquaredDifference.cc
new file mode 100644
index 000000000..141fb7560
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/SquaredDifference.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/SquaredDifference.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void SquaredDifference::accept(OperationVisitor &v) const { v.visit(*this); }
+
+SquaredDifference::SquaredDifference(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(2u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Squeeze.cc b/runtime/neurun/core/src/ir/operation/Squeeze.cc
new file mode 100644
index 000000000..22ee5763d
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Squeeze.cc
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Squeeze.h"
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Squeeze::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Squeeze::Squeeze(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param(param)
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/StridedSlice.cc b/runtime/neurun/core/src/ir/operation/StridedSlice.cc
new file mode 100644
index 000000000..f764dccc0
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/StridedSlice.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/StridedSlice.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void StridedSlice::accept(OperationVisitor &v) const { v.visit(*this); }
+
+StridedSlice::StridedSlice(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(3u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Sub.cc b/runtime/neurun/core/src/ir/operation/Sub.cc
new file mode 100644
index 000000000..7d83e3d74
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Sub.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Sub.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Sub::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Sub::Sub(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(2u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Tanh.cc b/runtime/neurun/core/src/ir/operation/Tanh.cc
new file mode 100644
index 000000000..a3125e947
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Tanh.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Tanh.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Tanh::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Tanh::Tanh(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/TopKV2.cc b/runtime/neurun/core/src/ir/operation/TopKV2.cc
new file mode 100644
index 000000000..6fabd34a3
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/TopKV2.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/TopKV2.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void TopKV2::accept(OperationVisitor &v) const { v.visit(*this); }
+
+TopKV2::TopKV2(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Transpose.cc b/runtime/neurun/core/src/ir/operation/Transpose.cc
new file mode 100644
index 000000000..74239b0f6
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Transpose.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/Transpose.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void Transpose::accept(OperationVisitor &v) const { v.visit(*this); }
+
+Transpose::Transpose(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(2u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/TransposeConv.cc b/runtime/neurun/core/src/ir/operation/TransposeConv.cc
new file mode 100644
index 000000000..30664e974
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/TransposeConv.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ir/operation/TransposeConv.h"
+
+#include <cassert>
+
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+
+void TransposeConv::accept(OperationVisitor &v) const { v.visit(*this); }
+
+TransposeConv::TransposeConv(const OperandIndexSequence &inputs,
+ const OperandIndexSequence &outputs, const Param &param)
+ : Operation{OperandConstraint::createExact(3u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/operation/Unpack.cc b/runtime/neurun/core/src/ir/operation/Unpack.cc
new file mode 100644
index 000000000..7c2c24892
--- /dev/null
+++ b/runtime/neurun/core/src/ir/operation/Unpack.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "ir/operation/Unpack.h"
+#include "ir/OperationVisitor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace operation
+{
+void Unpack::accept(OperationVisitor &v) const { v.visit(*this); }
+Unpack::Unpack(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param)
+ : Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+} // namespace operation
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/pass/ConstantInsertionPass.cc b/runtime/neurun/core/src/ir/pass/ConstantInsertionPass.cc
new file mode 100644
index 000000000..8f8ebff1b
--- /dev/null
+++ b/runtime/neurun/core/src/ir/pass/ConstantInsertionPass.cc
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ConstantInsertionPass.h"
+
+#include "backend/Backend.h"
+#include <ir/Graph.h>
+#include "ir/operand/Shape4DConvert.h"
+#include <util/Utils.h>
+
+namespace neurun
+{
+namespace ir
+{
+namespace pass
+{
+
+void ConstantInsertionPass::callback(const OperationIndex &node_index, Operation &node)
+{
+ const auto &subgraph_index = _graph.subgraphs().getOperation(node_index);
+ const auto subg_lower_info = _graph.getLowerInfo(subgraph_index);
+ const auto backend = subg_lower_info->backend();
+ const auto layout = subg_lower_info->layout();
+ const auto factor = operand::PermuteFactor{backend, layout};
+
+ for (const auto input : node.getInputs())
+ {
+ auto &object = _graph.operands().at(input);
+
+ if (object.isConstant())
+ {
+ const auto key = ReplaceKey{input, factor};
+ if (_replace_operands_map.count(key) == 0)
+ {
+ auto new_object = object;
+ // TODO Remove const_cast
+ const_cast<std::list<OperationIndex> &>(new_object.getDef().list()).clear();
+ const_cast<std::list<OperationIndex> &>(new_object.getUses().list()).clear();
+ const auto new_index = _graph.operands().emplace(new_object);
+ _replace_operands_map[key] = new_index;
+
+ _graph.setLowerInfo(new_index, nnfw::cpp14::make_unique<operand::LowerInfo>(
+ operand::asShape4D(new_object.shape())));
+ _graph.getLowerInfo(new_index)->addDefPermuteFactor(factor);
+ }
+
+ const auto replaced_input = _replace_operands_map[key];
+ // Update op_seq
+ if (_graph.subgraphs().at(subgraph_index).getInputs().contains(input))
+ {
+ _graph.subgraphs().at(subgraph_index).replaceInput(input, replaced_input);
+ }
+
+ // Update node
+ node.replaceInput(input, replaced_input);
+
+ // Update operand
+ auto &replaced_object = _graph.operands().at(replaced_input);
+ replaced_object.appendUse(node_index);
+
+ // Update lower_info
+ auto replaced_lower_info = _graph.getLowerInfo(replaced_input);
+ replaced_lower_info->addUsePermuteFactor(factor);
+
+ // Remove this node from def and uses of origin operand
+ if (object.getDef().contains(node_index))
+ {
+ object.removeDef(node_index);
+ }
+ object.removeUse(node_index);
+
+ // Remove origin operand
+ if (object.getDef().size() == 0 && object.getUses().size() == 0)
+ {
+ _graph.removeOperand(input);
+ _graph.removeLowerInfo(input);
+ }
+ }
+ }
+
+ // This runtime does not yet support operations whose outputs are constants
+ for (const auto &output : node.getOutputs())
+ {
+ UNUSED_RELEASE(output);
+ assert(!_graph.operands().at(output).isConstant());
+ }
+}
+
+} // namespace pass
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/pass/ConstantInsertionPass.h b/runtime/neurun/core/src/ir/pass/ConstantInsertionPass.h
new file mode 100644
index 000000000..40476b20e
--- /dev/null
+++ b/runtime/neurun/core/src/ir/pass/ConstantInsertionPass.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_PASS_CONSTANT_INSERTION_PASS_H__
+#define __NEURUN_GRAPH_PASS_CONSTANT_INSERTION_PASS_H__
+
+#include <ir/operand/PermuteFactor.h>
+#include <ir/Index.h>
+#include "OperationPass.h"
+#include <unordered_map>
+#include <utility>
+
+namespace neurun
+{
+namespace ir
+{
+namespace pass
+{
+
+class ConstantInsertionPass : public OperationPass
+{
+public:
+ using OperationPass::OperationPass;
+
+public:
+ std::string id() final { return "ConstantInsertionPass"; }
+
+public:
+ void callback(const OperationIndex &index, Operation &node) final;
+
+private:
+ struct ReplaceKey
+ {
+ OperandIndex index;
+ operand::PermuteFactor factor;
+
+ bool operator==(const ReplaceKey &other) const
+ {
+ return index == other.index && factor == other.factor;
+ }
+ };
+
+ /**
+ * @brief Structure that provides hash function of ReplaceKey
+ */
+ struct KeyHasher
+ {
+ std::size_t operator()(const ReplaceKey &key) const noexcept
+ {
+ using std::hash;
+ return hash<OperandIndex>()(key.index) ^ (hash<operand::PermuteFactor>()(key.factor) << 1);
+ }
+ };
+
+ std::unordered_map<ReplaceKey, OperandIndex, KeyHasher> _replace_operands_map;
+};
+
+} // namespace pass
+} // namespace ir
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_PASS_CONSTANT_INSERTION_PASS_H__
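The KeyHasher above combines the two member hashes with an XOR and a one-bit shift so that ReplaceKey can serve as an std::unordered_map key. A minimal, self-contained sketch of the same combine pattern (not part of this patch), using a hypothetical pair of plain ints in place of OperandIndex and PermuteFactor:

    #include <cstddef>
    #include <functional>
    #include <unordered_map>

    struct Key
    {
      int index;  // stands in for OperandIndex
      int factor; // stands in for PermuteFactor
      bool operator==(const Key &other) const
      {
        return index == other.index && factor == other.factor;
      }
    };

    struct KeyHash
    {
      std::size_t operator()(const Key &key) const noexcept
      {
        // Shift one sub-hash so that swapping the members does not always collide
        return std::hash<int>()(key.index) ^ (std::hash<int>()(key.factor) << 1);
      }
    };

    int main()
    {
      std::unordered_map<Key, int, KeyHash> replace_map;
      replace_map[{3, 1}] = 7; // e.g. operand 3 under factor 1 maps to replacement operand 7
      return replace_map.at({3, 1}) == 7 ? 0 : 1;
    }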
diff --git a/runtime/neurun/core/src/ir/pass/OperandPass.cc b/runtime/neurun/core/src/ir/pass/OperandPass.cc
new file mode 100644
index 000000000..f31d0d850
--- /dev/null
+++ b/runtime/neurun/core/src/ir/pass/OperandPass.cc
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "OperandPass.h"
+
+#include "ir/Graph.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace pass
+{
+
+void OperandPass::run()
+{
+ _graph.operands().iterate(
+ [&](const OperandIndex &index, Operand &object) { callback(index, object); });
+}
+
+} // namespace pass
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/pass/OperandPass.h b/runtime/neurun/core/src/ir/pass/OperandPass.h
new file mode 100644
index 000000000..c9fbf541d
--- /dev/null
+++ b/runtime/neurun/core/src/ir/pass/OperandPass.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_PASS_OPERAND_PASS_H__
+#define __NEURUN_GRAPH_PASS_OPERAND_PASS_H__
+
+#include "Pass.h"
+#include "ir/Index.h"
+
+namespace neurun
+{
+namespace ir
+{
+class Operand;
+} // namespace ir
+} // namespace neurun
+
+namespace neurun
+{
+namespace ir
+{
+namespace pass
+{
+
+class OperandPass : public Pass
+{
+public:
+ using Pass::Pass;
+
+public:
+ std::string id() override = 0;
+ void run() override final;
+ virtual void callback(const OperandIndex &i, Operand &o) = 0;
+};
+
+} // namespace pass
+} // namespace ir
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_PASS_OPERAND_PASS_H__
diff --git a/runtime/neurun/core/src/ir/pass/OperationPass.cc b/runtime/neurun/core/src/ir/pass/OperationPass.cc
new file mode 100644
index 000000000..c9438ee39
--- /dev/null
+++ b/runtime/neurun/core/src/ir/pass/OperationPass.cc
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "OperationPass.h"
+
+#include "ir/Index.h"
+#include "ir/Operation.h"
+#include "ir/Graph.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace pass
+{
+
+void OperationPass::run()
+{
+ _graph.operations().iterate(
+ [&](const OperationIndex &index, Operation &node) { callback(index, node); });
+}
+
+} // namespace pass
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/pass/OperationPass.h b/runtime/neurun/core/src/ir/pass/OperationPass.h
new file mode 100644
index 000000000..4b7de7109
--- /dev/null
+++ b/runtime/neurun/core/src/ir/pass/OperationPass.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file OperationPass.h
+ * @brief This file contains OperationPass class
+ */
+
+#ifndef __NEURUN_GRAPH_PASS_OPERATION_PASS_H__
+#define __NEURUN_GRAPH_PASS_OPERATION_PASS_H__
+
+#include "Pass.h"
+#include "ir/Index.h"
+
+namespace neurun
+{
+namespace ir
+{
+class Operation;
+} // namespace ir
+} // namespace neurun
+
+namespace neurun
+{
+namespace ir
+{
+namespace pass
+{
+
+/**
+ * @brief Class that iterates over all operations and calls callback() for each
+ */
+class OperationPass : public Pass
+{
+public:
+ using Pass::Pass;
+
+public:
+ /**
+ * @brief Returns the string id for this pass. It is the same as the class name.
+ *
+ * @return string id
+ */
+ std::string id() override = 0;
+
+ /**
+ * @brief Called for every node of the graph.
+ * @param index is the index of a node in graph
+ * @param node is the node in graph
+ */
+ virtual void callback(const OperationIndex &index, Operation &node) = 0;
+
+ /**
+ * @brief Run the pass
+ */
+ void run() final;
+};
+
+} // namespace pass
+} // namespace ir
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_PASS_OPERATION_PASS_H__
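As the comments above describe, a concrete pass only supplies id() and callback(); run() drives the iteration over all operations. A minimal sketch of a derived pass under that assumption (the class name OperationCountPass and its counter are hypothetical, and the file is assumed to sit next to OperationPass.h):

    #include <cstdint>
    #include <string>

    #include "OperationPass.h"
    #include "ir/Index.h"
    #include "ir/Operation.h"

    namespace neurun
    {
    namespace ir
    {
    namespace pass
    {

    // Counts the operations of a graph; OperationPass::run() calls callback() once per node
    class OperationCountPass : public OperationPass
    {
    public:
      using OperationPass::OperationPass; // inherits Pass(Graph &graph)

      std::string id() final { return "OperationCountPass"; }
      void callback(const OperationIndex &, Operation &) final { ++_count; }

      uint32_t count() const { return _count; }

    private:
      uint32_t _count = 0;
    };

    } // namespace pass
    } // namespace ir
    } // namespace neurun

Usage would follow the same shape as the other passes in this patch: OperationCountPass pass{graph}; pass.run();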
diff --git a/runtime/neurun/core/src/ir/pass/Pass.h b/runtime/neurun/core/src/ir/pass/Pass.h
new file mode 100644
index 000000000..0aa0f36a6
--- /dev/null
+++ b/runtime/neurun/core/src/ir/pass/Pass.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_PASS_PASS_H__
+#define __NEURUN_GRAPH_PASS_PASS_H__
+
+#include <string>
+
+namespace neurun
+{
+namespace ir
+{
+class Graph;
+} // namespace ir
+} // namespace neurun
+
+namespace neurun
+{
+namespace ir
+{
+namespace pass
+{
+
+class Pass
+{
+public:
+ Pass(Graph &graph) : _graph{graph} {}
+ virtual ~Pass() = default;
+
+public:
+ virtual std::string id() = 0;
+ virtual void run() = 0;
+
+protected:
+ Graph &_graph;
+};
+
+} // namespace pass
+} // namespace ir
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_PASS_PASS_H__
diff --git a/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.cc b/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.cc
new file mode 100644
index 000000000..71f3d7e82
--- /dev/null
+++ b/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.cc
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PermutationEliminationPass.h"
+
+#include "ir/Operand.h"
+#include "ir/operand/LowerInfo.h"
+#include "ir/Graph.h"
+#include "backend/IConfig.h"
+#include "util/logging.h"
+#include "compiler/BackendResolver.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace pass
+{
+void PermutationEliminationPass::callback(const OperandIndex &inp_index, Operand &object)
+{
+ if (_graph.getInputs().contains(inp_index))
+ {
+ eliminateInput(inp_index, object);
+ }
+ else if (_graph.getOutputs().contains(inp_index))
+ {
+ eliminateOutput(inp_index, object);
+ }
+}
+
+void PermutationEliminationPass::eliminateInput(const OperandIndex &inp_index, Operand &object)
+{
+ auto &model_inputs = _graph.getInputs();
+
+ // get uses of the model's given input
+ auto uses = object.getUses();
+
+ // the input must have exactly one use, and that use must be a permutation
+ if (uses.size() != 1)
+ {
+ return;
+ }
+
+ for (auto input_use : uses.list())
+ {
+ auto &perm_operation = _graph.operations().at(input_use);
+ auto perm_inputs = perm_operation.getInputs();
+
+ auto perm_outputs = perm_operation.getOutputs();
+
+ if (!isPermuteLayerToEliminate(perm_inputs, perm_outputs, true))
+ {
+ return;
+ }
+
+ assert(perm_inputs.at(0) == inp_index);
+
+ VERBOSE(PermutationEliminationPass::EliminateInput) << "remove NHWC_TO_NCHW permutation\n";
+
+ // set model's new input, which was output of permutation
+ model_inputs.replace(inp_index, perm_outputs.at(0));
+
+ // remove model's input, which is also input of permutation
+ _graph.removeOperand(inp_index);
+
+ // remove permutation operation
+ assert(_graph.subgraphs().containsOperation(input_use));
+ auto subg_idx = _graph.subgraphs().getOperation(input_use);
+ _graph.subgraphs().remove(subg_idx);
+ _graph.operations().remove(input_use);
+
+ VERBOSE(PermutationEliminationPass::EliminateInput)
+ << inp_index.value() << " is model's input and is removed. New input is "
+ << perm_outputs.at(0).value() << "\n"
+ << input_use.value() << " is removed permutation operation\n";
+ }
+}
+
+void PermutationEliminationPass::eliminateOutput(const OperandIndex &out_index, Operand &object)
+{
+ auto &model_outputs = _graph.getOutputs();
+
+ // get defs of the model's given output
+ auto defs = object.getDef();
+
+ // the output must have exactly one def, and that def must be a permutation
+ if (defs.size() != 1)
+ {
+ return;
+ }
+
+ for (auto output_def : defs.list())
+ {
+ auto &perm_operation = _graph.operations().at(output_def);
+ auto perm_outputs = perm_operation.getOutputs();
+
+ auto perm_inputs = perm_operation.getInputs();
+ if (!isPermuteLayerToEliminate(perm_inputs, perm_outputs, false))
+ {
+ return;
+ }
+
+ assert(perm_outputs.at(0) == out_index);
+
+ VERBOSE(PermutationEliminationPass::EliminateOutput) << "remove NCHW_TO_NHWC permutation\n";
+
+ // Update operations' output that is used by permute operand
+ for (auto perm_input_index : perm_inputs)
+ {
+ auto &perm_input_operand = _graph.operands().at(perm_input_index);
+ perm_input_operand.removeUse(output_def);
+ }
+
+ // set model's new output, which was input of permutation
+ model_outputs.replace(out_index, perm_inputs.at(0));
+
+ // remove model's output, which is also output of permutation
+ _graph.removeOperand(out_index);
+
+ // remove permutation operation
+ assert(_graph.subgraphs().containsOperation(output_def));
+ auto subg_idx = _graph.subgraphs().getOperation(output_def);
+ _graph.subgraphs().remove(subg_idx);
+ _graph.operations().remove(output_def);
+
+ VERBOSE(PermutationEliminationPass::EliminateOutput)
+ << out_index.value() << " is model's output and is removed. New output is "
+ << perm_inputs.at(0).value() << "\n"
+ << output_def.value() << " is removed permutation operation\n";
+ }
+}
+
+bool PermutationEliminationPass::isPermuteLayerToEliminate(const OperandIndexSequence &inp_indexes,
+ const OperandIndexSequence &out_indexes,
+ bool is_for_model_input)
+{
+ auto input_def_factors = _graph.getLowerInfo(inp_indexes.at(0))->def_factors();
+ auto output_def_factors = _graph.getLowerInfo(out_indexes.at(0))->def_factors();
+
+ // Check the single-factor precondition before calling getOnlyElement()
+ if (input_def_factors.size() != 1 || output_def_factors.size() != 1)
+ {
+ return false;
+ }
+
+ auto input_layout = input_def_factors.getOnlyElement().layout();
+ auto output_layout = output_def_factors.getOnlyElement().layout();
+
+ // every input operand must have a single def factor with the input layout
+ for (auto index : inp_indexes)
+ {
+ auto op_factor_set = _graph.getLowerInfo(index)->def_factors();
+ if (op_factor_set.size() != 1 ||
+ input_layout != _graph.getLowerInfo(index)->def_factors().getOnlyElement().layout())
+ {
+ return false;
+ }
+ }
+ // every output operand must have a single def factor with the output layout
+ for (auto index : out_indexes)
+ {
+ auto op_factor_set = _graph.getLowerInfo(index)->def_factors();
+ if (op_factor_set.size() != 1 ||
+ output_layout != _graph.getLowerInfo(index)->def_factors().getOnlyElement().layout())
+ {
+ return false;
+ }
+ }
+
+ if (is_for_model_input)
+ {
+ // check if this is NHWC_TO_NCHW permutation: must have single input, which is model's input
+ return (inp_indexes.size() == 1 && input_layout == Layout::NHWC &&
+ output_layout == Layout::NCHW);
+ }
+
+ // check if this is NCHW_TO_NHWC permutation: must have single output, which is model's output
+ return (out_indexes.size() == 1 && input_layout == Layout::NCHW && output_layout == Layout::NHWC);
+}
+
+} // namespace pass
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.h b/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.h
new file mode 100644
index 000000000..4431eabbc
--- /dev/null
+++ b/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_PASS_PERMUTATION_ELIMINATION_PASS_H__
+#define __NEURUN_GRAPH_PASS_PERMUTATION_ELIMINATION_PASS_H__
+
+#include "OperandPass.h"
+#include "ir/Operand.h"
+#include "ir/OperandIndexSequence.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace pass
+{
+
+class PermutationEliminationPass : public OperandPass
+{
+public:
+ using OperandPass::OperandPass;
+
+public:
+ std::string id() override { return "PermutationEliminationPass"; }
+
+ void callback(const OperandIndex &index, Operand &object) override;
+
+private:
+ /**
+ * @brief Remove a Permute operation that permutes a model input
+ *
+ * Note: This function also removes the model's input and
+ * sets the output of the permutation as the model's new input
+ *
+ * @param inp_index is the target operand index for the elimination
+ * @param object is the target operand object for the elimination
+ *
+ * @return
+ */
+ void eliminateInput(const OperandIndex &inp_index, Operand &object);
+
+ /**
+ * @brief Remove a Permute operation that permutes a model output
+ *
+ * Note: This function also removes the model's output and
+ * sets the input of the permutation as the model's new output
+ *
+ * @param out_index is the target operand index for the elimination
+ * @param object is the target operand object for the elimination
+ *
+ * @return
+ */
+ void eliminateOutput(const OperandIndex &out_index, Operand &object);
+
+ /**
+ * @brief Determine whether the given operands are the input and output of a Permute layer
+ * that must be eliminated
+ *
+ * @param inp_indexes indexes of the operation's input operands
+ * @param out_indexes indexes of the operation's output operands
+ * @param is_for_model_input true when checking a model input, false when checking a model output
+ *
+ * @return true if this is a Permute layer to eliminate
+ */
+ bool isPermuteLayerToEliminate(const OperandIndexSequence &inp_indexes,
+ const OperandIndexSequence &out_indexes, bool is_for_model_input);
+};
+
+} // namespace pass
+} // namespace ir
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_PASS_PERMUTATION_ELIMINATION_PASS_H__
diff --git a/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.cc b/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.cc
new file mode 100644
index 000000000..052e3026a
--- /dev/null
+++ b/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.cc
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PermutationInsertionPass.h"
+
+#include <cassert>
+#include <utility>
+#include <unordered_map>
+
+#include "ir/Operand.h"
+#include "ir/operation/LowerInfo.h"
+#include "ir/Graph.h"
+#include "backend/IConfig.h"
+#include "util/logging.h"
+#include "cpp14/memory.h"
+#include "ir/operation/Permute.h"
+#include "ir/operand/Shape4DConvert.h"
+#include "compiler/BackendResolver.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace pass
+{
+
+void PermutationInsertionPass::callback(const OperandIndex &index, Operand &object)
+{
+ auto &&operand_li = _graph.getLowerInfo(index);
+ assert(operand_li);
+
+ // NOTE Constants will also have a Def later
+ // Ignore constants
+ if (operand_li->def_factors().size() == 0)
+ {
+ return;
+ }
+
+ std::list<OperationIndex> permute_indexes;
+
+ // Build a map for all necessary type of operands
+ std::unordered_map<operand::PermuteFactor, OperandIndex> factor_to_index;
+ {
+ assert(operand_li->def_factors().size() == 1);
+ for (auto factor : operand_li->def_factors())
+ {
+ factor_to_index.emplace(factor, index);
+ }
+
+ auto insert_set = operand_li->use_factors() - operand_li->def_factors();
+ for (auto factor : insert_set)
+ {
+ const auto permute_operation_index = insertPermute(index, factor);
+ permute_indexes.push_back(permute_operation_index);
+ VERBOSE(PermutationInsertionPass) << "Insert 'Permute' operation for operand "
+ << index.value() << std::endl;
+ const auto &permute_operation = _graph.operations().at(permute_operation_index);
+ const auto permuted_operand_index = permute_operation.getOutputs().at(0);
+ factor_to_index.emplace(factor, permuted_operand_index);
+ }
+ }
+
+ // Update operations' input that uses this operand
+ {
+ std::list<OperationIndex> remove_list;
+
+ auto uses = object.getUses();
+ for (auto use : uses.list())
+ {
+ // If permute operation, ignore it
+ if (std::find(permute_indexes.begin(), permute_indexes.end(), use) != permute_indexes.end())
+ continue;
+
+ auto &operation = _graph.operations().at(use);
+ assert(_graph.subgraphs().containsOperation(use));
+ auto subg_index = _graph.subgraphs().getOperation(use);
+ auto subg_li = _graph.getLowerInfo(subg_index);
+ assert(subg_li);
+ const auto subg_layout = subg_li->layout();
+ const backend::Backend *backend = subg_li->backend();
+ assert(backend);
+ auto use_node_inputs = operation.getInputs();
+ assert(use_node_inputs.contains(index));
+
+ auto new_index = factor_to_index.at({backend, subg_layout});
+ if (index != new_index)
+ {
+ // Update from op_seq
+ _graph.subgraphs().at(subg_index).replaceInput(index, new_index);
+
+ // Update from operation
+ operation.replaceInput(index, new_index);
+
+ // Update from operand
+ // Defer the removal; erasing a use while iterating over the use list would be unsafe
+ remove_list.push_back(use);
+ _graph.operands().at(new_index).appendUse(use);
+ }
+ }
+
+ for (auto &operation : remove_list)
+ {
+ object.removeUse(operation);
+ }
+ }
+}
+
+OperationIndex PermutationInsertionPass::insertPermute(const OperandIndex &operand_index,
+ const operand::PermuteFactor &factor)
+{
+ assert(!_graph.isBuildingPhase());
+
+ auto &operand = _graph.operands().at(operand_index);
+
+ // Generate output operand and permute operation
+ auto out_operand_index = _graph.addOperand(operand.shape(), operand.typeInfo());
+ // change model output if operand_index is model output index
+ auto &model_outputs = _graph.getOutputs();
+ if (model_outputs.contains(operand_index))
+ {
+ model_outputs.replace(operand_index, out_operand_index);
+ }
+
+ // Find Permute information
+ auto input_backend = _graph.getLowerInfo(operand_index)->def_factors().getOnlyElement().backend();
+ auto output_backend = factor.backend();
+ // NOTE Permute may not have specific layout because the layout of input and output may be
+ // different.
+ const auto permute_node_layout = Layout::UNKNOWN;
+ const auto permute_node_backend = backend::BackendManager::get().getDefault();
+ const operand::PermuteFactor permute_node_factor{permute_node_backend, permute_node_layout};
+
+ // Update LowerInfo of input operand
+ auto operand_lower_info = _graph.getLowerInfo(operand_index);
+ operand_lower_info->removeUsePermuteFactor(factor);
+ operand_lower_info->addUsePermuteFactor(permute_node_factor);
+
+ // Update LowerInfo of output operand
+ auto out_operand_li =
+ nnfw::cpp14::make_unique<operand::LowerInfo>(operand::asShape4D(operand.shape()));
+
+ // The input and output factors of every node except Permute will be the same, so the tensor
+ // allocators currently allocate memory using only the def permute factor.
+ // TODO Change param to permute_node_factor
+ out_operand_li->addDefPermuteFactor(factor);
+ out_operand_li->addUsePermuteFactor(factor);
+ _graph.setLowerInfo(out_operand_index, std::move(out_operand_li));
+
+ auto input_backend_ctx = _graph.backend_resolver()->getBackendContext(input_backend);
+ auto output_backend_ctx = _graph.backend_resolver()->getBackendContext(output_backend);
+
+ // Insert permute operation to the graph
+ const auto input_layout =
+ _graph.getLowerInfo(operand_index)->def_factors().getOnlyElement().layout();
+ const auto output_layout = factor.layout();
+ using Permute = operation::Permute;
+ const auto permute_type = [&]() {
+ if (input_layout == Layout::NHWC && output_layout == Layout::NCHW)
+ {
+ return Permute::Type::NHWC_TO_NCHW;
+ }
+ else if (input_layout == Layout::NCHW && output_layout == Layout::NHWC)
+ {
+ return Permute::Type::NCHW_TO_NHWC;
+ }
+ else
+ {
+ return Permute::Type::COPY;
+ }
+ }();
+ auto insert_node = nnfw::cpp14::make_unique<Permute>(
+ operand_index, out_operand_index, input_backend_ctx, output_backend_ctx, permute_type);
+
+ auto node_index = _graph.operations().push(std::move(insert_node));
+ const auto &node = _graph.operations().at(node_index);
+
+ // OpSequence
+ {
+ auto subg_index = _graph.subgraphs().emplace(node_index, node, permute_node_layout);
+ auto &subg = _graph.subgraphs().at(subg_index);
+ subg.setInputs(node.getInputs());
+ subg.setOutputs(node.getOutputs());
+ _graph.setLowerInfo(subg_index, nnfw::cpp14::make_unique<operation::LowerInfo>(
+ permute_node_backend, permute_node_layout));
+ }
+
+ // Update Use/Def info
+ {
+ _graph.operands().at(operand_index).appendUse(node_index);
+ _graph.operands().at(out_operand_index).appendDef(node_index);
+ }
+ return node_index;
+}
+} // namespace pass
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.h b/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.h
new file mode 100644
index 000000000..4065fc6ac
--- /dev/null
+++ b/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_PASS_PERMUTATION_INSERTION_PASS_H__
+#define __NEURUN_GRAPH_PASS_PERMUTATION_INSERTION_PASS_H__
+
+#include "OperandPass.h"
+#include "ir/Operand.h" //for OperationIndex
+#include "backend/BackendManager.h"
+#include "ir/operand/PermuteFactor.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace pass
+{
+
+class PermutationInsertionPass : public OperandPass
+{
+public:
+ using OperandPass::OperandPass;
+
+public:
+ std::string id() override { return "PermutationInsertionPass"; }
+ void callback(const OperandIndex &index, Operand &object) override;
+
+ /**
+ * @brief Insert a Permute operation that takes the given operand as its input
+ *
+ * @param operand_index is the target operand index for the insertion
+ * @param factor is the output operand's backend type and layout
+ *
+ * @return OperationIndex of the inserted Permute operation
+ */
+ OperationIndex insertPermute(const OperandIndex &operand_index,
+ const operand::PermuteFactor &factor);
+
+};
+
+} // namespace pass
+} // namespace ir
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_PASS_PERMUTATION_INSERTION_PASS_H__
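Like the other passes in this patch, this one is driven through the Pass interface: construct it with a Graph and call run(), which visits every operand via OperandPass::run(). A minimal usage sketch, assuming a lowered Graph named graph whose operand LowerInfo has already been populated:

    neurun::ir::pass::PermutationInsertionPass pass{graph};
    pass.run(); // inserts a Permute operation wherever a use factor differs from the def factor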
diff --git a/runtime/neurun/core/src/ir/pass/PermutationOperationPass.cc b/runtime/neurun/core/src/ir/pass/PermutationOperationPass.cc
new file mode 100644
index 000000000..41a1ad903
--- /dev/null
+++ b/runtime/neurun/core/src/ir/pass/PermutationOperationPass.cc
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PermutationOperationPass.h"
+
+#include "backend/Backend.h"
+#include "backend/IConfig.h"
+#include "ir/Graph.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace pass
+{
+
+void PermutationOperationPass::callback(const OperationIndex &, Operation &node)
+{
+ node.accept(*this);
+}
+
+void PermutationOperationPass::changeToKeepLayout(const Operation &node)
+{
+ const auto &output_ind = node.getOutputs().at(0);
+ const auto &output_obj = _graph.operands().at(output_ind);
+
+ assert(output_obj.getDef().size() == 1);
+ const auto &node_index = output_obj.getDef().list().front();
+ const auto &subg_index = _graph.subgraphs().getOperation(node_index);
+
+ const auto frontend_layout = _graph.subgraphs().at(subg_index).getLayout();
+ const auto backend_layout = _graph.getLowerInfo(subg_index)->layout();
+
+ if (frontend_layout == backend_layout)
+ {
+ return;
+ }
+
+ // CPU supports only NHWC now
+ if (_graph.getLowerInfo(subg_index)->backend()->config()->id() != "cpu")
+ {
+ // TODO Change backend of this node
+ assert(frontend_layout == Layout::NHWC || backend_layout == Layout::UNKNOWN);
+ }
+
+ // Divide op_seq based on target operation
+ {
+ auto &above_subg = _graph.subgraphs().at(subg_index);
+
+ // If the target node is not the last node of the op_seq, create a new op_seq and
+ // move the operations that follow the target node into it
+ auto it = above_subg.begin();
+ // Find iterator of target node in op_seq
+ while ((it++)->index != node_index)
+ ;
+ if (it != above_subg.end())
+ {
+ const auto &below_subg_index =
+ _graph.subgraphs().emplace(it->index, *it->node, above_subg.getLayout());
+ auto &below_subg = _graph.subgraphs().at(below_subg_index);
+ below_subg.setInputs(it->node->getInputs());
+ below_subg.setOutputs(it->node->getOutputs());
+
+ std::vector<OperationIndex> remove_list;
+ remove_list.emplace_back(it->index);
+ while (++it != above_subg.end())
+ {
+ below_subg.appendOperation(it->index, *it->node);
+ below_subg.setOutputs(it->node->getOutputs());
+ remove_list.emplace_back(it->index);
+ }
+
+ above_subg.setOutputs(node.getOutputs());
+ for (const auto &index : remove_list)
+ {
+ above_subg.remove(index);
+ }
+
+ const auto subg_li = _graph.getLowerInfo(subg_index);
+ _graph.setLowerInfo(below_subg_index, nnfw::cpp14::make_unique<operation::LowerInfo>(
+ subg_li->backend(), subg_li->layout()));
+ }
+ }
+
+ // Remove target operation from op_seq and insert the target operation to new op_seq
+ {
+ const auto backend = _graph.getLowerInfo(subg_index)->backend();
+
+ // Remove target operation from its op_seq
+ _graph.subgraphs().removeFromSubgraph(node_index);
+
+ if (!_graph.subgraphs().exist(subg_index))
+ {
+ // Remove lowerinfo for op_seq of target operation if the op_seq does not exist
+ _graph.removeLowerInfo(subg_index);
+ }
+ else
+ {
+ // Update op_seq of target operation if the op_seq exists
+ auto &above_subg = _graph.subgraphs().at(subg_index);
+ const auto last_node = (--above_subg.end())->node;
+ above_subg.setOutputs(last_node->getOutputs());
+ }
+
+ // Create new op_seq and set information to the op_seq
+ auto new_subg_index = _graph.subgraphs().emplace(node_index, node, frontend_layout);
+ auto &new_subg = _graph.subgraphs().at(new_subg_index);
+ new_subg.setInputs(node.getInputs());
+ new_subg.setOutputs(node.getOutputs());
+ _graph.setLowerInfo(new_subg_index,
+ nnfw::cpp14::make_unique<operation::LowerInfo>(backend, frontend_layout));
+ }
+
+ // Change PermuteFactors of operands of target node
+ {
+ const auto &subg_index = _graph.subgraphs().getOperation(node_index);
+ const auto subg_li = _graph.getLowerInfo(subg_index);
+ const auto backend = subg_li->backend();
+ const operand::PermuteFactor removed_factor{backend, backend_layout};
+ const operand::PermuteFactor new_factor{backend, frontend_layout};
+ for (const auto &input : node.getInputs())
+ {
+ bool canRemove = true;
+ for (const auto &use : _graph.operands().at(input).getUses().list())
+ {
+ if (use != node_index)
+ {
+ const auto &use_subg_index = _graph.subgraphs().getOperation(use);
+ auto use_subg_li = _graph.getLowerInfo(use_subg_index);
+ if (use_subg_li->backend() == backend && use_subg_li->layout() == backend_layout)
+ {
+ canRemove = false;
+ break;
+ }
+ }
+ }
+
+ auto lower_info = _graph.getLowerInfo(input);
+ if (canRemove)
+ {
+ lower_info->removeUsePermuteFactor(removed_factor);
+ }
+ lower_info->addUsePermuteFactor(new_factor);
+
+ // If the node's input has no def, it is either a model input or a constant
+ if (_graph.operands().at(input).getDef().size() == 0)
+ {
+ assert(_graph.getInputs().contains(input) || _graph.operands().at(input).isConstant());
+ lower_info->removeDefPermuteFactor(removed_factor);
+ lower_info->addDefPermuteFactor(new_factor);
+ }
+ }
+
+ for (const auto &output : node.getOutputs())
+ {
+ auto lower_info = _graph.getLowerInfo(output);
+ lower_info->removeDefPermuteFactor(removed_factor);
+ lower_info->addDefPermuteFactor(new_factor);
+
+ // If the node's output has no uses, it is a model output
+ if (_graph.operands().at(output).getUses().size() == 0)
+ {
+ assert(_graph.getOutputs().contains(output));
+ lower_info->removeUsePermuteFactor(removed_factor);
+ lower_info->addUsePermuteFactor(new_factor);
+ }
+ }
+ }
+}
+
+void PermutationOperationPass::visit(const operation::FullyConnected &node)
+{
+ const auto &input_ind = node.getInputs().at(operation::FullyConnected::Input::INPUT);
+ const auto &input_obj = _graph.operands().at(input_ind);
+ const auto &input_shape = input_obj.shape();
+
+ if (input_shape.rank() == 4)
+ {
+ changeToKeepLayout(node);
+ }
+}
+
+void PermutationOperationPass::visit(const operation::Gather &node)
+{
+ const auto &input_ind = node.getInputs().at(operation::Gather::Input::INPUT);
+ const auto &input_obj = _graph.operands().at(input_ind);
+ const auto &input_shape = input_obj.shape();
+
+ const auto &output_ind = node.getOutputs().at(0);
+ const auto &output_obj = _graph.operands().at(output_ind);
+ const auto &output_shape = output_obj.shape();
+
+ if (input_shape.rank() >= 4 || output_shape.rank() >= 4)
+ {
+ changeToKeepLayout(node);
+ }
+}
+
+void PermutationOperationPass::visit(const operation::Reshape &node)
+{
+ const auto &input_ind = node.getInputs().at(operation::Reshape::Input::INPUT);
+ const auto &input_obj = _graph.operands().at(input_ind);
+ const auto &input_shape = input_obj.shape();
+
+ const auto &output_ind = node.getOutputs().at(0);
+ const auto &output_obj = _graph.operands().at(output_ind);
+ const auto &output_shape = output_obj.shape();
+
+ if (input_shape.rank() >= 4 || output_shape.rank() >= 4)
+ {
+ changeToKeepLayout(node);
+ }
+}
+
+} // namespace pass
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/pass/PermutationOperationPass.h b/runtime/neurun/core/src/ir/pass/PermutationOperationPass.h
new file mode 100644
index 000000000..896e0176a
--- /dev/null
+++ b/runtime/neurun/core/src/ir/pass/PermutationOperationPass.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_PASS_PERMUTATION_OPERATION_PASS_H__
+#define __NEURUN_GRAPH_PASS_PERMUTATION_OPERATION_PASS_H__
+
+#include "ir/OperationVisitor.h"
+#include "OperationPass.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace pass
+{
+
+class PermutationOperationPass : public OperationPass, public OperationVisitor
+{
+public:
+ using OperationPass::OperationPass;
+
+public:
+ std::string id() final { return "PermutationOperationPass"; }
+
+public:
+ void callback(const OperationIndex &i, Operation &n) final;
+
+public:
+ void visit(const operation::FullyConnected &) final;
+ void visit(const operation::Gather &) final;
+ void visit(const operation::Reshape &) final;
+
+private:
+ void changeToKeepLayout(const Operation &);
+};
+
+} // namespace pass
+} // namespace ir
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_PASS_PERMUTATION_OPERATION_PASS_H__
diff --git a/runtime/neurun/core/src/ir/verifier/Verifier.cc b/runtime/neurun/core/src/ir/verifier/Verifier.cc
new file mode 100644
index 000000000..7bd8ac512
--- /dev/null
+++ b/runtime/neurun/core/src/ir/verifier/Verifier.cc
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Verifier.h"
+
+#include "ir/Graph.h"
+#include "ir/OperationIndexMap.h"
+
+#include "util/logging.h"
+
+namespace neurun
+{
+namespace ir
+{
+namespace verifier
+{
+
+//
+// DAGChecker
+//
+
+bool DAGChecker::verify(const Graph &graph) const
+{
+ auto &operations = graph.operations();
+ bool cyclic = false;
+
+ OperationIndexMap<bool> visited;
+ operations.iterate(
+ [&](const OperationIndex &index, const Operation &) { visited[index] = false; });
+ OperationIndexMap<bool> on_stack = visited; // Copy from visited
+
+ std::function<void(const OperationIndex &index, const Operation &)> dfs_recursive =
+ [&](const OperationIndex &index, const Operation &node) -> void {
+ if (on_stack[index])
+ cyclic = true;
+ if (visited[index])
+ return;
+ visited[index] = true;
+ on_stack[index] = true;
+
+ for (auto output : node.getOutputs())
+ {
+ const auto &operand = graph.operands().at(output);
+ for (const auto &use : operand.getUses().list())
+ {
+ dfs_recursive(use, graph.operations().at(use));
+ }
+ }
+
+ on_stack[index] = false;
+ };
+
+ operations.iterate(dfs_recursive);
+
+ return !cyclic;
+}
+
+//
+// EdgeConsistencyChecker
+//
+
+bool EdgeConsistencyChecker::verify(const Graph &graph) const
+{
+ auto &operations = graph.operations();
+ uint32_t mismatches = 0;
+ operations.iterate([&](const OperationIndex &index, const Operation &node) {
+ for (auto operand_index : node.getInputs())
+ {
+ auto &operand = graph.operands().at(operand_index);
+ mismatches += (operand.getUses().contains(index) ? 0 : 1);
+ }
+ for (auto operand_index : node.getOutputs())
+ {
+ auto &operand = graph.operands().at(operand_index);
+ mismatches += (operand.getDef().contains(index) ? 0 : 1);
+ }
+ });
+ return mismatches == 0;
+}
+
+} // namespace verifier
+} // namespace ir
+} // namespace neurun
diff --git a/runtime/neurun/core/src/ir/verifier/Verifier.h b/runtime/neurun/core/src/ir/verifier/Verifier.h
new file mode 100644
index 000000000..0993a239e
--- /dev/null
+++ b/runtime/neurun/core/src/ir/verifier/Verifier.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_VERIFIER_VERIFIER_H__
+#define __NEURUN_GRAPH_VERIFIER_VERIFIER_H__
+
+namespace neurun
+{
+namespace ir
+{
+class Graph;
+} // namespace ir
+} // namespace neurun
+
+namespace neurun
+{
+namespace ir
+{
+namespace verifier
+{
+
+struct IVerifier
+{
+ virtual ~IVerifier() = default;
+ virtual bool verify(const Graph &graph) const = 0;
+};
+
+} // namespace verifier
+} // namespace ir
+} // namespace neurun
+
+namespace neurun
+{
+namespace ir
+{
+namespace verifier
+{
+
+class DAGChecker : public IVerifier
+{
+public:
+ bool verify(const Graph &graph) const override;
+};
+
+class EdgeConsistencyChecker : public IVerifier
+{
+public:
+ bool verify(const Graph &graph) const override;
+};
+
+} // namespace verifier
+} // namespace ir
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_VERIFIER_VERIFIER_H__
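Both checkers implement the IVerifier interface, so a caller can run them back to back on a finished graph. A minimal sketch, assuming a fully built neurun::ir::Graph named graph and include paths relative to this directory:

    #include <cassert>

    #include "Verifier.h"
    #include "ir/Graph.h"

    void verifyGraph(const neurun::ir::Graph &graph)
    {
      // DAGChecker: the operation graph must contain no cycles
      assert(neurun::ir::verifier::DAGChecker{}.verify(graph));
      // EdgeConsistencyChecker: every operation edge must be mirrored in the operands' use/def lists
      assert(neurun::ir::verifier::EdgeConsistencyChecker{}.verify(graph));
    }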