Diffstat (limited to 'runtimes/neurun/core/src/exec/interp/Interpreter.cc')
-rw-r--r--  runtimes/neurun/core/src/exec/interp/Interpreter.cc  202
1 file changed, 202 insertions(+), 0 deletions(-)
diff --git a/runtimes/neurun/core/src/exec/interp/Interpreter.cc b/runtimes/neurun/core/src/exec/interp/Interpreter.cc
new file mode 100644
index 000000000..81de27c36
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/interp/Interpreter.cc
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Interpreter.h"
+
+#include <stack>
+#include <unordered_map>
+#include <unordered_set>
+
+#include "Registration.h"
+
+#include "model/OperandIndexMap.h"
+#include "util/logging.h"
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace exec
+{
+namespace interp
+{
+
+// TODO More structured execution kernel implementation
+// TODO Use cker for execution
+// TODO Separate tensor preparation from execution
+// TODO Introduce a memory manager (buffer allocation and free)
+class OperationExecutor : model::OperationVisitor
+{
+public:
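+// Expand the OP() X-macro over Operations.lst to build the NodeName enum (one entry per operation)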
+#define OP(InternalName, IsNnApi) InternalName,
+ enum class NodeName
+ {
+#include "model/Operations.lst"
+ };
+#undef OP
+
+public:
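+  // Register an execution kernel for every supported operation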
+ OperationExecutor(ExecEnv *env) : _env{env}
+ {
+ _kernels[NodeName::AddNode] = getAddNode();
+ _kernels[NodeName::Conv2DNode] = getConv2DNode();
+ _kernels[NodeName::MaxPool2DNode] = getMaxPool2DNode();
+ _kernels[NodeName::ConcatNode] = getConcatNode();
+ _kernels[NodeName::AvgPool2DNode] = getAvgPool2DNode();
+ _kernels[NodeName::FullyConnectedNode] = getFullyConnectedNode();
+ _kernels[NodeName::SoftmaxNode] = getSoftMaxNode();
+ _kernels[NodeName::ReshapeNode] = getReshapeNode();
+ _kernels[NodeName::DepthwiseConv2DNode] = getDepthwiseConvNode();
+ }
+
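+  // Dispatch a single operation to its kernel through the visitor interface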
+ void execute(const model::OperationIndex &idx)
+ {
+ const auto nodeName = _env->model().operations.at(idx).getName();
+ VERBOSE(INTERPRETER) << "Prepare output operands and execute " << nodeName
+ << " operation (id: " << idx.value() << ")" << std::endl;
+ _env->model().operations.at(idx).accept(*this);
+ }
+
+private:
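+// For each operation in Operations.lst, generate a visit() override that runs the kernel's
+// optional prepare step and then invokes the kernel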
+#define OP(InternalName, IsNnApi) \
+ virtual void visit(const model::operation::InternalName &node) override \
+ { \
+ if (_kernels[NodeName::InternalName]->prepare != nullptr) \
+ { \
+ _kernels[NodeName::InternalName]->prepare(_env, node); \
+ } \
+ _kernels[NodeName::InternalName]->invoke(_env, node); \
+ }
+#include "model/Operations.lst"
+#undef OP
+
+private:
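+  // Non-owning pointer to the execution environment and the kernel table filled in the constructor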
+ ExecEnv *_env;
+ std::unordered_map<NodeName, OpKernel *> _kernels;
+};
+
+void Interpreter::run()
+{
+ VERBOSE(INTERPRETER) << "Interpreter is invoked " << std::endl;
+
+  // operand_stack: holds operands that are ready to use
+ std::stack<model::OperandIndex> operand_stack;
+
+  // Note: Push inputs first, then constants.
+  //       Use-def chains are used to find operations that are ready to execute,
+  //       but use-def cannot handle parameters (usually constant, though not always).
+  // Note: If all model inputs are constant, this may not work (it depends on the tensor order),
+  //       but that scenario is unlikely to exist.
+ for (auto ind : _env->model().inputs)
+ {
+ VERBOSE(INTERPRETER) << "Input: Push to operand stack " << ind.value() << std::endl;
+
+ operand_stack.push(ind);
+ }
+
+ _env->model().operands.iterate([&](const model::OperandIndex &ind, const model::Operand &obj) {
+ if (obj.isConstant())
+ {
+ VERBOSE(INTERPRETER) << "Constant: Push to operand stack " << ind.value() << std::endl;
+
+ operand_stack.push(ind);
+ }
+ });
+
+ // Execution
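+  // ready_check: operands whose data is already available (inputs, constants, or produced outputs)
+  // executed: operations that have already run, used to decide when input buffers can be freed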
+ std::unordered_set<model::OperandIndex> ready_check;
+ std::unordered_set<model::OperationIndex> executed;
+ OperationExecutor executor{_env.get()};
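+  // Main loop: pop an operand, mark it ready, then run every operation whose inputs are all ready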
+ while (!operand_stack.empty())
+ {
+ const auto current_operand_index = operand_stack.top();
+ operand_stack.pop();
+    VERBOSE(INTERPRETER) << "Popped operand " << current_operand_index.value()
+                         << " is checked to see whether it is ready to use" << std::endl;
+
+ assert(ready_check.find(current_operand_index) == ready_check.end());
+ ready_check.insert(current_operand_index);
+
+    // Find operations that are ready to run by scanning the uses of the current operand
+ std::stack<model::OperationIndex> operation_stack;
+ const auto use_operators = _env->model().operands.at(current_operand_index).getUses();
+ for (auto use_operator : use_operators.list())
+ {
+ // Assumption: all parameters are ready to use
+ bool operator_ready = true;
+ for (auto input_index : _env->model().operations.at(use_operator).getInputs())
+ {
+ if (ready_check.find(input_index) == ready_check.end())
+ {
+ operator_ready = false;
+ break;
+ }
+ }
+
+ if (operator_ready)
+ {
+ VERBOSE(INTERPRETER) << "Ready to execute operation " << use_operator.value() << std::endl;
+ operation_stack.push(use_operator);
+ }
+ }
+
+ while (!operation_stack.empty())
+ {
+ const auto current_operation_index = operation_stack.top();
+ operation_stack.pop();
+      VERBOSE(INTERPRETER) << "Popped operation: " << current_operation_index.value() << "("
+                           << _env->model().operations.at(current_operation_index).getName() << ")"
+                           << std::endl;
+
+ // execution
+ // 1. Prepare output tensor
+ // 2. Call operation kernel
+ executor.execute(current_operation_index);
+ executed.insert(current_operation_index);
+
+ // 3. Push each output into operand stack
+ const auto def_operands = _env->model().operations.at(current_operation_index).getOutputs();
+ for (auto def_operand : def_operands)
+ {
+ VERBOSE(INTERPRETER) << "Buffer: Push to operand stack " << def_operand.value()
+ << std::endl;
+ operand_stack.push(def_operand);
+ }
+
+      // 4. Free input buffers whose lifetime ends here (no remaining users left to execute)
+ for (auto input_index : _env->model().operations.at(current_operation_index).getInputs())
+ {
+ const auto use_operators = _env->model().operands.at(input_index).getUses();
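+        // An input buffer is dead once every operation that uses it has been executed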
+ bool dead_buffer = true;
+ for (auto use_operator : use_operators.list())
+ {
+ if (executed.find(use_operator) == executed.end())
+ {
+ dead_buffer = false;
+ break;
+ }
+ }
+
+ if (dead_buffer)
+ {
+ _env->freeIfAllocated(input_index);
+ }
+ }
+ }
+ }
+}
+
+} // namespace interp
+} // namespace exec
+} // namespace neurun