Diffstat (limited to 'runtime/neurun/core/src/exec/ExecutorBase.cc')
-rw-r--r--  runtime/neurun/core/src/exec/ExecutorBase.cc  145
1 file changed, 145 insertions, 0 deletions
diff --git a/runtime/neurun/core/src/exec/ExecutorBase.cc b/runtime/neurun/core/src/exec/ExecutorBase.cc
new file mode 100644
index 000000000..9692c2ba7
--- /dev/null
+++ b/runtime/neurun/core/src/exec/ExecutorBase.cc
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ExecutorBase.h"
+#include "util/logging.h"
+namespace neurun
+{
+namespace exec
+{
+
+ExecutorBase::ExecutorBase(const ir::Graph &graph,
+ const std::shared_ptr<compiler::OperandContext> &operand_context,
+ std::unique_ptr<backend::TensorManagerSet> tensor_mgrs)
+ : _graph{graph}, _operand_context{operand_context}, _tensor_mgrs{std::move(tensor_mgrs)},
+ _mutex()
+{
+ // DO NOTHING
+}
+
+std::unique_ptr<ISource> ExecutorBase::source(const ir::IOIndex &index, const ir::TypeInfo &type,
+ const void *buffer, size_t length,
+ ir::Layout io_layout)
+{
+ using ir::DataType;
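+  // Pick the ISource specialization whose element type matches the input
+  // operand; the typed source later copies (and permutes, if the I/O layout
+  // differs from the backend layout) the user buffer into the backend tensor
+  // when push() is called during execute().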
+ switch (type.type())
+ {
+ case DataType::FLOAT32:
+ return source<float>(index, buffer, length, io_layout);
+ case DataType::INT32:
+ return source<int32_t>(index, buffer, length, io_layout);
+ case DataType::UINT32:
+ return source<uint32_t>(index, buffer, length, io_layout);
+ case DataType::BOOL8:
+ case DataType::QUANT8_ASYMM:
+ case DataType::UINT8:
+ return source<uint8_t>(index, buffer, length, io_layout);
+ case DataType::QUANT8_SYMM:
+ return source<int8_t>(index, buffer, length, io_layout);
+ default:
+ throw std::runtime_error("Not supported yet");
+ }
+}
+
+std::unique_ptr<ISink> ExecutorBase::sink(const ir::IOIndex &index, const ir::TypeInfo &type,
+ void *buffer, size_t length, ir::Layout io_layout)
+{
+ using ir::DataType;
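+  // Mirror of source(): choose the ISink specialization whose element type
+  // matches the output operand, so pull() reads the tensor back correctly.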
+ switch (type.type())
+ {
+ case DataType::FLOAT32:
+ return sink<float>(index, buffer, length, io_layout);
+ case DataType::INT32:
+ return sink<int32_t>(index, buffer, length, io_layout);
+ case DataType::UINT32:
+ return sink<uint32_t>(index, buffer, length, io_layout);
+ case DataType::BOOL8:
+ case DataType::QUANT8_ASYMM:
+ case DataType::UINT8:
+ return sink<uint8_t>(index, buffer, length, io_layout);
+ case DataType::QUANT8_SYMM:
+ return sink<int8_t>(index, buffer, length, io_layout);
+ default:
+ throw std::runtime_error("Not supported yet");
+ }
+}
+
+void ExecutorBase::execute(const IODescription &desc)
+{
+  // For thread safety, guard the whole execution with a mutex
+  // TODO: Skip the lock when every backend used by this executor is itself
+  //       thread-safe (otherwise the mutex is still required)
+ std::lock_guard<std::mutex> lock(_mutex);
+
+ std::vector<std::unique_ptr<ISource>> sources{_graph.getInputs().size()};
+ std::vector<std::unique_ptr<ISink>> sinks{_graph.getOutputs().size()};
+
+ // Set input(s)
+ for (uint32_t n = 0; n < _graph.getInputs().size(); ++n)
+ {
+ ir::IOIndex input_index{n};
+ ir::OperandIndex index{_graph.getInputs().at(input_index)};
+
+ if (desc.inputs.at(n) == nullptr)
+ {
+ // Optional input
+ continue;
+ }
+
+ const auto operand_li = _graph.getLowerInfo()->operand.at(index).get();
+ if (operand_li->def_factors().empty())
+ {
+      // This input is not consumed at runtime (e.g. it is a constant such as
+      // Reshape's axis), so there is nothing to copy in
+ continue;
+ }
+
+ const auto &input = *desc.inputs.at(n);
+ sources.at(n) =
+ source(input_index, input.info.typeInfo(), input.buffer, input.size, input.layout);
+
+ auto setter = [&](::neurun::backend::operand::ITensor &tensor) { sources.at(n)->push(tensor); };
+
+ auto object = _operand_context->at(index);
+
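+    // access() hands the backend tensor to the lambda, mapping it into host
+    // memory first when the backend requires it.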
+ object->access(setter);
+ }
+
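+  // Run the scheduled operations; executeImpl() is provided by the concrete
+  // executor subclass.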
+ executeImpl();
+
+ // Get output(s)
+ for (uint32_t n = 0; n < _graph.getOutputs().size(); ++n)
+ {
+ ir::IOIndex output_index{n};
+ // Optional output
+ if (desc.outputs.at(n) == nullptr)
+ {
+ continue;
+ }
+ const auto &output = *desc.outputs.at(n);
+ sinks.at(n) =
+ sink(output_index, output.info.typeInfo(), output.buffer, output.size, output.layout);
+
+ auto getter = [&](::neurun::backend::operand::ITensor &tensor) { sinks.at(n)->pull(tensor); };
+
+ ir::OperandIndex index{_graph.getOutputs().at(output_index)};
+ auto object = _operand_context->at(index);
+
+ object->access(getter);
+ }
+}
+
+} // namespace exec
+} // namespace neurun
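
For context, below is a minimal sketch of how a caller might drive ExecutorBase::execute(). It assumes the IODescription, InputDesc, and OutputDesc shapes from exec/IODescription.h and the IExecutor interface from exec/IExecutor.h; the run_once wrapper, the *_info parameters, and the exact constructor argument order are illustrative assumptions, not part of this commit.

#include "exec/IODescription.h" // IODescription / InputDesc / OutputDesc (assumed layout)
#include "exec/IExecutor.h"     // IExecutor, the interface ExecutorBase implements

#include <memory>
#include <vector>

// Hypothetical driver: runs one inference with a single float input/output.
void run_once(neurun::exec::IExecutor &executor,
              const neurun::ir::OperandInfo &input_info,  // assumed to come from the model
              const neurun::ir::OperandInfo &output_info) // assumed to come from the model
{
  std::vector<float> in(16, 0.0f);
  std::vector<float> out(16, 0.0f);

  neurun::exec::IODescription desc;
  desc.inputs.resize(1);
  desc.outputs.resize(1);

  // Each descriptor carries the operand info, a raw buffer, its byte size,
  // and the layout the buffer is stored in (argument order assumed).
  desc.inputs.at(0) = std::make_unique<neurun::exec::InputDesc>(
      input_info, in.data(), in.size() * sizeof(float), neurun::ir::Layout::NHWC);
  desc.outputs.at(0) = std::make_unique<neurun::exec::OutputDesc>(
      output_info, out.data(), out.size() * sizeof(float), neurun::ir::Layout::NHWC);

  // Blocks until the run finishes; concurrent calls are serialized by the
  // mutex taken at the top of ExecutorBase::execute().
  executor.execute(desc);
}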