Diffstat (limited to 'runtimes/neurun/core/src/exec/ExecutorBase.cc')
-rw-r--r-- runtimes/neurun/core/src/exec/ExecutorBase.cc | 140
1 file changed, 140 insertions(+), 0 deletions(-)
diff --git a/runtimes/neurun/core/src/exec/ExecutorBase.cc b/runtimes/neurun/core/src/exec/ExecutorBase.cc
new file mode 100644
index 000000000..827d4dc8b
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/ExecutorBase.cc
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ExecutorBase.h"
+#include "util/logging.h"
+
+namespace neurun
+{
+namespace exec
+{
+
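+// Holds shared references to the model and operand context, and takes
+// ownership of the lowered-graph artifacts (subgraphs, lower info, tensor
+// managers) that concrete executors need at run time.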
+ExecutorBase::ExecutorBase(const std::shared_ptr<const model::Model> &model,
+ std::unique_ptr<model::Subgraphs> subgraphs,
+ const std::shared_ptr<compiler::OperandContext> &operand_context,
+ std::unique_ptr<graph::LowerInfoMap> lower_info,
+ std::unique_ptr<backend::TensorManagerSet> tensor_mgrs)
+ : _observers(), _model{model}, _subgraphs{std::move(subgraphs)},
+ _operand_context{operand_context}, _lower_info{std::move(lower_info)},
+ _tensor_mgrs{std::move(tensor_mgrs)}, _mutex()
+{
+ // DO NOTHING
+}
+
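+// Builds a typed ISource for the input at `index`: selects the element type
+// from `type` and wraps `buffer`/`length` so the data can later be pushed
+// into the corresponding backend tensor.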
+std::unique_ptr<ISource> ExecutorBase::source(const model::IOIndex &index,
+ const model::TypeInfo &type, const void *buffer,
+ size_t length)
+{
+ using ::neurun::model::DataType;
+ switch (type.type())
+ {
+ case DataType::FLOAT32:
+ return source<float>(index, buffer, length);
+ case DataType::INT32:
+ return source<int32_t>(index, buffer, length);
+ case DataType::UINT32:
+ return source<uint32_t>(index, buffer, length);
+ case DataType::BOOL8:
+ case DataType::QUANT8_ASYMM:
+ return source<uint8_t>(index, buffer, length);
+ default:
+ throw std::runtime_error("Not supported yet");
+ }
+}
+
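+// Builds a typed ISink for the output at `index`: selects the element type
+// from `type` and wraps `buffer`/`length` so the result can later be pulled
+// out of the corresponding backend tensor.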
+std::unique_ptr<ISink> ExecutorBase::sink(const model::IOIndex &index, const model::TypeInfo &type,
+ void *buffer, size_t length)
+{
+ using ::neurun::model::DataType;
+ switch (type.type())
+ {
+ case DataType::FLOAT32:
+ return sink<float>(index, buffer, length);
+ case DataType::INT32:
+ return sink<int32_t>(index, buffer, length);
+ case DataType::UINT32:
+ return sink<uint32_t>(index, buffer, length);
+ case DataType::BOOL8:
+ case DataType::QUANT8_ASYMM:
+ return sink<uint8_t>(index, buffer, length);
+ default:
+ throw std::runtime_error("Not supported yet");
+ }
+}
+
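+// Runs a single inference described by `desc`: pushes user input buffers into
+// backend tensors, calls the subclass's executeImpl(), then pulls the results
+// back into the user output buffers.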
+void ExecutorBase::execute(const IODescription &desc)
+{
+  // Serialize executions with a mutex for thread safety
+  // TODO: Skip the lock when every backend used by this executor is
+  //       thread-safe (otherwise, keep the mutex)
+ std::lock_guard<std::mutex> lock(_mutex);
+
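+  // One slot per model input/output; entries for skipped (optional or unused)
+  // inputs remain null.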
+ std::vector<std::unique_ptr<ISource>> sources{_model->inputs.size()};
+ std::vector<std::unique_ptr<ISink>> sinks{_model->outputs.size()};
+
+ // Set input(s)
+ for (uint32_t n = 0; n < _model->inputs.size(); ++n)
+ {
+ model::IOIndex input_index{n};
+ model::OperandIndex index{_model->inputs.at(input_index)};
+
+ if (desc.inputs.at(n) == nullptr)
+ {
+ // Optional input
+ continue;
+ }
+
+ const auto operand_li = _lower_info->operand.at(index).get();
+ if (operand_li->def_factors().empty())
+ {
+      // This input is not used (e.g. a constant input such as Reshape's axis)
+ continue;
+ }
+
+ const auto &input = *desc.inputs.at(n);
+ sources.at(n) = source(input_index, input.info.typeInfo(), input.buffer, input.size);
+
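+    // Copy the user buffer into the backend tensor; access() lets the backend
+    // synchronize around the write (e.g. map/unmap device memory).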
+ auto setter = [&](::neurun::backend::operand::ITensor &tensor) { sources.at(n)->push(tensor); };
+
+ auto object = _operand_context->at(index);
+
+ object->access(setter);
+ }
+
+ executeImpl();
+
+ // Get output(s)
+ for (uint32_t n = 0; n < _model->outputs.size(); ++n)
+ {
+    model::IOIndex output_index{n};
+ // Optional output
+ if (desc.outputs.at(n) == nullptr)
+ {
+ continue;
+ }
+ const auto &output = *desc.outputs.at(n);
+ sinks.at(n) = sink(output_index, output.info.typeInfo(), output.buffer, output.size);
+
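+    // Copy the backend tensor out to the user buffer, again under access()
+    // so the backend can synchronize the read.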
+ auto getter = [&](::neurun::backend::operand::ITensor &tensor) { sinks.at(n)->pull(tensor); };
+
+    model::OperandIndex index{_model->outputs.at(output_index)};
+ auto object = _operand_context->at(index);
+
+ object->access(getter);
+ }
+}
+
+} // namespace exec
+} // namespace neurun
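
Below is a minimal, hypothetical sketch of the calling pattern execute() expects. It assumes a concrete ExecutorBase subclass and an IODescription whose entry fields are inferred from their use above (`info`, `buffer`, `size`); the function name, parameter layout, and field access are illustrative assumptions, not part of this commit.

  // Hypothetical driver (illustration only; not part of this commit).
  // `desc` is assumed to be pre-populated by the frontend with one descriptor
  // per model input/output; nullptr entries are treated as optional and skipped.
  void runOnce(neurun::exec::ExecutorBase &executor, neurun::exec::IODescription &desc,
               const float *input, size_t input_bytes, float *output, size_t output_bytes)
  {
    // Bind user-owned buffers; execute() copies them in before executeImpl()
    // and copies the results out afterwards.
    desc.inputs.at(0)->buffer = input;      // assumed field names, see above
    desc.inputs.at(0)->size = input_bytes;
    desc.outputs.at(0)->buffer = output;
    desc.outputs.at(0)->size = output_bytes;
    executor.execute(desc); // serialized internally by _mutex
  }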