/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ExecutorBase.h"
#include "util/logging.h"

namespace neurun
{
namespace exec
{

ExecutorBase::ExecutorBase(const std::shared_ptr<const model::Model> &model,
                           std::unique_ptr<model::Subgraphs> subgraphs,
                           const std::shared_ptr<compiler::OperandContext> &operand_context,
                           std::unique_ptr<graph::LowerInfoMap> lower_info,
                           std::unique_ptr<backend::TensorManagerSet> tensor_mgrs)
    : _observers(), _model{model}, _subgraphs{std::move(subgraphs)},
      _operand_context{operand_context}, _lower_info{std::move(lower_info)},
      _tensor_mgrs{std::move(tensor_mgrs)}, _mutex()
{
  // DO NOTHING
}

// Dispatch on the operand data type to the templated source<T>() helper,
// which wraps the user-provided input buffer.
std::unique_ptr<ISource> ExecutorBase::source(const model::IOIndex &index,
                                              const model::TypeInfo &type, const void *buffer,
                                              size_t length)
{
  using ::neurun::model::DataType;
  switch (type.type())
  {
    case DataType::FLOAT32:
      return source<float>(index, buffer, length);
    case DataType::INT32:
      return source<int32_t>(index, buffer, length);
    case DataType::UINT32:
      return source<uint32_t>(index, buffer, length);
    case DataType::BOOL8:
    case DataType::QUANT8_ASYMM:
      return source<uint8_t>(index, buffer, length);
    default:
      throw std::runtime_error("Not supported yet");
  }
}

// Dispatch on the operand data type to the templated sink<T>() helper,
// which wraps the user-provided output buffer.
std::unique_ptr<ISink> ExecutorBase::sink(const model::IOIndex &index, const model::TypeInfo &type,
                                          void *buffer, size_t length)
{
  using ::neurun::model::DataType;
  switch (type.type())
  {
    case DataType::FLOAT32:
      return sink<float>(index, buffer, length);
    case DataType::INT32:
      return sink<int32_t>(index, buffer, length);
    case DataType::UINT32:
      return sink<uint32_t>(index, buffer, length);
    case DataType::BOOL8:
    case DataType::QUANT8_ASYMM:
      return sink<uint8_t>(index, buffer, length);
    default:
      throw std::runtime_error("Not supported yet");
  }
}

void ExecutorBase::execute(const IODescription &desc)
{
  // For thread safety, use a mutex
  // TODO: If every backend used by this executor is thread-safe,
  //       the mutex is unnecessary (otherwise it is required)
  std::lock_guard<std::mutex> lock(_mutex);

  std::vector<std::unique_ptr<ISource>> sources{_model->inputs.size()};
  std::vector<std::unique_ptr<ISink>> sinks{_model->outputs.size()};

  // Set input(s)
  for (uint32_t n = 0; n < _model->inputs.size(); ++n)
  {
    model::IOIndex input_index{n};
    model::OperandIndex index{_model->inputs.at(input_index)};

    if (desc.inputs.at(n) == nullptr)
    {
      // Optional input
      continue;
    }

    const auto operand_li = _lower_info->operand.at(index).get();
    if (operand_li->def_factors().empty())
    {
      // This input is not used (i.e. it is a constant, e.g. reshape's axis)
      continue;
    }

    const auto &input = *desc.inputs.at(n);
    sources.at(n) = source(input_index, input.info.typeInfo(), input.buffer, input.size);

    // Push the user-provided input buffer into the backend tensor for this operand
    auto setter = [&](::neurun::backend::operand::ITensor &tensor) { sources.at(n)->push(tensor); };

    auto object = _operand_context->at(index);

    object->access(setter);
  }

  executeImpl();

  // Get output(s)
  for (uint32_t n = 0; n < _model->outputs.size(); ++n)
  {
    neurun::model::IOIndex output_index{n};
    // Optional output
    if (desc.outputs.at(n) == nullptr)
    {
      continue;
    }
    const auto &output = *desc.outputs.at(n);
    sinks.at(n) = sink(output_index, output.info.typeInfo(), output.buffer, output.size);

    // Pull the backend tensor for this operand back into the user-provided output buffer
    auto getter = [&](::neurun::backend::operand::ITensor &tensor) { sinks.at(n)->pull(tensor); };

    ::neurun::model::OperandIndex index{_model->outputs.at(output_index)};
    auto object = _operand_context->at(index);

    object->access(getter);
  }
}

} // namespace exec
} // namespace neurun
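
// Usage sketch (illustrative only, kept as a comment so it is not compiled): how a
// caller might drive ExecutorBase::execute(). The fields `inputs`, `outputs`, `info`,
// `buffer`, and `size` are the ones this file reads from IODescription; how the
// descriptors are allocated and filled below is an assumption, not taken from the
// actual IODescription definition.
//
//   exec::IODescription desc;
//   // One descriptor slot per model input/output; a nullptr slot marks an optional
//   // input/output that the caller does not provide.
//   desc.inputs.at(0) = /* descriptor carrying the type info, user buffer, byte size */;
//   desc.outputs.at(0) = /* descriptor carrying the type info, user buffer, byte size */;
//   executor->execute(desc); // calls are serialized internally via _mutex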