diff options
author | Chunseok Lee <chunseok.lee@samsung.com> | 2019-01-08 17:36:34 +0900 |
---|---|---|
committer | Chunseok Lee <chunseok.lee@samsung.com> | 2019-01-08 17:36:34 +0900 |
commit | bd11b24234d7d43dfe05a81c520aa01ffad06e42 (patch) | |
tree | 57d0d4044977e4fa0e50cd9ba40b32006dff19eb /runtimes/neurun/src/frontend/execution.cc | |
parent | 91f4ba45449f700a047a4aeea00b1a7c84e94c75 (diff) | |
download | nnfw-bd11b24234d7d43dfe05a81c520aa01ffad06e42.tar.gz nnfw-bd11b24234d7d43dfe05a81c520aa01ffad06e42.tar.bz2 nnfw-bd11b24234d7d43dfe05a81c520aa01ffad06e42.zip |
Imported Upstream version 0.3upstream/0.3
Diffstat (limited to 'runtimes/neurun/src/frontend/execution.cc')
-rw-r--r-- | runtimes/neurun/src/frontend/execution.cc | 221 |
1 file changed, 157 insertions, 64 deletions
diff --git a/runtimes/neurun/src/frontend/execution.cc b/runtimes/neurun/src/frontend/execution.cc index ff34921b7..5f1729b30 100644 --- a/runtimes/neurun/src/frontend/execution.cc +++ b/runtimes/neurun/src/frontend/execution.cc @@ -22,7 +22,122 @@ #include "frontend/wrapper/execution.h" #include "frontend/wrapper/event.h" -#include "graph/operand/Index.h" +#include "model/operand/DataType.h" +#include "model/operand/Index.h" +#include "graph/operand/Layout.h" +#include "backend/BackendManager.h" +#include "backend/interface/IConfig.h" +#include "compiler/BackendResolver.h" +#include "compiler/TensorInfo.h" +#include "backend/interface/operand/ITensor.h" + +inline void source(ANeuralNetworksExecution *execution, + const ::neurun::model::operand::DataType &type, int32_t index, + const void *buffer, size_t length) +{ + const auto &operands = execution->plan().model().operands(); + neurun::model::operand::IO::Index input_index{index}; + + const auto operand_index = execution->plan().model().getInputs().at(input_index); + auto operand = &operands.at(operand_index); + auto operand_li = operand->lower_info(); + const auto output_backend = operand_li->def_backends().getOnlyElement(); + const auto output_layout = output_backend->config()->getOperandLayout(); + auto input_layout = execution->plan() + .model() + .backend_resolver() + ->getDefaultBackend() + ->config() + ->getOperandLayout(); + if (input_layout == neurun::graph::operand::Layout::NHWC && + output_layout == neurun::graph::operand::Layout::NCHW) + { + const auto tensor_info = neurun::compiler::TensorInfo(operand->shape(), operand->typeInfo()); + + execution->source<::neurun::exec::PermutateSource>(index, buffer, tensor_info.total_size(), + operand->shape()); + return; + } + using ::neurun::model::operand::DataType; + switch (type) + { + case DataType::SCALAR_FLOAT32: + case DataType::TENSOR_FLOAT32: + execution->source<::neurun::exec::Source<float>>( + index, reinterpret_cast<const float *>(buffer), length); + 
break; + case DataType::SCALAR_INT32: + case DataType::TENSOR_INT32: + execution->source<::neurun::exec::Source<int32_t>>( + index, reinterpret_cast<const int32_t *>(buffer), length); + break; + case DataType::SCALAR_UINT32: + execution->source<::neurun::exec::Source<uint32_t>>( + index, reinterpret_cast<const uint32_t *>(buffer), length); + break; + case DataType::TENSOR_QUANT8_ASYMM: + execution->source<::neurun::exec::Source<uint8_t>>( + index, reinterpret_cast<const uint8_t *>(buffer), length); + break; + default: + throw std::runtime_error("Not supported, yet"); + break; + } +} + +inline void sink(ANeuralNetworksExecution *execution, + const ::neurun::model::operand::DataType &type, int32_t index, void *buffer, + size_t length) +{ + const auto &operands = execution->plan().model().operands(); + neurun::model::operand::IO::Index input_index{index}; + + const auto operand_index = execution->plan().model().getOutputs().at(input_index); + auto operand = &operands.at(operand_index); + auto operand_li = operand->lower_info(); + const auto input_backend = operand_li->def_backends().getOnlyElement(); + const auto input_layout = input_backend->config()->getOperandLayout(); + auto output_layout = execution->plan() + .model() + .backend_resolver() + ->getDefaultBackend() + ->config() + ->getOperandLayout(); + if (input_layout == neurun::graph::operand::Layout::NCHW && + output_layout == neurun::graph::operand::Layout::NHWC) + { + const auto tensor_info = neurun::compiler::TensorInfo(operand->shape(), operand->typeInfo()); + + execution->sink<::neurun::exec::PermutateSink>(index, buffer, tensor_info.total_size(), + operand->shape()); + return; + } + using ::neurun::model::operand::DataType; + switch (type) + { + case DataType::SCALAR_FLOAT32: + case DataType::TENSOR_FLOAT32: + execution->sink<::neurun::exec::Sink<float>>(index, reinterpret_cast<float *>(buffer), + length); + break; + case DataType::SCALAR_INT32: + case DataType::TENSOR_INT32: + 
execution->sink<::neurun::exec::Sink<int32_t>>(index, reinterpret_cast<int32_t *>(buffer), + length); + break; + case DataType::SCALAR_UINT32: + execution->sink<::neurun::exec::Sink<uint32_t>>(index, reinterpret_cast<uint32_t *>(buffer), + length); + break; + case DataType::TENSOR_QUANT8_ASYMM: + execution->sink<::neurun::exec::Sink<uint8_t>>(index, reinterpret_cast<uint8_t *>(buffer), + length); + break; + default: + throw std::runtime_error("Not supported, yet"); + break; + } +} // // NNAPI Implementation @@ -35,7 +150,13 @@ int ANeuralNetworksExecution_create(ANeuralNetworksCompilation *compilation, return ANEURALNETWORKS_UNEXPECTED_NULL; } - std::shared_ptr<const neurun::codegen::Plan> plan; + // Can handle compiled state only + if (compilation->plan().state() != neurun::compiler::State::COMPILED) + { + return ANEURALNETWORKS_BAD_STATE; + } + + std::shared_ptr<const neurun::compiler::Plan> plan; compilation->publish(plan); @@ -61,36 +182,23 @@ int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution *execution, int32 return ANEURALNETWORKS_UNEXPECTED_NULL; } + // TODO Handle optional input + if (buffer == nullptr) + { + throw std::runtime_error("Not supported optional input, yet"); + } + const auto &operands = execution->plan().model().operands(); // TODO Check type conflicts - // NOTE The current implemenation assumes that every input is a feature map. 
- // TODO Remove this assumption - neurun::graph::operand::IO::Index input_index{index}; + neurun::model::operand::IO::Index input_index{index}; const auto operand_index = execution->plan().model().getInputs().at(input_index); + const auto data_type = operands.at(operand_index).typeInfo().type(); + const auto operand_shape = operands.at(operand_index).shape(); - if (operands.at(operand_index).shape().rank() == 2) - { - assert(operands.at(operand_index).shape().dim(0) == 1); - - const auto len = operands.at(operand_index).shape().dim(1); - - execution->source<neurun::exec::VectorSource>( - index, len, reinterpret_cast<const uint8_t *>(buffer), length); - } - else if (operands.at(operand_index).shape().rank() == 4) - { - const auto &operand_shape = operands.at(operand_index).shape().asFeature(); - - execution->source<neurun::exec::FeatureSource>( - index, operand_shape, reinterpret_cast<const uint8_t *>(buffer), length); - } - else - { - throw std::runtime_error{"Not supported, yet"}; - } + source(execution, data_type, index, buffer, length); return ANEURALNETWORKS_NO_ERROR; } @@ -108,36 +216,23 @@ int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution *execution, int3 return ANEURALNETWORKS_UNEXPECTED_NULL; } + // Handle optional output + if (buffer == nullptr) + { + return ANEURALNETWORKS_NO_ERROR; + } + const auto &operands = execution->plan().model().operands(); // TODO Check type conflicts - // NOTE The current implemenation assumes that every output is a feature map. 
- // TODO Remove this assumption - neurun::graph::operand::IO::Index output_index{index}; + neurun::model::operand::IO::Index output_index{index}; const auto operand_index = execution->plan().model().getOutputs().at(output_index); + const auto data_type = operands.at(operand_index).typeInfo().type(); + const auto operand_shape = operands.at(operand_index).shape(); - if (operands.at(operand_index).shape().rank() == 2) - { - assert(operands.at(operand_index).shape().dim(0) == 1); - - const auto len = operands.at(operand_index).shape().dim(1); - - execution->sink<neurun::exec::VectorSink>(index, len, reinterpret_cast<uint8_t *>(buffer), - length); - } - else if (operands.at(operand_index).shape().rank() == 4) - { - const auto &operand_shape = operands.at(operand_index).shape().asFeature(); - - execution->sink<neurun::exec::FeatureSink>(index, operand_shape, - reinterpret_cast<uint8_t *>(buffer), length); - } - else - { - throw std::runtime_error{"Not supported, yet"}; - } + sink(execution, data_type, index, buffer, length); return ANEURALNETWORKS_NO_ERROR; } @@ -163,17 +258,16 @@ int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution *execution, // Set input(s) for (uint32_t n = 0; n < model.getInputs().size(); ++n) { - auto setter = [&](::arm_compute::ITensor &tensor) { execution->source(n).push(tensor); }; + auto setter = [&](::neurun::backend::operand::ITensor &tensor) { + execution->source(n).push(tensor); + }; - neurun::graph::operand::IO::Index input_index{n}; + neurun::model::operand::IO::Index input_index{n}; - ::neurun::graph::operand::Index index{model.getInputs().at(input_index)}; - auto objects = plan.operands().at(index); + ::neurun::model::operand::Index index{model.getInputs().at(input_index)}; + auto object = plan.operands().at(index); - for (auto object : objects) - { - object->access(setter); - } + object->access(setter); } const auto &operations = execution->plan().operations(); @@ -186,17 +280,16 @@ int 
ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution *execution, // Get output(s) for (uint32_t n = 0; n < model.getOutputs().size(); ++n) { - auto getter = [&](::arm_compute::ITensor &tensor) { execution->sink(n).pull(tensor); }; + auto getter = [&](::neurun::backend::operand::ITensor &tensor) { + execution->sink(n).pull(tensor); + }; - neurun::graph::operand::IO::Index output_index{n}; + neurun::model::operand::IO::Index output_index{n}; - ::neurun::graph::operand::Index index{model.getOutputs().at(output_index)}; - auto objects = plan.operands().at(index); + ::neurun::model::operand::Index index{model.getOutputs().at(output_index)}; + auto object = plan.operands().at(index); - for (auto object : objects) - { - object->access(getter); - } + object->access(getter); } return ANEURALNETWORKS_NO_ERROR; |