summaryrefslogtreecommitdiff
path: root/runtimes/neurun/src/frontend/execution.cc
diff options
context:
space:
mode:
Diffstat (limited to 'runtimes/neurun/src/frontend/execution.cc')
-rw-r--r--runtimes/neurun/src/frontend/execution.cc328
1 files changed, 0 insertions, 328 deletions
diff --git a/runtimes/neurun/src/frontend/execution.cc b/runtimes/neurun/src/frontend/execution.cc
deleted file mode 100644
index 5f1729b30..000000000
--- a/runtimes/neurun/src/frontend/execution.cc
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <NeuralNetworks.h>
-
-#include <new>
-
-#include "frontend/wrapper/compilation.h"
-#include "frontend/wrapper/execution.h"
-#include "frontend/wrapper/event.h"
-
-#include "model/operand/DataType.h"
-#include "model/operand/Index.h"
-#include "graph/operand/Layout.h"
-#include "backend/BackendManager.h"
-#include "backend/interface/IConfig.h"
-#include "compiler/BackendResolver.h"
-#include "compiler/TensorInfo.h"
-#include "backend/interface/operand/ITensor.h"
-
// Installs an input "source" object on the execution for input slot `index`.
//
// The source is what later pushes the caller-provided `buffer` into the
// backend tensor when the execution runs. Two cases:
//  - If the frontend (default backend) layout is NHWC but the backend that
//    defines this operand expects NCHW, a PermutateSource is installed so the
//    data is reordered while being copied.
//  - Otherwise a plain typed Source is installed that copies `buffer` as-is.
//
// Parameters:
//   execution - execution to attach the source to (must be non-null)
//   type      - operand element type, selects the typed Source specialization
//   index     - NNAPI input slot index (position in the model's input list)
//   buffer    - caller-owned input data
//   length    - size of `buffer` in bytes
//
// NOTE(review): on the permutation path `length` is ignored and the operand's
// tensor_info.total_size() is used instead — confirm callers always pass a
// buffer covering the whole tensor.
//
// Throws std::runtime_error for element types with no Source specialization.
inline void source(ANeuralNetworksExecution *execution,
                   const ::neurun::model::operand::DataType &type, int32_t index,
                   const void *buffer, size_t length)
{
  const auto &operands = execution->plan().model().operands();
  neurun::model::operand::IO::Index input_index{index};

  // Resolve the I/O slot index to the model operand, then ask the backend
  // that defines (produces) this operand which layout it expects.
  const auto operand_index = execution->plan().model().getInputs().at(input_index);
  auto operand = &operands.at(operand_index);
  auto operand_li = operand->lower_info();
  const auto output_backend = operand_li->def_backends().getOnlyElement();
  const auto output_layout = output_backend->config()->getOperandLayout();
  // Layout the frontend data arrives in, taken from the default backend.
  auto input_layout = execution->plan()
                          .model()
                          .backend_resolver()
                          ->getDefaultBackend()
                          ->config()
                          ->getOperandLayout();
  if (input_layout == neurun::graph::operand::Layout::NHWC &&
      output_layout == neurun::graph::operand::Layout::NCHW)
  {
    // Layout mismatch: install a permuting source (NHWC -> NCHW reorder).
    const auto tensor_info = neurun::compiler::TensorInfo(operand->shape(), operand->typeInfo());

    execution->source<::neurun::exec::PermutateSource>(index, buffer, tensor_info.total_size(),
                                                       operand->shape());
    return;
  }
  using ::neurun::model::operand::DataType;
  // Layouts agree: install a plain typed source matching the element type.
  switch (type)
  {
    case DataType::SCALAR_FLOAT32:
    case DataType::TENSOR_FLOAT32:
      execution->source<::neurun::exec::Source<float>>(
          index, reinterpret_cast<const float *>(buffer), length);
      break;
    case DataType::SCALAR_INT32:
    case DataType::TENSOR_INT32:
      execution->source<::neurun::exec::Source<int32_t>>(
          index, reinterpret_cast<const int32_t *>(buffer), length);
      break;
    case DataType::SCALAR_UINT32:
      execution->source<::neurun::exec::Source<uint32_t>>(
          index, reinterpret_cast<const uint32_t *>(buffer), length);
      break;
    case DataType::TENSOR_QUANT8_ASYMM:
      execution->source<::neurun::exec::Source<uint8_t>>(
          index, reinterpret_cast<const uint8_t *>(buffer), length);
      break;
    default:
      throw std::runtime_error("Not supported, yet");
      break;
  }
}
-
-inline void sink(ANeuralNetworksExecution *execution,
- const ::neurun::model::operand::DataType &type, int32_t index, void *buffer,
- size_t length)
-{
- const auto &operands = execution->plan().model().operands();
- neurun::model::operand::IO::Index input_index{index};
-
- const auto operand_index = execution->plan().model().getOutputs().at(input_index);
- auto operand = &operands.at(operand_index);
- auto operand_li = operand->lower_info();
- const auto input_backend = operand_li->def_backends().getOnlyElement();
- const auto input_layout = input_backend->config()->getOperandLayout();
- auto output_layout = execution->plan()
- .model()
- .backend_resolver()
- ->getDefaultBackend()
- ->config()
- ->getOperandLayout();
- if (input_layout == neurun::graph::operand::Layout::NCHW &&
- output_layout == neurun::graph::operand::Layout::NHWC)
- {
- const auto tensor_info = neurun::compiler::TensorInfo(operand->shape(), operand->typeInfo());
-
- execution->sink<::neurun::exec::PermutateSink>(index, buffer, tensor_info.total_size(),
- operand->shape());
- return;
- }
- using ::neurun::model::operand::DataType;
- switch (type)
- {
- case DataType::SCALAR_FLOAT32:
- case DataType::TENSOR_FLOAT32:
- execution->sink<::neurun::exec::Sink<float>>(index, reinterpret_cast<float *>(buffer),
- length);
- break;
- case DataType::SCALAR_INT32:
- case DataType::TENSOR_INT32:
- execution->sink<::neurun::exec::Sink<int32_t>>(index, reinterpret_cast<int32_t *>(buffer),
- length);
- break;
- case DataType::SCALAR_UINT32:
- execution->sink<::neurun::exec::Sink<uint32_t>>(index, reinterpret_cast<uint32_t *>(buffer),
- length);
- break;
- case DataType::TENSOR_QUANT8_ASYMM:
- execution->sink<::neurun::exec::Sink<uint8_t>>(index, reinterpret_cast<uint8_t *>(buffer),
- length);
- break;
- default:
- throw std::runtime_error("Not supported, yet");
- break;
- }
-}
-
-//
-// NNAPI Implementation
-//
-int ANeuralNetworksExecution_create(ANeuralNetworksCompilation *compilation,
- ANeuralNetworksExecution **execution)
-{
- if ((compilation == nullptr) || (execution == nullptr))
- {
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- // Can handle compiled state only
- if (compilation->plan().state() != neurun::compiler::State::COMPILED)
- {
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- std::shared_ptr<const neurun::compiler::Plan> plan;
-
- compilation->publish(plan);
-
- *execution = new (std::nothrow) ANeuralNetworksExecution{plan};
- if (*execution == nullptr)
- {
- return ANEURALNETWORKS_OUT_OF_MEMORY;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution *execution, int32_t index,
- const ANeuralNetworksOperandType * /* type */,
- const void *buffer, size_t length)
-{
- // Don't check type
- // Comment about ANeuralNetworksOperandType in NeuralNetworks.h:
- // If the input or output is optional and omitted then it need not have a fully specified tensor
- // operand type
- if ((execution == nullptr) || ((buffer == nullptr) && (length != 0)))
- {
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- // TODO Handle optional input
- if (buffer == nullptr)
- {
- throw std::runtime_error("Not supported optional input, yet");
- }
-
- const auto &operands = execution->plan().model().operands();
-
- // TODO Check type conflicts
-
- neurun::model::operand::IO::Index input_index{index};
-
- const auto operand_index = execution->plan().model().getInputs().at(input_index);
- const auto data_type = operands.at(operand_index).typeInfo().type();
- const auto operand_shape = operands.at(operand_index).shape();
-
- source(execution, data_type, index, buffer, length);
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution *execution, int32_t index,
- const ANeuralNetworksOperandType * /* type */, void *buffer,
- size_t length)
-{
- // Don't check type
- // Comment about ANeuralNetworksOperandType in NeuralNetworks.h:
- // If the input or output is optional and omitted then it need not have a fully specified tensor
- // operand type
- if ((execution == nullptr) || ((buffer == nullptr) && (length != 0)))
- {
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- // Handle optional output
- if (buffer == nullptr)
- {
- return ANEURALNETWORKS_NO_ERROR;
- }
-
- const auto &operands = execution->plan().model().operands();
-
- // TODO Check type conflicts
-
- neurun::model::operand::IO::Index output_index{index};
-
- const auto operand_index = execution->plan().model().getOutputs().at(output_index);
- const auto data_type = operands.at(operand_index).typeInfo().type();
- const auto operand_shape = operands.at(operand_index).shape();
-
- sink(execution, data_type, index, buffer, length);
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
// Runs the compiled plan. Despite the "start" name, execution here is fully
// synchronous: inputs are pushed, every operation is run in plan order, and
// outputs are pulled before this function returns. The event object is a
// placeholder so callers can still wait on it per the NNAPI contract.
//
// Returns ANEURALNETWORKS_UNEXPECTED_NULL on null arguments,
// ANEURALNETWORKS_OUT_OF_MEMORY if the event cannot be allocated,
// otherwise ANEURALNETWORKS_NO_ERROR.
//
// NOTE(review): `*event` is heap-allocated before the run; if a later access
// or run() call throws, the event leaks — consider allocating it last.
int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution *execution,
                                          ANeuralNetworksEvent **event)
{
  if ((execution == nullptr) || (event == nullptr))
  {
    return ANEURALNETWORKS_UNEXPECTED_NULL;
  }

  // TODO: Handle event
  *event = new (std::nothrow) ANeuralNetworksEvent{};
  if (*event == nullptr)
  {
    return ANEURALNETWORKS_OUT_OF_MEMORY;
  }

  const auto &plan = execution->plan();
  const auto &model = plan.model();

  // Set input(s): for each model input, access the backend tensor and let the
  // previously-installed source (see setInput) push the user buffer into it.
  for (uint32_t n = 0; n < model.getInputs().size(); ++n)
  {
    auto setter = [&](::neurun::backend::operand::ITensor &tensor) {
      execution->source(n).push(tensor);
    };

    neurun::model::operand::IO::Index input_index{n};

    ::neurun::model::operand::Index index{model.getInputs().at(input_index)};
    auto object = plan.operands().at(index);

    // access() wraps the push in whatever locking/mapping the backend needs.
    object->access(setter);
  }

  // Run every scheduled operation in plan order, synchronously.
  const auto &operations = execution->plan().operations();

  for (uint32_t n = 0; n < operations.size(); ++n)
  {
    operations.at(n).run();
  }

  // Get output(s): mirror of the input loop — the installed sink (see
  // setOutput) pulls each result tensor into the user's buffer.
  for (uint32_t n = 0; n < model.getOutputs().size(); ++n)
  {
    auto getter = [&](::neurun::backend::operand::ITensor &tensor) {
      execution->sink(n).pull(tensor);
    };

    neurun::model::operand::IO::Index output_index{n};

    ::neurun::model::operand::Index index{model.getOutputs().at(output_index)};
    auto object = plan.operands().at(index);

    object->access(getter);
  }

  return ANEURALNETWORKS_NO_ERROR;
}
-
-void ANeuralNetworksExecution_free(ANeuralNetworksExecution * /* execution */) {}
-
-int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution *execution,
- int32_t /* index */,
- const ANeuralNetworksOperandType * /* type */,
- const ANeuralNetworksMemory *memory,
- size_t /* offset */, size_t /* length */)
-{
- if ((execution == nullptr) || (memory == nullptr))
- {
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- // NYI
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution *execution,
- int32_t /* index */,
- const ANeuralNetworksOperandType * /* type */,
- const ANeuralNetworksMemory *memory,
- size_t /* offset */, size_t /* length */)
-{
- if ((execution == nullptr) || (memory == nullptr))
- {
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- // NYI
- return ANEURALNETWORKS_NO_ERROR;
-}