summaryrefslogtreecommitdiff
path: root/runtimes/neurun/src/frontend/model.cc
diff options
context:
space:
mode:
Diffstat (limited to 'runtimes/neurun/src/frontend/model.cc')
-rw-r--r-- runtimes/neurun/src/frontend/model.cc | 480
1 files changed, 0 insertions, 480 deletions
diff --git a/runtimes/neurun/src/frontend/model.cc b/runtimes/neurun/src/frontend/model.cc
deleted file mode 100644
index 3aa2aa2ff..000000000
--- a/runtimes/neurun/src/frontend/model.cc
+++ /dev/null
@@ -1,480 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <NeuralNetworks.h>
-#include <NeuralNetworksEx.h>
-
-#include <cassert>
-#include <stdexcept>
-#include <new>
-
-#include "cpp14/memory.h"
-
-#include "graph/Graph.h"
-#include "frontend/wrapper/model.h"
-#include "frontend/wrapper/memory.h"
-#include "model/operation/Node.Include.h"
-
-int ANeuralNetworksModel_create(ANeuralNetworksModel **model)
-{
- if (model == nullptr)
- {
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- *model = new (std::nothrow) ANeuralNetworksModel{};
- if (*model == nullptr)
- {
- return ANEURALNETWORKS_OUT_OF_MEMORY;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-void ANeuralNetworksModel_free(ANeuralNetworksModel *model) { delete model; }
-
-int ANeuralNetworksModel_addOperand(ANeuralNetworksModel *model,
- const ANeuralNetworksOperandType *type)
-{
- if ((model == nullptr) || (type == nullptr))
- {
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (model->isFinished())
- {
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- // scale and zeroPoint should be zero for scalars and non-fixed point tensors
- // Quantized:
- // scale: a 32 bit floating point value greater than zero
- // zeroPoint: a 32 bit integer, in range [0, 255]
- if (type->type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM)
- {
- if (!(type->scale > 0.0f))
- {
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if ((type->zeroPoint < 0) || (type->zeroPoint > 255))
- {
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
- else if ((type->scale != 0.0f) || (type->zeroPoint != 0))
- {
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- // dimensionCount should be zero for scalars
- if ((type->dimensionCount != 0) &&
- ((type->type == ANEURALNETWORKS_FLOAT32) || (type->type == ANEURALNETWORKS_INT32) ||
- (type->type == ANEURALNETWORKS_UINT32)))
- {
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- ::neurun::model::operand::Shape shape(type->dimensionCount);
- ::neurun::model::operand::TypeInfo typeInfo((OperandCode)(type->type), type->scale,
- type->zeroPoint);
-
- for (uint32_t axis = 0; axis < type->dimensionCount; ++axis)
- {
- shape.dim(axis) = type->dimensions[axis];
- }
-
- model->deref().addOperand(shape, typeInfo);
-
- // NOTE We do NOT allocate CLTensor here as we do not how to interpret this one.
- // TensorFlow Lite may interpret a rank-4 tensor either as a feature map (with batch) or
- // a convolution kernel.
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel *model, int32_t index,
- const void *buffer, size_t length)
-{
- const bool isOptional = ((buffer == nullptr) && (length == 0));
-
- if ((model == nullptr) || ((buffer == nullptr) && (length != 0)))
- {
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (model->isFinished())
- {
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- // Negative index value is not allowed
- if (index < 0)
- {
- return ANEURALNETWORKS_BAD_DATA;
- }
- const neurun::model::operand::Index ind{static_cast<uint32_t>(index)};
-
- if (!model->deref().operands().exist(ind))
- {
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- auto &obj = model->deref().operands().at(ind);
- if ((obj.operandSize() != length) && !isOptional)
- {
- return ANEURALNETWORKS_BAD_DATA;
- }
- if (!obj.setAsConstant())
- {
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- using ::neurun::model::operand::CachedData;
- using ::neurun::model::operand::ExternalData;
-
- // Remain operands.at(ind).data()->base() as nullptr for optional operand
- // This will be filled when model finished
- if (isOptional)
- {
- model->setOptionalOperand(ind);
- }
-
- // NNAPI spec in NeuralNetworks.h
- // For values of length greater than ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES,
- // the application is responsible for not changing the content of this region
- // until all executions using this model have completed
- if (length <= ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES)
- {
- model->deref().setOperandValue(ind, nnfw::cpp14::make_unique<CachedData>(
- reinterpret_cast<const uint8_t *>(buffer), length));
- }
- else
- {
- model->deref().setOperandValue(ind, nnfw::cpp14::make_unique<ExternalData>(
- reinterpret_cast<const uint8_t *>(buffer), length));
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel *model, int32_t index,
- const ANeuralNetworksMemory *memory,
- size_t offset, size_t length)
-{
- if ((model == nullptr) || (memory == nullptr))
- {
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (model->isFinished())
- {
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- // Negative index value is not allowed
- if (index < 0)
- {
- return ANEURALNETWORKS_BAD_DATA;
- }
- const neurun::model::operand::Index ind{static_cast<uint32_t>(index)};
-
- if (!model->deref().operands().exist(ind))
- {
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- auto &obj = model->deref().operands().at(ind);
- if ((obj.operandSize() != length) || (memory->size() < (offset + length)))
- {
- return ANEURALNETWORKS_BAD_DATA;
- }
- if (!obj.setAsConstant())
- {
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- using ::neurun::model::operand::ExternalData;
-
- model->deref().setOperandValue(
- ind, nnfw::cpp14::make_unique<ExternalData>(
- reinterpret_cast<const uint8_t *>(memory->base() + offset), length));
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
- ANeuralNetworksOperationType type, uint32_t inputCount,
- const uint32_t *inputs, uint32_t outputCount,
- const uint32_t *outputs)
-{
- if ((model == nullptr) || (inputs == nullptr) || (outputs == nullptr))
- {
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- const ANeuralNetworksOperationType FIRST_OPERATION = ANEURALNETWORKS_ADD;
- const ANeuralNetworksOperationType LAST_OPERATION = ANEURALNETWORKS_TRANSPOSE;
- if ((type < FIRST_OPERATION) || (type > LAST_OPERATION))
- {
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (model->isFinished())
- {
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- for (uint32_t i = 0; i < outputCount; i++)
- {
- const ::neurun::model::operand::Index ind{outputs[i]};
- auto &obj = model->deref().operands().at(ind);
-
- if (!obj.setAsOperationOutput())
- {
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
-
- auto &graph = model->deref();
-
- auto node_param =
- neurun::model::operation::Node::InitParam{inputCount, inputs, outputCount, outputs};
-
- try
- {
- switch (type)
- {
- case ANEURALNETWORKS_CONV_2D:
- {
- // inputCount is either 7 or 10 acccording to NN API specification.
- // - Padding is implicit when inputCount is 7
- // - Padding is explicit when inputCount is 10
- assert(inputCount == 7 || inputCount == 10);
- assert(outputCount == 1);
-
- if (inputCount == 7)
- {
- using GraphNode = neurun::model::operation::Conv2DNode;
-
- graph.addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
- }
- else
- {
- throw std::runtime_error{"Explicit padding in Conv2D is not supported, yet"};
- }
-
- break;
- }
- case ANEURALNETWORKS_MAX_POOL_2D:
- {
- // inputCount is either 7 or 10 acccording to NN API specification.
- // - Padding is implicit when inputCount is 7
- // - Padding is explicit when inputCount is 10
- assert(inputCount == 7 || inputCount == 10);
- assert(outputCount == 1);
-
- if (inputCount == 7)
- {
- using GraphNode = neurun::model::operation::MaxPool2DNode;
-
- graph.addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
- }
- else
- {
- throw std::runtime_error{"Explicit padding in MaxPool2D is not supported, yet"};
- }
-
- break;
- }
- case ANEURALNETWORKS_AVERAGE_POOL_2D:
- {
- // inputCount is either 7 or 10 acccording to NN API specification.
- // - Padding is implicit when inputCount is 7
- // - Padding is explicit when inputCount is 10
- assert(inputCount == 7 || inputCount == 10);
- assert(outputCount == 1);
-
- if (inputCount == 7)
- {
- using GraphNode = neurun::model::operation::AvgPool2DNode;
-
- graph.addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
- }
- else
- {
- throw std::runtime_error{"Explicit padding in AvgPool2D is not supported, yet"};
- }
-
- break;
- }
- case ANEURALNETWORKS_CONCATENATION:
- {
- using GraphNode = neurun::model::operation::ConcatNode;
-
- graph.addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
-
- break;
- }
- case ANEURALNETWORKS_RESHAPE:
- {
- using GraphNode = neurun::model::operation::ReshapeNode;
-
- graph.addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
-
- break;
- }
- case ANEURALNETWORKS_FULLY_CONNECTED:
- {
- using GraphNode = neurun::model::operation::FullyConnectedNode;
-
- graph.addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
-
- break;
- }
- case ANEURALNETWORKS_SOFTMAX:
- {
- using GraphNode = neurun::model::operation::SoftmaxNode;
-
- graph.addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
-
- break;
- }
- default:
- throw std::runtime_error{"Not supported operation"};
- };
- }
- catch (const std::exception &e)
- {
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_addOperationEx(ANeuralNetworksModel *model,
- ANeuralNetworksOperationTypeEx type, uint32_t inputCount,
- const uint32_t *inputs, uint32_t outputCount,
- const uint32_t *outputs)
-{
- if ((model == nullptr) || (inputs == nullptr) || (outputs == nullptr))
- {
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (model->isFinished())
- {
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- const ANeuralNetworksOperationTypeEx FIRST_OPERATION = ANEURALNETWORKS_GATHER_EX;
- const ANeuralNetworksOperationTypeEx LAST_OPERATION = ANEURALNETWORKS_PRELU_EX;
- if ((type < FIRST_OPERATION) || (type > LAST_OPERATION))
- {
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- for (uint32_t i = 0; i < outputCount; i++)
- {
- const ::neurun::model::operand::Index ind{outputs[i]};
- auto &obj = model->deref().operands().at(ind);
-
- if (!obj.setAsOperationOutput())
- {
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
-
- // Workaround: to avoid compile error by unused-parameter, use inputCount
- if (inputCount == 0)
- {
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- try
- {
- switch (type)
- {
- default:
- throw std::runtime_error{"Not supported operation"};
- }
- }
- catch (const std::exception &e)
- {
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel *model, uint32_t inputCount,
- const uint32_t *inputs, uint32_t outputCount,
- const uint32_t *outputs)
-{
- if ((model == nullptr) || (inputs == nullptr) || (outputs == nullptr))
- {
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (model->isFinished())
- {
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- // NOTE ::neurun::model::operand::Index uses int as its underlying type as various NNAPI
- // functions such as ANeuralNetworksModel_setOperandValue use int to represent operand index
- //
- // ANeuralNetworksModel_identifyInputsAndOutputs, however, uses uint32_t to represent operand
- // index.
- //
- // Below, static_cast<int>(...) is introduced to eliminate compiler warning.
- for (uint32_t n = 0; n < inputCount; ++n)
- {
- const neurun::model::operand::Index ind{static_cast<uint32_t>(inputs[n])};
- model->deref().addInput(ind);
-
- auto &obj = model->deref().operands().at(ind);
- if (!obj.setAsModelInput())
- {
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
-
- for (uint32_t n = 0; n < outputCount; ++n)
- {
- const neurun::model::operand::Index ind{static_cast<uint32_t>(outputs[n])};
- model->deref().addOutput(ind);
-
- auto &obj = model->deref().operands().at(ind);
- // Model output cannot become model input
- if (obj.isModelInput())
- {
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_finish(ANeuralNetworksModel *model)
-{
- if (model == nullptr)
- {
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- return model->finish();
-}