diff options
author | Chunseok Lee <chunseok.lee@samsung.com> | 2020-04-23 14:45:49 +0900 |
---|---|---|
committer | Chunseok Lee <chunseok.lee@samsung.com> | 2020-04-23 14:45:49 +0900 |
commit | e2ef8438a24f7c56a0744eb579a6e293ee2fbf8e (patch) | |
tree | 44a1a7951d168dd4370e13593ed03f4bc6d920c5 /runtime/onert/frontend/nnapi | |
parent | 302e6564a7a76109e1178207e44e45a58631c477 (diff) | |
download | nnfw-e2ef8438a24f7c56a0744eb579a6e293ee2fbf8e.tar.gz nnfw-e2ef8438a24f7c56a0744eb579a6e293ee2fbf8e.tar.bz2 nnfw-e2ef8438a24f7c56a0744eb579a6e293ee2fbf8e.zip |
Imported Upstream version 1.4.0 (tags: upstream/1.4.0, submit/tizen/20200423.054851)
Diffstat (limited to 'runtime/onert/frontend/nnapi')
21 files changed, 4224 insertions, 0 deletions
diff --git a/runtime/onert/frontend/nnapi/ANeuralNetworksModel.test.cc b/runtime/onert/frontend/nnapi/ANeuralNetworksModel.test.cc new file mode 100644 index 000000000..15a279a7e --- /dev/null +++ b/runtime/onert/frontend/nnapi/ANeuralNetworksModel.test.cc @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <gtest/gtest.h> + +#include "wrapper/ANeuralNetworksModel.h" + +TEST(MODEL, model_build) +{ + ANeuralNetworksModel model; + ASSERT_EQ(model.isFinished(), false); +} diff --git a/runtime/onert/frontend/nnapi/CMakeLists.txt b/runtime/onert/frontend/nnapi/CMakeLists.txt new file mode 100644 index 000000000..b66b32e89 --- /dev/null +++ b/runtime/onert/frontend/nnapi/CMakeLists.txt @@ -0,0 +1,27 @@ +file(GLOB_RECURSE SOURCES_FRONTEND "*.cc") +file(GLOB_RECURSE TESTS_FRONTEND "*.test.cc") +list(REMOVE_ITEM SOURCES_FRONTEND ${TESTS_FRONTEND}) + +set(LIB_ONERT onert) + +add_library(${LIB_ONERT} SHARED ${SOURCES_FRONTEND}) +target_link_libraries(${LIB_ONERT} PUBLIC nnfw-nnapi-header) +target_link_libraries(${LIB_ONERT} PUBLIC onert_core) # TODO Link PRIVATE onert_core +target_link_libraries(${LIB_ONERT} PRIVATE nnfw_common) +target_link_libraries(${LIB_ONERT} PRIVATE nnfw_coverage) + +set_target_properties(${LIB_ONERT} PROPERTIES OUTPUT_NAME neuralnetworks) + +install(TARGETS ${LIB_ONERT} DESTINATION lib) + +if(NOT ENABLE_TEST) + return() +endif(NOT 
ENABLE_TEST) + +add_executable(test_onert_frontend_nnapi ${TESTS_FRONTEND}) + +target_link_libraries(test_onert_frontend_nnapi PRIVATE ${LIB_ONERT} dl) +target_link_libraries(test_onert_frontend_nnapi PRIVATE gtest) +target_link_libraries(test_onert_frontend_nnapi PRIVATE gtest_main) + +install(TARGETS test_onert_frontend_nnapi DESTINATION unittest) diff --git a/runtime/onert/frontend/nnapi/compilation.cc b/runtime/onert/frontend/nnapi/compilation.cc new file mode 100644 index 000000000..0823cb456 --- /dev/null +++ b/runtime/onert/frontend/nnapi/compilation.cc @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <NeuralNetworks.h> + +#include <new> + +#include "wrapper/ANeuralNetworksModel.h" +#include "wrapper/ANeuralNetworksCompilation.h" +#include "util/logging.h" + +// +// NNAPI Implementation +// +int ANeuralNetworksCompilation_create(ANeuralNetworksModel *model, + ANeuralNetworksCompilation **compilation) +{ + if ((model == nullptr) || (compilation == nullptr)) + { + VERBOSE(NNAPI::Compilation) << "create: Incorrect null pointer parameter(s)" << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if (!model->isFinished()) + { + VERBOSE(NNAPI::Compilation) << "create: Model define is not finished" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + std::shared_ptr<onert::ir::Graph> internal; + + model->release(internal); + + *compilation = new (std::nothrow) ANeuralNetworksCompilation(internal); + if (*compilation == nullptr) + { + VERBOSE(NNAPI::Compilation) << "create: ail to create compilation object" << std::endl; + return ANEURALNETWORKS_OUT_OF_MEMORY; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation *compilation) +{ + if (compilation == nullptr) + { + VERBOSE(NNAPI::Compilation) << "finish: Incorrect null pointer parameter" << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if (compilation->state() != ::onert::compiler::State::CREATED) + { + VERBOSE(NNAPI::Compilation) << "finish: Already finished" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + if (!compilation->finish()) + { + VERBOSE(NNAPI::Compilation) << "finish: Fail to compile" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation *compilation) +{ + delete compilation; +} + +int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation *compilation, + int32_t preference) +{ + if (compilation == nullptr) + { + VERBOSE(NNAPI::Compilation) << "setPreference: Incorrect null 
pointer parameter" << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if (compilation->state() != ::onert::compiler::State::CREATED) + { + VERBOSE(NNAPI::Compilation) << "setPreference: Already finished" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + const PreferenceCode FIRST_PREFERENCE_CODE = ANEURALNETWORKS_PREFER_LOW_POWER; + const PreferenceCode LAST_PREFERENCE_CODE = ANEURALNETWORKS_PREFER_SUSTAINED_SPEED; + if ((preference < FIRST_PREFERENCE_CODE) || (preference > LAST_PREFERENCE_CODE)) + { + VERBOSE(NNAPI::Compilation) << "setPreference: Incorrect preference code" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + // NYI: nothing to set + return ANEURALNETWORKS_NO_ERROR; +} diff --git a/runtime/onert/frontend/nnapi/event.cc b/runtime/onert/frontend/nnapi/event.cc new file mode 100644 index 000000000..593b74e90 --- /dev/null +++ b/runtime/onert/frontend/nnapi/event.cc @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <NeuralNetworks.h> + +#include "wrapper/ANeuralNetworksEvent.h" + +int ANeuralNetworksEvent_wait(ANeuralNetworksEvent *event) +{ + if (event == nullptr) + { + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if (!event->waitFinish()) + { + return ANEURALNETWORKS_BAD_STATE; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +void ANeuralNetworksEvent_free(ANeuralNetworksEvent *event) { delete event; } diff --git a/runtime/onert/frontend/nnapi/execution.cc b/runtime/onert/frontend/nnapi/execution.cc new file mode 100644 index 000000000..6aaca1b4c --- /dev/null +++ b/runtime/onert/frontend/nnapi/execution.cc @@ -0,0 +1,480 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <NeuralNetworks.h> + +#include <new> + +#include "wrapper/ANeuralNetworksCompilation.h" +#include "wrapper/ANeuralNetworksExecution.h" +#include "wrapper/ANeuralNetworksMemory.h" +#include "wrapper/ANeuralNetworksEvent.h" +#include "wrapper/NNAPIConvert.h" +#include "util/logging.h" + +// +// NNAPI Implementation +// +int ANeuralNetworksExecution_create(ANeuralNetworksCompilation *compilation, + ANeuralNetworksExecution **execution) +{ + if ((compilation == nullptr) || (execution == nullptr)) + { + VERBOSE(NNAPI::Execution) << "create: Incorrect null pointer parameter(s)" << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + std::shared_ptr<onert::exec::IExecutor> executor; + + compilation->publish(executor); + + if (executor == nullptr) + { + VERBOSE(NNAPI::Execution) << "create: Never compiled yet" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + *execution = new (std::nothrow) ANeuralNetworksExecution{executor}; + if (*execution == nullptr) + { + VERBOSE(NNAPI::Execution) << "create: Fail to create execution object" << std::endl; + return ANEURALNETWORKS_OUT_OF_MEMORY; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +// NOTE Handle optional input +// Unspecified shape on model build +// Optional and omitted input on execution: skip input setting (workaround for LSTM) +// Optional but not omitted input on execution: cannot handle +// Normal input on execution: cannot handle +// Fully specified shape on model build +// Optional input on execution: cannot handle +// Normal input: handle normally +int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution *execution, int32_t index, + const ANeuralNetworksOperandType *type, const void *buffer, + size_t length) +{ + // Don't check type + // Comment about ANeuralNetworksOperandType in NeuralNetworks.h: + // If the input or output is optional and omitted then it need not have a fully specified tensor + // operand type + if ((execution == nullptr) || ((buffer == nullptr) && (length 
!= 0))) + { + VERBOSE(NNAPI::Execution) << "setInput: Incorrect null pointer parameter(s)" << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if ((buffer != nullptr) && (length == 0)) + { + VERBOSE(NNAPI::Execution) << "setInput: Zero length input" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + const auto operand_index = execution->getInputOperandIndex(index); + if (!operand_index.valid()) + { + VERBOSE(NNAPI::Execution) << "setInput: Invalid input index" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + // Omitted optional input + // LSTM operation's some inputs can be optional input + if ((buffer == nullptr) && (length == 0)) + { + if (execution->haveUnspecifiedDims(operand_index)) + { + return ANEURALNETWORKS_NO_ERROR; + } + else + { + VERBOSE(NNAPI::Execution) << "setInput: Cannot handle fully-specified shape on model build " + "but omitted input on execution" + << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + } + + if (type != nullptr) + { + if (!execution->compareDataType(type, operand_index)) + { + VERBOSE(NNAPI::Execution) << "setInput: Data type mismatch" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (!execution->compareShape(type, operand_index)) + { + VERBOSE(NNAPI::Execution) << "setInput: Shape mismatch" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (NNAPIConvert::calculateSizeFromType(type) != length) + { + VERBOSE(NNAPI::Execution) << "setInput: Invalid length" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + } + else + { + if (execution->haveUnspecifiedDims(operand_index)) + { + VERBOSE(NNAPI::Execution) << "setInput: Unspecified dimension value" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (execution->getOperandSize(operand_index) != length) + { + VERBOSE(NNAPI::Execution) << "setInput: Invalid length" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + } + + if (!execution->setInput(index, type, buffer, length)) + { + VERBOSE(NNAPI::Execution) << "setInput: 
Fail to set input" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution *execution, int32_t index, + const ANeuralNetworksOperandType *type, void *buffer, + size_t length) +{ + // Don't check type + // Comment about ANeuralNetworksOperandType in NeuralNetworks.h: + // If the input or output is optional and omitted then it need not have a fully specified tensor + // operand type + if ((execution == nullptr) || ((buffer == nullptr) && (length != 0))) + { + VERBOSE(NNAPI::Execution) << "setOutput: Incorrect null pointer parameter(s)" << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if ((buffer != nullptr) && (length == 0)) + { + VERBOSE(NNAPI::Execution) << "setOutput: Zero length output" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + // Handle optional output + if (buffer == nullptr) + { + return ANEURALNETWORKS_NO_ERROR; + } + + const auto operand_index = execution->getOutputOperandIndex(index); + if (!operand_index.valid()) + { + VERBOSE(NNAPI::Execution) << "setOutput: Invalid output index" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (type != nullptr) + { + if (!execution->compareDataType(type, operand_index)) + { + VERBOSE(NNAPI::Execution) << "setOutput: Data type mismatch" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (!execution->compareShape(type, operand_index)) + { + VERBOSE(NNAPI::Execution) << "setOutput: Shape mismatch" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (NNAPIConvert::calculateSizeFromType(type) != length) + { + VERBOSE(NNAPI::Execution) << "setOutput: Invalid length" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + } + else + { + if (execution->haveUnspecifiedDims(operand_index)) + { + VERBOSE(NNAPI::Execution) << "setOutput: Unspecified dimension value" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (execution->getOperandSize(operand_index) != 
length) + { + VERBOSE(NNAPI::Execution) << "setInput: Invalid length" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + } + + if (!execution->setOutput(index, type, buffer, length)) + { + VERBOSE(NNAPI::Execution) << "setOutput: Fail to set output" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution *execution, + ANeuralNetworksEvent **event) +{ + if ((execution == nullptr) || (event == nullptr)) + { + VERBOSE(NNAPI::Execution) << "startCompute: Incorrect null pointer parameter(s)" << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + // TODO: Handle event + auto instance = execution->instance(); + *event = new (std::nothrow) ANeuralNetworksEvent{instance}; + if (*event == nullptr) + { + VERBOSE(NNAPI::Execution) << "startCompute: Fail to create event" << std::endl; + return ANEURALNETWORKS_OUT_OF_MEMORY; + } + + if (!execution->startExecute()) + { + VERBOSE(NNAPI::Execution) << "startCompute: Fail to start execution" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +int ANeuralNetworksExecution_compute(ANeuralNetworksExecution *execution) +{ + if (execution == nullptr) + { + VERBOSE(NNAPI::Execution) << "Compute: Incorrect null pointer parameter" << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if (!execution->execute()) + { + VERBOSE(NNAPI::Execution) << "Compute: Fail to execution" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +void ANeuralNetworksExecution_free(ANeuralNetworksExecution *execution) { delete execution; } + +int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution *execution, int32_t index, + const ANeuralNetworksOperandType *type, + const ANeuralNetworksMemory *memory, size_t offset, + size_t length) +{ + if ((execution == nullptr) || (memory == nullptr)) + { + VERBOSE(NNAPI::Execution) << 
"setInputFromMemory: Incorrect null pointer parameter(s)" + << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if (length == 0) + { + VERBOSE(NNAPI::Execution) << "setInputFromMemory: Zero length input" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + const auto operand_index = execution->getInputOperandIndex(index); + if (!operand_index.valid()) + { + VERBOSE(NNAPI::Execution) << "setInputFromMemory: Invalid input index" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (type != nullptr) + { + if (!execution->compareDataType(type, operand_index)) + { + VERBOSE(NNAPI::Execution) << "setInputFromMemory: Data type mismatch" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (!execution->compareShape(type, operand_index)) + { + VERBOSE(NNAPI::Execution) << "setInputFromMemory: Shape mismatch" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (NNAPIConvert::calculateSizeFromType(type) != length) + { + VERBOSE(NNAPI::Execution) << "setInputFromMemory: Invalid length" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + } + else + { + if (execution->haveUnspecifiedDims(operand_index)) + { + VERBOSE(NNAPI::Execution) << "setInputFromMemory: Unspecified dimension value" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (execution->getOperandSize(operand_index) != length) + { + VERBOSE(NNAPI::Execution) << "setInputFromMemory: Invalid length" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + } + + if (!memory->vaildAccess(offset, length)) + { + VERBOSE(NNAPI::Execution) << "setInputFromMemory: Invalid memory access" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (!execution->setInput(index, type, reinterpret_cast<const void *>(memory->base() + offset), + length)) + { + VERBOSE(NNAPI::Execution) << "setInputFromMemory: Fail to set input" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +int 
ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution *execution, int32_t index, + const ANeuralNetworksOperandType *type, + const ANeuralNetworksMemory *memory, size_t offset, + size_t length) +{ + if ((execution == nullptr) || (memory == nullptr)) + { + VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Incorrect null pointer parameter(s)" + << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if (length == 0) + { + VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Zero length input" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + const auto operand_index = execution->getOutputOperandIndex(index); + if (!operand_index.valid()) + { + VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Invalid output index" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (type != nullptr) + { + if (!execution->compareDataType(type, operand_index)) + { + VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Data type mismatch" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (!execution->compareShape(type, operand_index)) + { + VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Shape mismatch" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (NNAPIConvert::calculateSizeFromType(type) != length) + { + VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Invalid length" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + } + else + { + if (execution->haveUnspecifiedDims(operand_index)) + { + VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Unspecified dimension value" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (execution->getOperandSize(operand_index) != length) + { + VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Invalid length" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + } + + if (!memory->vaildAccess(offset, length)) + { + VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Invalid memory access" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if 
(!execution->setOutput(index, type, reinterpret_cast<void *>(memory->base() + offset), length)) + { + VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Fail to set input" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +int ANeuralNetworksExecution_getOutputOperandRank(ANeuralNetworksExecution *execution, + int32_t index, uint32_t *rank) +{ + if ((execution == nullptr) || (rank == nullptr)) + { + VERBOSE(NNAPI::Execution) << "getOutputOperandRank: Incorrect null pointer parameter(s)" + << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + const auto operand_index = execution->getOutputOperandIndex(index); + if (!operand_index.valid()) + { + VERBOSE(NNAPI::Execution) << "getOutputOperandRank: Invalid output index" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (!execution->getOutputOperandRank(index, rank)) + { + VERBOSE(NNAPI::Execution) << "getOutputOperandRank: Fail to get rank" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +int ANeuralNetworksExecution_getOutputOperandDimensions(ANeuralNetworksExecution *execution, + int32_t index, uint32_t *dimensions) +{ + if ((execution == nullptr) || (dimensions == nullptr)) + { + VERBOSE(NNAPI::Execution) << "getOutputOperandDimensions: Incorrect null pointer parameter(s)" + << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + const auto operand_index = execution->getOutputOperandIndex(index); + if (!operand_index.valid()) + { + VERBOSE(NNAPI::Execution) << "getOutputOperandDimensions: Invalid output index" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (!execution->getOutputOperandDimensions(index, dimensions)) + { + VERBOSE(NNAPI::Execution) << "getOutputOperandDimensions: Fail to get rank" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + return ANEURALNETWORKS_NO_ERROR; +} diff --git a/runtime/onert/frontend/nnapi/memory.cc b/runtime/onert/frontend/nnapi/memory.cc new 
file mode 100644 index 000000000..6e568a926 --- /dev/null +++ b/runtime/onert/frontend/nnapi/memory.cc @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <NeuralNetworks.h> +#include <sys/mman.h> +#include <new> +#include <memory> + +#include <memory> +#include "wrapper/ANeuralNetworksMemory.h" + +int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset, + ANeuralNetworksMemory **memory) +{ + if (memory == nullptr) + { + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + *memory = new (std::nothrow) ANeuralNetworksMemory{size, protect, fd, offset}; + if (*memory == nullptr) + { + return ANEURALNETWORKS_OUT_OF_MEMORY; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +void ANeuralNetworksMemory_free(ANeuralNetworksMemory *memory) { delete memory; } diff --git a/runtime/onert/frontend/nnapi/model.cc b/runtime/onert/frontend/nnapi/model.cc new file mode 100644 index 000000000..e201a6753 --- /dev/null +++ b/runtime/onert/frontend/nnapi/model.cc @@ -0,0 +1,411 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <NeuralNetworks.h> +#include <NeuralNetworksEx.h> + +#include <new> + +#include "wrapper/ANeuralNetworksModel.h" +#include "wrapper/ANeuralNetworksMemory.h" +#include "util/logging.h" + +int ANeuralNetworksModel_create(ANeuralNetworksModel **model) +{ + if (model == nullptr) + { + VERBOSE(NNAPI::Model) << "create: Incorrect null pointer parameter" << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + *model = new (std::nothrow) ANeuralNetworksModel{}; + if (*model == nullptr) + { + VERBOSE(NNAPI::Model) << "create: Fail to create model object" << std::endl; + return ANEURALNETWORKS_OUT_OF_MEMORY; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +void ANeuralNetworksModel_free(ANeuralNetworksModel *model) { delete model; } + +int ANeuralNetworksModel_addOperand(ANeuralNetworksModel *model, + const ANeuralNetworksOperandType *type) +{ + if ((model == nullptr) || (type == nullptr)) + { + VERBOSE(NNAPI::Model) << "addOperand: Incorrect null pointer parameter(s)" << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if (model->isFinished()) + { + VERBOSE(NNAPI::Model) << "addOperand: Already finished" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + // scale and zeroPoint should be zero for scalars and non-fixed point tensors + // Quantized: + // scale: a 32 bit floating point value greater than zero + // zeroPoint: a 32 bit integer, in range [0, 255] + if (type->type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) + { + if (!(type->scale > 0.0f)) + { + VERBOSE(NNAPI::Model) << "addOperand: Incorrect scale value for 
quantization" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if ((type->zeroPoint < 0) || (type->zeroPoint > 255)) + { + VERBOSE(NNAPI::Model) << "addOperand: Incorrect zeroPoint value for quantization" + << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + } + // NOTE Validation of scale and zeroPoint would be skipped for a while. + // We do not know whether scalar type can have scale and zeroPoint. + // To pass ValidationTest and GeneratedTest, this validation code + // would not be implemented until we can define this issue clearly. + // + // scale and zeroPoint should be zero for scalars and non-fixed point tensors + // else if ((type->scale != 0.0f) || (type->zeroPoint != 0)) + // { + // return ANEURALNETWORKS_BAD_DATA; + // } + + // dimensionCount should be zero for scalars + if ((type->dimensionCount != 0) && + ((type->type == ANEURALNETWORKS_FLOAT32) || (type->type == ANEURALNETWORKS_INT32) || + (type->type == ANEURALNETWORKS_UINT32))) + { + VERBOSE(NNAPI::Model) << "addOperand: Incorrect data type" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (!model->addOperand(type)) + { + VERBOSE(NNAPI::Model) << "addOperand: Fail to add operand" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel *model, int32_t index, + const void *buffer, size_t length) +{ + const bool optional_operand = ((buffer == nullptr) && (length == 0)); + + if ((model == nullptr) || ((buffer == nullptr) && (length != 0))) + { + VERBOSE(NNAPI::Model) << "setOperandValue: Incorrect null pointer parameter(s)" << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if (model->isFinished()) + { + VERBOSE(NNAPI::Model) << "setOperandValue: Already finished" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + // Negative index value is not allowed + if (index < 0) + { + VERBOSE(NNAPI::Model) << "setOperandValue: Invalid index value (negative)" << 
std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + // NOTE OperandIndex uses uint32_t as its underlying type as various NNAPI + // functions such as ANeuralNetworksModel_addOperation use uint32_t to represent operand + // index + // ANeuralNetworksModel_setOperandValue, however, uses int32_t to represent operand index. + // + // Below, static_cast<uint32_t>(...) is introduced to eliminate compiler warning. + uint32_t ind = static_cast<uint32_t>(index); + + if (!model->isExistOperand(ind)) + { + VERBOSE(NNAPI::Model) << "setOperandValue: Invalid index value (not exist)" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (!optional_operand && (model->operandSize(ind) != length)) + { + VERBOSE(NNAPI::Model) << "setOperandValue: Invalid data length" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (model->isUsageSet(ind)) + { + VERBOSE(NNAPI::Model) << "setOperandValue: Already set operand" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + // NNAPI spec in NeuralNetworks.h + // For values of length greater than ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES, + // the application is responsible for not changing the content of this region + // until all executions using this model have completed + bool copy_value = false; + if (length <= ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES) + { + copy_value = true; + } + + if (!model->setOperandValue(ind, buffer, length, optional_operand, copy_value)) + { + VERBOSE(NNAPI::Model) << "setOperandValue: Fail to set operand value" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel *model, int32_t index, + const ANeuralNetworksMemory *memory, + size_t offset, size_t length) +{ + if ((model == nullptr) || (memory == nullptr)) + { + VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Incorrect null pointer parameter(s)" + << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; 
+ } + + if (model->isFinished()) + { + VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Already finished" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + // Negative index value is not allowed + if (index < 0) + { + VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Invalid index value (negative)" + << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + // NOTE OperandIndex uses uint32_t as its underlying type as various NNAPI + // functions such as ANeuralNetworksModel_addOperation use uint32_t to represent operand + // index + // ANeuralNetworksModel_setOperandValue, however, uses int32_t to represent operand index. + // + // Below, static_cast<uint32_t>(...) is introduced to eliminate compiler warning. + uint32_t ind = static_cast<uint32_t>(index); + + if (!model->isExistOperand(ind)) + { + VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Invalid index value (not exist)" + << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if ((model->operandSize(ind) != length) || (memory->size() < (offset + length))) + { + VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Invalid data length" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (model->isUsageSet(ind)) + { + VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Already set operand" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (!model->setOperandValue(ind, memory->base() + offset, length)) + { + VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Fail to set operand value" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model, + ANeuralNetworksOperationType type, uint32_t inputCount, + const uint32_t *inputs, uint32_t outputCount, + const uint32_t *outputs) +{ + if ((model == nullptr) || (inputs == nullptr) || (outputs == nullptr)) + { + VERBOSE(NNAPI::Model) << "addOperation: Incorrect null pointer parameter(s)" << std::endl; + return 
ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if (model->isFinished()) + { + VERBOSE(NNAPI::Model) << "addOperation: Already finished" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + const ANeuralNetworksOperationType FIRST_OPERATION = ANEURALNETWORKS_ADD; + const ANeuralNetworksOperationType LAST_OPERATION = ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR; + if ((type < FIRST_OPERATION) || (type > LAST_OPERATION)) + { + return ANEURALNETWORKS_BAD_DATA; + } + + for (uint32_t i = 0; i < outputCount; i++) + { + if (model->isUsageSet(outputs[i])) + { + VERBOSE(NNAPI::Model) << "addOperation: Already set output operand" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + } + + if (!model->addOperation(type, inputCount, inputs, outputCount, outputs)) + { + VERBOSE(NNAPI::Model) << "addOperation: Fail to add operation" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +int ANeuralNetworksModel_addOperationEx(ANeuralNetworksModel *model, + ANeuralNetworksOperationTypeEx type, uint32_t inputCount, + const uint32_t *inputs, uint32_t outputCount, + const uint32_t *outputs) +{ + if ((model == nullptr) || (inputs == nullptr) || (outputs == nullptr)) + { + VERBOSE(NNAPI::Model) << "addOperation: Incorrect null pointer parameter(s)" << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if (model->isFinished()) + { + VERBOSE(NNAPI::Model) << "addOperation: Already finished" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + const ANeuralNetworksOperationTypeEx FIRST_OPERATION = ANEURALNETWORKS_CAST_EX; + const ANeuralNetworksOperationTypeEx LAST_OPERATION = ANEURALNETWORKS_SHAPE_EX; + if ((type < FIRST_OPERATION) || (type > LAST_OPERATION)) + { + VERBOSE(NNAPI::Model) << "addOperation: Invalid operation type" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + for (uint32_t i = 0; i < outputCount; i++) + { + if (model->isUsageSet(outputs[i])) + { + VERBOSE(NNAPI::Model) << "addOperation: Already set output 
operand" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + } + + if (!model->addOperationEx(type, inputCount, inputs, outputCount, outputs)) + { + VERBOSE(NNAPI::Model) << "addOperation: Fail to add operation" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel *model, uint32_t inputCount, + const uint32_t *inputs, uint32_t outputCount, + const uint32_t *outputs) +{ + if ((model == nullptr) || (inputs == nullptr) || (outputs == nullptr)) + { + VERBOSE(NNAPI::Model) << "identifyInputsAndOutputs: Incorrect null pointer parameter(s)" + << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if (model->isFinished()) + { + VERBOSE(NNAPI::Model) << "identifyInputsAndOutputs: Already finished" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + for (uint32_t n = 0; n < inputCount; ++n) + { + uint32_t ind = inputs[n]; + if (model->isUsageSet(ind)) + { + VERBOSE(NNAPI::Model) << "identifyInputsAndOutputs: Already set input operand" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (!model->addModelInput(ind)) + { + VERBOSE(NNAPI::Model) << "identifyInputsAndOutputs: Fail to add input" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + } + + for (uint32_t n = 0; n < outputCount; ++n) + { + uint32_t ind = outputs[n]; + + if (!model->isOperationOutput(ind)) + { + VERBOSE(NNAPI::Model) << "identifyInputsAndOutputs: Need to set output operand" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (!model->addModelOutput(ind)) + { + VERBOSE(NNAPI::Model) << "identifyInputsAndOutputs: Fail to add output" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + } + + return ANEURALNETWORKS_NO_ERROR; +} + +int ANeuralNetworksModel_finish(ANeuralNetworksModel *model) +{ + if (model == nullptr) + { + VERBOSE(NNAPI::Model) << "finish: Incorrect null pointer parameter" << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if 
(model->isFinished()) + { + VERBOSE(NNAPI::Model) << "finish: Already finished" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + if (!model->finish()) + { + VERBOSE(NNAPI::Model) << "finish: Fail to generate internal graph" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +int ANeuralNetworksModel_relaxComputationFloat32toFloat16(ANeuralNetworksModel *model, bool) +{ + if (model == nullptr) + { + VERBOSE(NNAPI::Model) << "relaxComputationFloat32toFloat16: Incorrect null pointer parameter" + << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + // NYI: nothing to set + VERBOSE(NNAPI::Model) << "relaxComputationFloat32toFloat16: Do nothing yet" << std::endl; + + return ANEURALNETWORKS_NO_ERROR; +} diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.cc b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.cc new file mode 100644 index 000000000..03518a88a --- /dev/null +++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.cc @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ANeuralNetworksCompilation.h" + +#include "util/logging.h" + +ANeuralNetworksCompilation::ANeuralNetworksCompilation( + const std::shared_ptr<onert::ir::Graph> &model) noexcept + : _compiler{new onert::compiler::Compiler{model}} +{ + // DO NOTHING +} + +bool ANeuralNetworksCompilation::finish() noexcept +{ + try + { + _compiler->compile(); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return false; + } + + return true; +} diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.h b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.h new file mode 100644 index 000000000..8d72441b2 --- /dev/null +++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __COMPILATION_H__ +#define __COMPILATION_H__ + +#include "compiler/Compiler.h" +#include "ir/Graph.h" +#include "exec/IExecutor.h" + +struct ANeuralNetworksCompilation +{ +public: + ANeuralNetworksCompilation(const std::shared_ptr<onert::ir::Graph> &graph) noexcept; + +public: + bool finish() noexcept; + + onert::compiler::State state(void) noexcept { return _compiler->state(); } + void publish(std::shared_ptr<onert::exec::IExecutor> &executor) noexcept + { + _compiler->release(executor); + } + +private: + std::shared_ptr<onert::compiler::Compiler> _compiler; +}; + +#endif diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksEvent.cc b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksEvent.cc new file mode 100644 index 000000000..2bea729be --- /dev/null +++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksEvent.cc @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ANeuralNetworksEvent.h" + +#include "exec/Execution.h" +#include "util/logging.h" + +ANeuralNetworksEvent::ANeuralNetworksEvent(const std::shared_ptr<onert::exec::Execution> &execution) + : _execution{execution} +{ + // DO NOTHING +} + +bool ANeuralNetworksEvent::waitFinish(void) noexcept +{ + try + { + _execution->waitFinish(); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return false; + } + + return true; +} diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksEvent.h b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksEvent.h new file mode 100644 index 000000000..7b462d3d6 --- /dev/null +++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksEvent.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __EVENT_H__ +#define __EVENT_H__ + +#include <NeuralNetworks.h> + +#include <memory> + +namespace onert +{ +namespace exec +{ +class Execution; +} // namespace exec +} // namespace onert + +struct ANeuralNetworksEvent +{ +public: + ANeuralNetworksEvent(const std::shared_ptr<onert::exec::Execution> &execution); + +public: + bool waitFinish(void) noexcept; + +private: + const std::shared_ptr<onert::exec::Execution> _execution; +}; + +#endif diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc new file mode 100644 index 000000000..15eb088c6 --- /dev/null +++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc @@ -0,0 +1,288 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ANeuralNetworksExecution.h" +#include "NNAPIConvert.h" +#include "util/logging.h" + +const onert::ir::OperandIndex ANeuralNetworksExecution::getInputOperandIndex(int32_t index) noexcept +{ + if (index < 0) + { + // Negative index: return invalid index + return onert::ir::OperandIndex{}; + } + + uint32_t cast_index = static_cast<uint32_t>(index); + if (cast_index >= _execution->graph().getInputs().size()) + { + // Return invalid index + return onert::ir::OperandIndex{}; + } + + onert::ir::IOIndex input_index{cast_index}; + const auto operand_index = _execution->graph().getInputs().at(input_index); + return operand_index; +} + +const onert::ir::OperandIndex +ANeuralNetworksExecution::getOutputOperandIndex(int32_t index) noexcept +{ + if (index < 0) + { + // Negative index: return invalid index + return onert::ir::OperandIndex{}; + } + + uint32_t cast_index = static_cast<uint32_t>(index); + if (cast_index >= _execution->graph().getOutputs().size()) + { + // Return invalid index + return onert::ir::OperandIndex{}; + } + + onert::ir::IOIndex output_index{cast_index}; + const auto operand_index = _execution->graph().getOutputs().at(output_index); + return operand_index; +} + +bool ANeuralNetworksExecution::compareDataType(const ANeuralNetworksOperandType *type, + const onert::ir::OperandIndex index) noexcept +{ + try + { + const auto operand_type = _execution->graph().operands().at(index).typeInfo(); + const auto typeInfo = NNAPIConvert::getTypeInfo(type); + + if (operand_type != typeInfo) + { + // Data type mismatch + return false; + } + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return false; + } + + return true; +} + +bool ANeuralNetworksExecution::compareShape(const ANeuralNetworksOperandType *type, + const onert::ir::OperandIndex index) noexcept +{ + // Passed shape should be specified + if (haveUnspecifiedDims(index)) + { + return false; + } + + const auto &operand_shape = 
_execution->graph().operands().at(index).shape(); + const auto &shape_from_type = NNAPIConvert::getShape(type); + + return operand_shape == shape_from_type; +} + +bool ANeuralNetworksExecution::haveUnspecifiedDims(const onert::ir::OperandIndex index) noexcept +{ + const auto operand_shape = _execution->graph().operands().at(index).shape(); + + return operand_shape.num_elements() == 0; +} + +size_t ANeuralNetworksExecution::getOperandSize(const onert::ir::OperandIndex index) noexcept +{ + try + { + return _execution->graph().operands().at(index).operandSize(); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return 0; + } +} + +bool ANeuralNetworksExecution::setInput(uint32_t index, const ANeuralNetworksOperandType *type, + const void *buffer, size_t length) noexcept +{ + try + { + onert::ir::IOIndex input_index{index}; + const auto operand_index = getInputOperandIndex(index); + + const auto type_info = _execution->graph().operands().at(operand_index).typeInfo(); + const auto shape = (type != nullptr) ? NNAPIConvert::getShape(type) + : _execution->graph().operands().at(operand_index).shape(); + + // NOTE The nnapi does not provide setting io_layout and not support changing layout. In other + // words, we can assume that io_layout from nnapi always is the same as layout of the used + // model. + // TODO Set layout of model + _execution->setInput(input_index, type_info, shape, buffer, length, onert::ir::Layout::NHWC); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return false; + } + + return true; +} + +bool ANeuralNetworksExecution::setOutput(uint32_t index, const ANeuralNetworksOperandType *type, + void *buffer, size_t length) noexcept +{ + try + { + onert::ir::IOIndex output_index{index}; + const auto operand_index = getOutputOperandIndex(index); + + const auto type_info = _execution->graph().operands().at(operand_index).typeInfo(); + const auto shape = (type != nullptr) ? 
NNAPIConvert::getShape(type) + : _execution->graph().operands().at(operand_index).shape(); + + // NOTE The nnapi does not provide setting io_layout and not support changing layout. In other + // words, we can assume that io_layout from nnapi always is the same as layout of the used + // model. + // TODO Set layout of model + _execution->setOutput(output_index, type_info, shape, buffer, length, onert::ir::Layout::NHWC); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return false; + } + + return true; +} + +bool ANeuralNetworksExecution::startExecute(void) noexcept +{ + try + { + _execution->startExecute(); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return false; + } + + return true; +} + +bool ANeuralNetworksExecution::execute(void) noexcept +{ + try + { + _execution->execute(); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return false; + } + + return true; +} + +const std::shared_ptr<onert::exec::Execution> ANeuralNetworksExecution::instance(void) noexcept +{ + return _execution; +} + +bool ANeuralNetworksExecution::getOutputOperandRank(uint32_t index, uint32_t *rank) noexcept +{ + try + { + onert::ir::IOIndex output_index{index}; + const auto operand_index = getOutputOperandIndex(index); + bool unspecified = haveUnspecifiedDims(operand_index); + + // TODO Get unspecified output operand's rank + if (unspecified) + { + throw std::runtime_error{"Unsupport feature"}; + } + + // Check execution is finished + // Output rank and shape may be decided after execution if output is unspecified operand + if (!_execution->isFinished()) + { + return false; + } + + *rank = _execution->graph().operands().at(operand_index).shape().rank(); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return false; + } + + return true; +} + +bool ANeuralNetworksExecution::getOutputOperandDimensions(uint32_t 
index, uint32_t *dimensions) +{ + try + { + onert::ir::IOIndex output_index{index}; + const auto operand_index = getOutputOperandIndex(index); + bool unspecified = haveUnspecifiedDims(operand_index); + if (unspecified) + { + throw std::runtime_error{"NYI: Models with unspecified output dimensions"}; + } + + // Check execution is finished + // Output rank and shape may be decided after execution if output is unspecified operand + if (!_execution->isFinished()) + { + return false; + } + + auto shape = _execution->graph().operands().at(operand_index).shape(); + for (int i = 0; i < shape.rank(); i++) + { + auto dim = shape.dim(i); + + if (dim <= 0) + { + throw std::runtime_error{"Invalid dimension value"}; + } + + dimensions[i] = static_cast<uint32_t>(dim); + } + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return false; + } + + return true; +} diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.h b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.h new file mode 100644 index 000000000..af2465a81 --- /dev/null +++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __EXECUTION_H__ +#define __EXECUTION_H__ + +#include <NeuralNetworks.h> + +#include <memory> + +#include "exec/Execution.h" + +struct ANeuralNetworksExecution +{ +public: + ANeuralNetworksExecution(const std::shared_ptr<onert::exec::IExecutor> &executor) + : _execution{std::make_shared<onert::exec::Execution>(executor)} + { + // DO NOTHING + } + +public: + bool setInput(uint32_t index, const ANeuralNetworksOperandType *type, const void *buffer, + size_t length) noexcept; + bool setOutput(uint32_t index, const ANeuralNetworksOperandType *type, void *buffer, + size_t length) noexcept; + bool startExecute(void) noexcept; + bool execute(void) noexcept; + + const onert::ir::OperandIndex getInputOperandIndex(int32_t index) noexcept; + const onert::ir::OperandIndex getOutputOperandIndex(int32_t index) noexcept; + bool compareDataType(const ANeuralNetworksOperandType *type, + const onert::ir::OperandIndex index) noexcept; + bool compareShape(const ANeuralNetworksOperandType *type, + const onert::ir::OperandIndex index) noexcept; + bool haveUnspecifiedDims(const onert::ir::OperandIndex index) noexcept; + size_t getOperandSize(const onert::ir::OperandIndex index) noexcept; + const std::shared_ptr<onert::exec::Execution> instance(void) noexcept; + + /** + * @brief Get output operand's rank + * @param[in] index Output index + * @param[out] rank Output operand's rank + * @return @c true if success to get rank, otherwise @c false + */ + bool getOutputOperandRank(uint32_t index, uint32_t *rank) noexcept; + /** + * @brief Get dimensions of the output operand + * @param[in] index Output index + * @param[out] dimensions Output operand's dimensions + * @return @c true if success to get rank, otherwise @c false + * @note This must be called after execution is finished to get resolved output shape + * unspecified in model + */ + bool getOutputOperandDimensions(uint32_t index, uint32_t *dimensions); + +private: + std::shared_ptr<onert::exec::Execution> _execution; +}; + 
+#endif diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksMemory.cc b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksMemory.cc new file mode 100644 index 000000000..9cc100585 --- /dev/null +++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksMemory.cc @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <NeuralNetworks.h> +#include <sys/mman.h> + +#include "ANeuralNetworksMemory.h" + +// +// ANeuralNetworksMemory +// +ANeuralNetworksMemory::ANeuralNetworksMemory(size_t size, int protect, int fd, size_t offset) +{ + _base = reinterpret_cast<uint8_t *>(mmap(nullptr, size, protect, MAP_PRIVATE, fd, offset)); + _size = size; +} + +ANeuralNetworksMemory::~ANeuralNetworksMemory() { munmap(reinterpret_cast<void *>(_base), _size); } + +bool ANeuralNetworksMemory::vaildAccess(size_t offset, size_t length) const +{ + if ((offset >= _size) || (length > _size)) + { + return false; + } + + if ((offset + length) >= _size) + { + return false; + } + + return true; +} diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksMemory.h b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksMemory.h new file mode 100644 index 000000000..48a1bc5fc --- /dev/null +++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksMemory.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. 
All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __MEMORY_H__ +#define __MEMORY_H__ + +#include <cstdint> + +struct ANeuralNetworksMemory +{ +public: + ANeuralNetworksMemory(size_t size, int protect, int fd, size_t offset); + ~ANeuralNetworksMemory(); + +public: + size_t size(void) const { return _size; } + uint8_t *base(void) { return _base; } + uint8_t *base(void) const { return _base; } + bool vaildAccess(size_t offset, size_t length) const; + +private: + size_t _size; + uint8_t *_base; +}; + +#endif // __MEMORY_H__ diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.cc b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.cc new file mode 100644 index 000000000..d2d699ae1 --- /dev/null +++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.cc @@ -0,0 +1,267 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ANeuralNetworksModel.h" +#include "OperationFactory.h" +#include "NNAPIConvert.h" + +#include "ir/Operations.Include.h" +#include "util/logging.h" + +#include <memory> + +// +// ANeuralNetworksModel +// +ANeuralNetworksModel::ANeuralNetworksModel() noexcept : _optional_operands{}, _operand_usages{} +{ + _graph = std::make_shared<onert::ir::Graph>(); +} + +bool ANeuralNetworksModel::addOperand(const ANeuralNetworksOperandType *type) noexcept +{ + try + { + const auto shape = NNAPIConvert::getShape(type); + const auto typeInfo = NNAPIConvert::getTypeInfo(type); + _graph->addOperand(shape, typeInfo); + _operand_usages.emplace_back(OperandUsage::NOT_DEFINED); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return false; + } + + return true; +} + +bool ANeuralNetworksModel::setOperandValue(uint32_t index, const void *buffer, size_t length, + bool optional, bool copy) noexcept +{ + const onert::ir::OperandIndex ind{index}; + + try + { + _operand_usages[index] = OperandUsage::CONSTANT; + + // Remain operands.at(ind).data()->base() as nullptr for optional operand + // This will be filled when model finished + if (optional) + { + setOptionalOperand(ind); + } + + using onert::ir::CachedData; + using onert::ir::ExternalData; + if (copy) + { + _graph->operands().at(ind).data( + std::make_unique<CachedData>(reinterpret_cast<const uint8_t *>(buffer), length)); + } + else + { + _graph->operands().at(ind).data( + std::make_unique<ExternalData>(reinterpret_cast<const uint8_t *>(buffer), length)); + } + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return false; + } + + return true; +} + +bool ANeuralNetworksModel::addOperation(ANeuralNetworksOperationType type, uint32_t inputCount, + const uint32_t *inputs, uint32_t outputCount, + const uint32_t *outputs) noexcept +{ + 
try + { + for (uint32_t i = 0; i < outputCount; i++) + { + _operand_usages[outputs[i]] = OperandUsage::OPERATION_OUTPUT; + } + + auto &factory = OperationFactory::get(); + OperationFactory::Param param{inputCount, inputs, outputCount, outputs}; + + auto node = factory.create(type, param, _graph->operands()); + _graph->addOperation(std::unique_ptr<onert::ir::Operation>{node}); + + // TODO Move these codes to delegate.cpp + if (type == ANEURALNETWORKS_FULLY_CONNECTED) + { + const auto &input_operand = + _graph->operands().at(node->getInputs().at(onert::ir::operation::FullyConnected::INPUT)); + auto &weights_operand = + _graph->operands().at(node->getInputs().at(onert::ir::operation::FullyConnected::WEIGHT)); + if (input_operand.typeInfo().type() == onert::ir::DataType::FLOAT32 && + weights_operand.typeInfo().type() == onert::ir::DataType::QUANT8_ASYMM) + { + weights_operand.type(onert::ir::DataType::QUANT8_SYMM); + } + } + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return false; + } + + return true; +} + +bool ANeuralNetworksModel::addOperationEx(ANeuralNetworksOperationTypeEx type, uint32_t inputCount, + const uint32_t *inputs, uint32_t outputCount, + const uint32_t *outputs) noexcept +{ + try + { + for (uint32_t i = 0; i < outputCount; i++) + { + _operand_usages[outputs[i]] = OperandUsage::OPERATION_OUTPUT; + } + + auto &factory = OperationFactory::get(); + OperationFactory::Param param{inputCount, inputs, outputCount, outputs}; + + auto node = factory.create(type, param, _graph->operands()); + _graph->addOperation(std::unique_ptr<onert::ir::Operation>{node}); + } + catch (const std::exception &e) + { + return false; + } + return true; +} + +bool ANeuralNetworksModel::addModelInput(uint32_t index) noexcept +{ + try + { + _operand_usages[index] = OperandUsage::MODEL_INPUT; + + const onert::ir::OperandIndex ind{index}; + _graph->addInput(ind); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << 
std::endl; + + return false; + } + + return true; +} +bool ANeuralNetworksModel::addModelOutput(uint32_t index) noexcept +{ + try + { + const onert::ir::OperandIndex ind{index}; + + // Duplicated output is not allowed + if (_graph->getOutputs().contains(ind)) + { + return false; + } + + _graph->addOutput(ind); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return false; + } + + return true; +} + +bool ANeuralNetworksModel::finish() noexcept +{ + try + { + fillOptionalOperand(); + + _graph->finishBuilding(); + + _operand_usages.clear(); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << '\n'; + + return false; + } + + return true; +} + +bool ANeuralNetworksModel::isFinished() noexcept { return !_graph->isBuildingPhase(); } + +bool ANeuralNetworksModel::isExistOperand(uint32_t index) noexcept +{ + return _graph->operands().exist(onert::ir::OperandIndex{index}); +} + +size_t ANeuralNetworksModel::operandSize(uint32_t index) noexcept +{ + try + { + return _graph->operands().at(onert::ir::OperandIndex{index}).operandSize(); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << '\n'; + + return 0; + } +} + +bool ANeuralNetworksModel::isUsageSet(uint32_t index) noexcept +{ + return (_operand_usages[index] != OperandUsage::NOT_DEFINED); +} + +bool ANeuralNetworksModel::isOperationOutput(uint32_t index) noexcept +{ + return (_operand_usages[index] == OperandUsage::OPERATION_OUTPUT); +} + +void ANeuralNetworksModel::setOptionalOperand(const onert::ir::OperandIndex idx) +{ + _optional_operands.insert(idx); +} + +void ANeuralNetworksModel::fillOptionalOperand(void) +{ + _graph->operations().iterate([&](const onert::ir::OperationIndex &, onert::ir::Operation &node) { + for (auto input : node.getInputs()) + { + // TODO fill default value for optional operands + if (_optional_operands.find(input) != _optional_operands.end()) + { + throw std::runtime_error{"Optional operand is not 
supported yet"}; + } + } + }); +} diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.h b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.h new file mode 100644 index 000000000..3ccd941c7 --- /dev/null +++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __MODEL_H__ +#define __MODEL_H__ + +#include <unordered_set> +#include <NeuralNetworks.h> +#include <NeuralNetworksEx.h> + +#include "ir/Graph.h" + +struct ANeuralNetworksModel +{ +public: + enum class OperandUsage + { + NOT_DEFINED = 0, + MODEL_INPUT, + CONSTANT, + OPERATION_OUTPUT, + }; + +public: + ANeuralNetworksModel() noexcept; + +public: + bool addOperand(const ANeuralNetworksOperandType *type) noexcept; + bool setOperandValue(uint32_t index, const void *buffer, size_t length, bool optional = false, + bool copy = false) noexcept; + bool addOperation(ANeuralNetworksOperationType type, uint32_t inputCount, const uint32_t *inputs, + uint32_t outputCount, const uint32_t *outputs) noexcept; + bool addOperationEx(ANeuralNetworksOperationTypeEx type, uint32_t inputCount, + const uint32_t *inputs, uint32_t outputCount, + const uint32_t *outputs) noexcept; + bool addModelInput(uint32_t index) noexcept; + bool addModelOutput(uint32_t index) noexcept; + bool finish() noexcept; + + onert::ir::Graph &deref(void) { 
return *_graph; } + bool isFinished() noexcept; + bool isExistOperand(uint32_t index) noexcept; + size_t operandSize(uint32_t index) noexcept; + bool isUsageSet(uint32_t index) noexcept; + bool isOperationOutput(uint32_t index) noexcept; + void release(std::shared_ptr<onert::ir::Graph> &graph) { graph = _graph; } + +private: + void setOptionalOperand(const onert::ir::OperandIndex idx); + void fillOptionalOperand(void); + +private: + std::shared_ptr<onert::ir::Graph> _graph; + std::unordered_set<onert::ir::OperandIndex> _optional_operands; + std::vector<OperandUsage> _operand_usages; +}; + +#endif // __MODEL_H__ diff --git a/runtime/onert/frontend/nnapi/wrapper/NNAPIConvert.cc b/runtime/onert/frontend/nnapi/wrapper/NNAPIConvert.cc new file mode 100644 index 000000000..e07297241 --- /dev/null +++ b/runtime/onert/frontend/nnapi/wrapper/NNAPIConvert.cc @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "NNAPIConvert.h" + +#include <numeric> + +using namespace onert::ir; + +DataType NNAPIConvert::getDataType(OperandCode type) +{ + switch (type) + { + case ANEURALNETWORKS_FLOAT32: + case ANEURALNETWORKS_TENSOR_FLOAT32: + return DataType::FLOAT32; + case ANEURALNETWORKS_INT32: + case ANEURALNETWORKS_TENSOR_INT32: + return DataType::INT32; + case ANEURALNETWORKS_UINT32: + return DataType::UINT32; + case ANEURALNETWORKS_TENSOR_QUANT8_ASYMM: + return DataType::QUANT8_ASYMM; + case ANEURALNETWORKS_TENSOR_QUANT8_SYMM: + return DataType::QUANT8_SYMM; + case ANEURALNETWORKS_BOOL: + case ANEURALNETWORKS_TENSOR_BOOL8: + return DataType::BOOL8; + default: + throw std::runtime_error("Unsupported type"); + } +} + +TypeInfo NNAPIConvert::getTypeInfo(const ANeuralNetworksOperandType *type) +{ + return TypeInfo(getDataType((OperandCode)(type->type)), type->scale, type->zeroPoint); +} + +Shape NNAPIConvert::getShape(const ANeuralNetworksOperandType *type) +{ + Shape shape(type->dimensionCount); + + for (uint32_t axis = 0; axis < type->dimensionCount; ++axis) + { + shape.dim(axis) = type->dimensions[axis]; + } + + return shape; +} + +size_t NNAPIConvert::calculateSizeFromType(const ANeuralNetworksOperandType *type) +{ + auto shape = getShape(type); + auto data_type = getDataType((OperandCode)(type->type)); + + return shape.num_elements() * sizeOfDataType(data_type); +} + +Activation NNAPIConvert::getFusedActivation(FuseCode act) +{ + switch (act) + { + case ANEURALNETWORKS_FUSED_NONE: + return Activation::NONE; + case ANEURALNETWORKS_FUSED_RELU: + return Activation::RELU; + case ANEURALNETWORKS_FUSED_RELU1: + return Activation::RELU1; + case ANEURALNETWORKS_FUSED_RELU6: + return Activation::RELU6; + default: + throw std::runtime_error("Unsupported activation type"); + } +} + +PaddingType NNAPIConvert::getPaddingType(PaddingCode type) +{ + switch (type) + { + case ANEURALNETWORKS_PADDING_SAME: + return PaddingType::SAME; + case ANEURALNETWORKS_PADDING_VALID: + return 
PaddingType::VALID; + default: + throw std::runtime_error("Unsupported type"); + } +} diff --git a/runtime/onert/frontend/nnapi/wrapper/NNAPIConvert.h b/runtime/onert/frontend/nnapi/wrapper/NNAPIConvert.h new file mode 100644 index 000000000..4fd985e6e --- /dev/null +++ b/runtime/onert/frontend/nnapi/wrapper/NNAPIConvert.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file NNAPIConvert.h + * @brief This file contains convereter(s)\n + * from NNAPI frontend's struct to onert's internal struct + */ +#ifndef __ONERT_NNAPI_CONVERT_H__ +#define __ONERT_NNAPI_CONVERT_H__ + +#include <NeuralNetworks.h> + +#include <ir/TypeInfo.h> +#include <ir/Shape.h> +#include <ir/Padding.h> +#include <ir/InternalType.h> + +class NNAPIConvert +{ + +public: + /** + * @brief Convert data type from NNAPI to internal data type + * @param[in] type NNAPI's data type + * @return onert's internal data type + */ + static onert::ir::DataType getDataType(OperandCode type); + + /** + * @brief Convert operand type info from NNAPI to interanl operand type info + * @param[in] type NNAPI's operand type + * @return onert's internal operand type info + */ + static onert::ir::TypeInfo getTypeInfo(const ANeuralNetworksOperandType *type); + + /** + * @brief Convert operand shape info from NNAPI to internal operand shape + * @param[in] type NNAPI's operand type + * @return onert's internal 
operand shape + */ + static onert::ir::Shape getShape(const ANeuralNetworksOperandType *type); + + /** + * @brief Calcaulate operand size from NNAPI type + * @param[in] type NNAPI's operand type + * @return Operand size + */ + static size_t calculateSizeFromType(const ANeuralNetworksOperandType *type); + + /** + * @brief Convert NNAPI FuseCode to internal activation type + * @param[in] act NNAPI's FuseCode type + * @return onert's internal activation type + */ + static onert::ir::Activation getFusedActivation(FuseCode act); + + /** + * @brief Convert NNAPI PaddingCode to internal padding type + * @param[in] type NNAPI's PaddingCode type + * @return onert's internal padding type + */ + static onert::ir::PaddingType getPaddingType(PaddingCode type); +}; + +#endif // __ONERT_NNAPI_CONVERT_H__ diff --git a/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc b/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc new file mode 100644 index 000000000..10e7c0341 --- /dev/null +++ b/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc @@ -0,0 +1,1899 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "OperationFactory.h" +#include "NNAPIConvert.h" + +#include <ir/Operations.Include.h> +#include <string.h> + +namespace +{ +using namespace onert::ir; + +void replaceDataType(Operands &operands, const OperandIndex &index, const DataType type) +{ + assert(operands.exist(index)); + operands.at(index).type(type); +} + +ExplicitPadding makeExplicitPadding(Operands &operands, const OperandIndex &left_index, + const OperandIndex &right_index, const OperandIndex &top_index, + const OperandIndex &bottom_index) +{ + auto left = operands.at(left_index).asScalar<int32_t>(); + auto right = operands.at(right_index).asScalar<int32_t>(); + auto top = operands.at(top_index).asScalar<int32_t>(); + auto bottom = operands.at(bottom_index).asScalar<int32_t>(); + + if (left < 0 || right < 0 || top < 0 || bottom < 0) + { + throw std::runtime_error{"Cannot handle negative explicit padding value"}; + } + + ExplicitPadding param; + param.left = static_cast<uint32_t>(left); + param.right = static_cast<uint32_t>(right); + param.top = static_cast<uint32_t>(top); + param.bottom = static_cast<uint32_t>(bottom); + + return param; +} + +Stride makeStride(Operands &operands, const OperandIndex &horizontal_index, + const OperandIndex &vertical_index) +{ + auto horizontal = operands.at(horizontal_index).asScalar<int32_t>(); + auto vertical = operands.at(vertical_index).asScalar<int32_t>(); + + if (vertical < 0 || horizontal < 0) + { + throw std::runtime_error{"Cannot handle negative stride value"}; + } + + Stride stride; + stride.horizontal = static_cast<uint32_t>(horizontal); + stride.vertical = static_cast<uint32_t>(vertical); + + return stride; +} + +uint32_t getUint32Scalar(Operands &operands, const OperandIndex index) +{ + auto int32_value = operands.at(index).asScalar<int32_t>(); + if (int32_value < 0) + { + throw std::runtime_error{"Cannot handle negative value"}; + } + + return static_cast<uint32_t>(int32_value); +} + +} // namespace + +OperationFactory 
&OperationFactory::get() +{ + static OperationFactory factory; + return factory; +} + +OperationFactory::OperationFactory() +{ + _map[ANEURALNETWORKS_BATCH_TO_SPACE_ND] = [](const OperationFactory::Param &init_param, + Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + // 1 -> Block size Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + return new operation::BatchToSpaceND{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_DEPTHWISE_CONV_2D] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert((init_param.input_count == 8 || init_param.input_count == 11) && + init_param.output_count == 1); + + // In common + // 0 -> IFM Tensor Index + // 1 -> Kernel Tensor Index + // 2 -> Bias Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + operation::DepthwiseConv2D::Param param; + if (init_param.input_count == 8) + { + // Imlicit Padding case + // Each input should be interpreted as follows: + // + // 3 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index + // 4 -> Stride (width) Index + // 5 -> Stride (height) INdex + // 6 -> Depthwise multiplier + // 7 -> Activation Index + + const auto padding_index = OperandIndex{init_param.inputs[3]}; + const auto hstride_index = OperandIndex{init_param.inputs[4]}; + const auto vstride_index = OperandIndex{init_param.inputs[5]}; + const auto multiplier_index = OperandIndex{init_param.inputs[6]}; + const auto activation_index = OperandIndex{init_param.inputs[7]}; + + param.padding.type = + NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>()); + param.stride = makeStride(operands, hstride_index, vstride_index); + param.multiplier 
= getUint32Scalar(operands, multiplier_index); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + } + else + { + // Explicit Padding case + // Each input should be interpreted as follows: + // + // 3 -> Padding On the Left + // 4 -> Padding On the Right + // 5 -> Padding On the Top + // 6 -> Padding On the Bottom + // 7 -> Stride (width) Index + // 8 -> Stride (height) Index + // 9 -> Depthwise multiplier + // 10-> Activation Index + + const auto padding_left_index = OperandIndex{init_param.inputs[3]}; + const auto padding_right_index = OperandIndex{init_param.inputs[4]}; + const auto padding_top_index = OperandIndex{init_param.inputs[5]}; + const auto padding_bottom_index = OperandIndex{init_param.inputs[6]}; + const auto hstride_index = OperandIndex{init_param.inputs[7]}; + const auto vstride_index = OperandIndex{init_param.inputs[8]}; + const auto multiplier_index = OperandIndex{init_param.inputs[9]}; + const auto activation_index = OperandIndex{init_param.inputs[10]}; + + param.padding.type = PaddingType::EXPLICIT; + param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index, + padding_top_index, padding_bottom_index); + param.stride = makeStride(operands, hstride_index, vstride_index); + param.multiplier = getUint32Scalar(operands, multiplier_index); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + } + + return new operation::DepthwiseConv2D{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_MAX_POOL_2D] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 7 || init_param.input_count == 10); + assert(init_param.output_count == 1); + + // In common + // 0 -> IFM Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + operation::MaxPool2D::Param param; + if (init_param.input_count == 
7) // support implicit padding + { + // Each input should be interpreted as follows: + // + // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index + // 2 -> Horizontal (over width) Stride Index + // 3 -> Vertial (over height) Stride Index + // 4 -> Filter Width Index + // 5 -> Filter Height Index + // 6 -> FuseCode (activation) Index + + const auto padding_index = OperandIndex{init_param.inputs[1]}; + const auto hstride_index = OperandIndex{init_param.inputs[2]}; + const auto vstride_index = OperandIndex{init_param.inputs[3]}; + const auto kw_index = OperandIndex{init_param.inputs[4]}; + const auto kh_index = OperandIndex{init_param.inputs[5]}; + const auto activation_index = OperandIndex{init_param.inputs[6]}; + + param.padding.type = + NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>()); + param.stride = makeStride(operands, hstride_index, vstride_index); + param.kw = getUint32Scalar(operands, kw_index); + param.kh = operands.at(kh_index).asScalar<uint32_t>(); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + } + else if (init_param.input_count == 10) // support explicit padding + { + // Each input should be interpreted as follows: + // + // 1 -> Padding_left index + // 2 -> Padding_right index + // 3 -> Padding_top index + // 4 -> Padding_bottom index + // 5 -> Horizontal (over width) Stride Index + // 6 -> Vertial (over height) Stride Index + // 7 -> Filter Width Index + // 8 -> Filter Height Index + // 9 -> FuseCode (activation) Index + + const auto padding_left_index = OperandIndex{init_param.inputs[1]}; + const auto padding_right_index = OperandIndex{init_param.inputs[2]}; + const auto padding_top_index = OperandIndex{init_param.inputs[3]}; + const auto padding_bottom_index = OperandIndex{init_param.inputs[4]}; + const auto hstride_index = OperandIndex{init_param.inputs[5]}; + const auto vstride_index = 
OperandIndex{init_param.inputs[6]}; + const auto kw_index = OperandIndex{init_param.inputs[7]}; + const auto kh_index = OperandIndex{init_param.inputs[8]}; + const auto activation_index = OperandIndex{init_param.inputs[9]}; + + param.padding.type = PaddingType::EXPLICIT; + param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index, + padding_top_index, padding_bottom_index); + param.stride = makeStride(operands, hstride_index, vstride_index); + param.kw = getUint32Scalar(operands, kw_index); + param.kh = getUint32Scalar(operands, kh_index); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + } + return new operation::MaxPool2D{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_AVERAGE_POOL_2D] = [](const OperationFactory::Param &init_param, + Operands &operands) { + // TODO We may reuse code here for MAX_POOL_2D. Seems like these two are identical + assert(init_param.input_count == 7 || init_param.input_count == 10); + assert(init_param.output_count == 1); + + // In common + // 0 -> IFM Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + operation::AvgPool2D::Param param; + if (init_param.input_count == 7) // support implicit padding + { + // Each input should be interpreted as follows: + // + // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index + // 2 -> Horizontal (over width) Stride Index + // 3 -> Vertial (over height) Stride Index + // 4 -> Filter Width Index + // 5 -> Filter Height Index + // 6 -> FuseCode (activation) Index + + const auto padding_index = OperandIndex{init_param.inputs[1]}; + const auto hstride_index = OperandIndex{init_param.inputs[2]}; + const auto vstride_index = OperandIndex{init_param.inputs[3]}; + const auto kw_index = OperandIndex{init_param.inputs[4]}; + const auto kh_index = OperandIndex{init_param.inputs[5]}; + const auto 
activation_index = OperandIndex{init_param.inputs[6]}; + + param.padding.type = + NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>()); + param.stride = makeStride(operands, hstride_index, vstride_index); + param.kw = getUint32Scalar(operands, kw_index); + param.kh = getUint32Scalar(operands, kh_index); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + } + else if (init_param.input_count == 10) // support explicit padding + { + // Each input should be interpreted as follows: + // + // 1 -> Padding_left index + // 2 -> Padding_right index + // 3 -> Padding_top index + // 4 -> Padding_bottom index + // 5 -> Horizontal (over width) Stride Index + // 6 -> Vertial (over height) Stride Index + // 7 -> Filter Width Index + // 8 -> Filter Height Index + // 9 -> FuseCode (activation) Index + + const auto padding_left_index = OperandIndex{init_param.inputs[1]}; + const auto padding_right_index = OperandIndex{init_param.inputs[2]}; + const auto padding_top_index = OperandIndex{init_param.inputs[3]}; + const auto padding_bottom_index = OperandIndex{init_param.inputs[4]}; + const auto hstride_index = OperandIndex{init_param.inputs[5]}; + const auto vstride_index = OperandIndex{init_param.inputs[6]}; + const auto kw_index = OperandIndex{init_param.inputs[7]}; + const auto kh_index = OperandIndex{init_param.inputs[8]}; + const auto activation_index = OperandIndex{init_param.inputs[9]}; + + param.padding.type = PaddingType::EXPLICIT; + param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index, + padding_top_index, padding_bottom_index); + param.stride = makeStride(operands, hstride_index, vstride_index); + param.kw = getUint32Scalar(operands, kw_index); + param.kh = getUint32Scalar(operands, kh_index); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + } + + return new operation::AvgPool2D{inputs, 
outputs, param}; + }; + + _map[ANEURALNETWORKS_CONCATENATION] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count >= 2); // At least one one input tensor and axis + assert(init_param.output_count == 1); + + // When there are N + 1 inputs, each input should be interpreted as follows: + // + // [0, N) -> Input tensors + // N -> Axis + // + + OperandIndexSequence inputs; + for (uint32_t n = 0; n < init_param.input_count - 1; ++n) + { + inputs.append(OperandIndex{init_param.inputs[n]}); + } + OperandIndexSequence outputs{init_param.outputs[0]}; + + operation::Concat::Param param; + const OperandIndex axis_index{init_param.inputs[init_param.input_count - 1]}; + param.axis = operands.at(axis_index).asScalar<int32_t>(); + param.rank = operands.at(outputs.at(0)).shape().rank(); + + return new operation::Concat{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_RESHAPE] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + // Each input should be interpreted as follows: + // + // 0 -> A tensor, specifying the tensor to be reshaped. + // 1 -> A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32, defining the shape of the output + // tensor + + // TODO Second input should be shape tensor (init_param.inputs[1]) + // Currently unused since assume that it is same with output tensor size + OperandIndexSequence inputs{init_param.inputs[0] /* , init_param.inputs[1] */}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + return new operation::Reshape{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_FULLY_CONNECTED] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 4 && init_param.output_count == 1); + + // Each input should be interpreted as follows: + // + // 0 -> A tensor, specifying the input. 
+ // 1 -> A 2-D tensor, specifying the weights + // 2 -> A 1-D tensor, specifying the bias + // 3 -> An INT32 value, and has to be one of the FuseCode values + + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + operation::FullyConnected::Param param; + const auto activation_index = OperandIndex{init_param.inputs[3]}; + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + + return new operation::FullyConnected{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_SOFTMAX] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + // Each input should be interpreted as follows: + // + // 0 -> A 2-D or 4-D tensor, specifying the tensor to be reshaped. + // 1 -> FLOAT32 value, specifying the positive scaling factor for the exponent, beta. + + OperandIndexSequence inputs{init_param.inputs[0]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + const auto beta_index = OperandIndex{init_param.inputs[1]}; + + operation::Softmax::Param param; + param.beta = operands.at(beta_index).asScalar<float>(); + + return new operation::Softmax{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_CAST] = [](const OperationFactory::Param &init_param, Operands &operands) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // 0 -> input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + // NNAPI uses QUANT8_ASYMM to represent UINT8 type for ANEURALNETWORKS_CAST's input/output + if (operands.at(inputs.at(0)).typeInfo().type() == DataType::QUANT8_ASYMM) + { + replaceDataType(operands, inputs.at(0), DataType::UINT8); + } + if (operands.at(outputs.at(0)).typeInfo().type() == 
DataType::QUANT8_ASYMM) + { + replaceDataType(operands, outputs.at(0), DataType::UINT8); + } + + return new operation::Cast{inputs, outputs}; + }; + + // ANEURALNETWORKS_CAST_EX is deprecated + // TODO Remove ANEURALNETWORKS_CAST_EX + _map[ANEURALNETWORKS_CAST_EX] = _map[ANEURALNETWORKS_CAST]; + + _map[ANEURALNETWORKS_CONV_2D] = [](const OperationFactory::Param &init_param, + Operands &operands) { + using operation::Conv2D; + + // inputCount is either 7 or 10 acccording to NN API specification. + // - Padding is implicit when inputCount is 7 + // - Padding is explicit when inputCount is 10 + assert(init_param.input_count == 7 || init_param.input_count == 10); + assert(init_param.output_count == 1); + + // 0 -> IFM Tensor Index + // 1 -> Kernel Tensor Index + // 2 -> Bias Tensor Index + + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + Conv2D::Param param; + + if (init_param.input_count == 7) // support implicit padding + { + // Each input should be interpreted as follows: + // + // 3 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index + // 4 -> Stride (width) Index + // 5 -> Stride (height) INdex + // 6 -> Activation Index + + const auto padding_index = OperandIndex{init_param.inputs[3]}; + const auto hstride_index = OperandIndex{init_param.inputs[4]}; + const auto vstride_index = OperandIndex{init_param.inputs[5]}; + const auto activation_index = OperandIndex{init_param.inputs[6]}; + + param.padding.type = + NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>()); + param.stride = makeStride(operands, hstride_index, vstride_index); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + } + else if (init_param.input_count == 10) // support explicit padding + { + // Each input should be interpreted as follows: + // + // 3 -> Padding_left index + // 
4 -> Padding_right index + // 5 -> Padding_top index + // 6 -> Padding_bottom index + // 7 -> Stride (width) Index + // 8 -> Stride (height) INdex + // 9 -> Activation Index + + const auto padding_left_index = OperandIndex{init_param.inputs[3]}; + const auto padding_right_index = OperandIndex{init_param.inputs[4]}; + const auto padding_top_index = OperandIndex{init_param.inputs[5]}; + const auto padding_bottom_index = OperandIndex{init_param.inputs[6]}; + const auto hstride_index = OperandIndex{init_param.inputs[7]}; + const auto vstride_index = OperandIndex{init_param.inputs[8]}; + const auto activation_index = OperandIndex{init_param.inputs[9]}; + + param.padding.type = PaddingType::EXPLICIT; + param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index, + padding_top_index, padding_bottom_index); + param.stride = makeStride(operands, hstride_index, vstride_index); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + } + + return new Conv2D{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_ADD] = [](const OperationFactory::Param &init_param, Operands &operands) { + assert(init_param.input_count == 3); + assert(init_param.output_count == 1); + + // Each input should be interpreted as follows: + // + // 0 -> Lefthand side operand + // 1 -> Righthand side operand + + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + operation::Add::Param param; + + const auto activation_index = OperandIndex{init_param.inputs[2]}; + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + + return new operation::Add{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_REDUCE_SUM] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 3); + assert(init_param.output_count == 1); + + // Each input should be 
interpreted as follows: + // + // 0 -> Input Tensor Index + // 1 -> Reduced Axes Tensor Index + // 2 -> keep_dims Index + + OperandIndexSequence inputs{init_param.inputs[0]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + std::vector<std::int32_t> axes = + operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>(); + + operation::ReduceSum::Param param; + param.axes.assign(axes.cbegin(), axes.cend()); + param.keep_dims = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int8_t>() != 0; + param.rank = operands.at(inputs.at(0)).shape().rank(); + + return new operation::ReduceSum{inputs, outputs, param}; + }; + + // ANEURALNETWORKS_REDUCE_SUM_EX is deprecated + // TODO Remove ANEURALNETWORKS_REDUCE_SUM_EX + _map[ANEURALNETWORKS_REDUCE_SUM_EX] = _map[ANEURALNETWORKS_REDUCE_SUM]; + + _map[ANEURALNETWORKS_SUB] = [](const OperationFactory::Param &init_param, Operands &operands) { + assert(init_param.input_count == 3); + assert(init_param.output_count == 1); + + // Each input should be interpreted as follows: + // + // 0 -> Lefthand side operand + // 1 -> Righthand side operand + + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + operation::Sub::Param param; + + const auto activation_index = OperandIndex{init_param.inputs[2]}; + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + + return new operation::Sub{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_SLICE] = [](const OperationFactory::Param &init_param, Operands &operands) { + assert(init_param.input_count == 3 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + // 1 -> Begins Tensor Index + // 2 -> Sizes Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]}; + + 
operation::Slice::Param param; + param.rank = operands.at(inputs.at(0)).shape().rank(); + + return new operation::Slice{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_STRIDED_SLICE] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 7 && init_param.output_count == 1); + + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2], + init_param.inputs[3]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 1 -> A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the starts of + // the dimensions of the input tensor to be sliced. The length must be + // of rank(input0). + // 2 -> A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the ends of + // the dimensions of the input tensor to be sliced. The length must be + // of rank(input0). + // 3 -> A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the strides of + // the dimensions of the input tensor to be sliced. The length must be + // of rank(input0). + // 4 -> An {@link ANEURALNETWORKS_INT32} scalar, begin_mask. If the ith bit + // of begin_mask is set, begin[i] is ignored and the fullest possible + // range in that dimension is used instead. + // 5 -> An {@link ANEURALNETWORKS_INT32} scalar, end_mask. If the ith bit of + // end_mask is set, end[i] is ignored and the fullest possible range in + // that dimension is used instead. + // 6 -> An {@link ANEURALNETWORKS_INT32} scalar, shrink_axis_mask. An int32 + // mask. If the ith bit of shrink_axis_mask is set, it implies that the + // ith specification shrinks the dimensionality by 1. A slice of size 1 + // starting from begin[i] in the dimension must be preserved. 
+ + operation::StridedSlice::Param param; + + param.begin_mask = operands.at(OperandIndex{init_param.inputs[4]}).asScalar<std::int32_t>(); + param.end_mask = operands.at(OperandIndex{init_param.inputs[5]}).asScalar<std::int32_t>(); + param.shrink_axis_mask = + operands.at(OperandIndex{init_param.inputs[6]}).asScalar<std::int32_t>(); + param.rank = operands.at(inputs.at(0)).shape().rank(); + + return new operation::StridedSlice{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_TRANSPOSE] = [](const OperationFactory::Param &init_param, + Operands &operands) { + // TODO make this work with init_param.input_count == 1 (when permutation vector is optional) + + // Inputs + // 0: An n-D tensor, specifying the tensor to be transposed. + // 1: An optional 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, + // the permutation of the dimensions of the input tensor. + // The returned tensor's dimension i corresponds to the input dimension + // perm[i]. If perm is not given, it is set to (n-1...0), where n is the + // rank of the input tensor. Hence by default, this operation performs a + // regular matrix transpose on 2-D input Tensors. 
+ assert(init_param.input_count == 2); + assert(init_param.output_count == 1); + + OperandIndexSequence inputs{init_param.inputs[0]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + std::vector<std::int32_t> perm = + operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>(); + + operation::Transpose::Param param; + param.perm.assign(perm.cbegin(), perm.cend()); + param.rank = operands.at(inputs.at(0)).shape().rank(); + + return new operation::Transpose{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_MUL] = [](const OperationFactory::Param &init_param, Operands &operands) { + assert(init_param.input_count == 3 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> LHS Tensor Index + // 1 -> RHS Tensor Index + // 2 -> Activation Index + + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + operation::Mul::Param param; + + const auto activation_index = OperandIndex{init_param.inputs[2]}; + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + + return new operation::Mul{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_SQUEEZE] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 1 || init_param.input_count == 2); + assert(init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> An n-D tensor, the tensor to be squeezed. + // 1 -> An optional 1-D tensor of ANEURALNETWORKS_TENSOR_INT32. The dimensions to squeeze. + // If specified only squeezes the dimensions listed. Otherwise, squeezes all dimensions. + // The dimension index starts at 0. An error must be reported if squeezing a dimension that + // is not 1. 
+ + // Add mandatory input index + OperandIndexSequence inputs{init_param.inputs[0]}; + + // Add dims index if specified + operation::Squeeze::Param param{}; + if (init_param.input_count == 2) + { + auto squeeze_dims_idx = OperandIndex{init_param.inputs[1]}; + assert(operands.at(squeeze_dims_idx).shape().rank() == 1); + assert(operands.at(squeeze_dims_idx).shape().dim(0) >= 0); + assert(static_cast<uint32_t>(operands.at(squeeze_dims_idx).shape().dim(0)) <= + sizeof(param.dims)); + param.ndim = operands.at(squeeze_dims_idx).shape().dim(0); + if (param.ndim > 0) + { + assert(operands.at(squeeze_dims_idx).data()); + memcpy(param.dims, operands.at(squeeze_dims_idx).data()->base(), + param.ndim * sizeof(param.dims[0])); + } + } + + return new operation::Squeeze{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_TANH] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + return new operation::Tanh{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_LOGISTIC] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + return new operation::Logistic{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_DIV] = [](const OperationFactory::Param &init_param, Operands &operands) { + assert(init_param.input_count == 3 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> LHS Tensor Index + // 1 -> RHS Tensor Index + // 2 -> Activation Index 
+ OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + operation::Div::Param param; + + const auto activation_index = OperandIndex{init_param.inputs[2]}; + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + + return new operation::Div{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_EXP] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + return new operation::Exp{inputs, outputs}; + }; + + // ANEURALNETWORKS_EXP_EX is deprecated + // TODO Remove ANEURALNETWORKS_EXP_EX + _map[ANEURALNETWORKS_EXP_EX] = _map[ANEURALNETWORKS_EXP]; + + _map[ANEURALNETWORKS_GREATER] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + operation::Comparison::Param param; + param.comparison_type = operation::Comparison::ComparisonType::Greater; + + return new operation::Comparison{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_GREATER_EQUAL] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + operation::Comparison::Param param; + param.comparison_type = 
operation::Comparison::ComparisonType::GreaterEqual; + + return new operation::Comparison{inputs, outputs, param}; + }; + + // ANEURALNETWORKS_GREATER_EQUAL_EX is deprecated + // TODO Remove ANEURALNETWORKS_GREATER_EQUAL_EX + _map[ANEURALNETWORKS_GREATER_EQUAL_EX] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + operation::Comparison::Param param; + param.comparison_type = operation::Comparison::ComparisonType::GreaterEqual; + + // Output operand type must be boolean + replaceDataType(operands, outputs.at(0), DataType::BOOL8); + + return new operation::Comparison{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_LESS] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + operation::Comparison::Param param; + param.comparison_type = operation::Comparison::ComparisonType::Less; + + return new operation::Comparison{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_LESS_EQUAL] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + operation::Comparison::Param param; + 
param.comparison_type = operation::Comparison::ComparisonType::LessEqual; + + return new operation::Comparison{inputs, outputs, param}; + }; + + // ANEURALNETWORKS_LESS_EX is deprecated + // TODO Remove ANEURALNETWORKS_LESS_EX + _map[ANEURALNETWORKS_LESS_EX] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + operation::Comparison::Param param; + param.comparison_type = operation::Comparison::ComparisonType::Less; + + // Output operand type must be boolean + replaceDataType(operands, outputs.at(0), DataType::BOOL8); + + return new operation::Comparison{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_REDUCE_MAX] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 3 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + // 1 -> Axis Tensor Index + // 2 -> keep_dims Index + OperandIndexSequence inputs{init_param.inputs[0]}; + std::vector<std::int32_t> axes = + operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>(); + + operation::ReduceMax::Param param; + param.axes.assign(axes.cbegin(), axes.cend()); + param.keep_dims = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int8_t>() != 0; + param.rank = operands.at(inputs.at(0)).shape().rank(); + + return new operation::ReduceMax{inputs, outputs, param}; + }; + + // ANEURALNETWORKS_REDUCE_MAX_EX is deprecated + // TODO Remove ANEURALNETWORKS_REDUCE_MAX_EX + _map[ANEURALNETWORKS_REDUCE_MAX_EX] = _map[ANEURALNETWORKS_REDUCE_MAX]; + + _map[ANEURALNETWORKS_NOT_EQUAL] = [](const 
OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input1 Tensor Index + // 1 -> input2 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + operation::Comparison::Param param; + param.comparison_type = operation::Comparison::ComparisonType::NotEqual; + + return new operation::Comparison{inputs, outputs, param}; + }; + + // ANEURALNETWORKS_NOT_EQUAL_EX is deprecated + // TODO Remove ANEURALNETWORKS_NOT_EQUAL_EX + _map[ANEURALNETWORKS_NOT_EQUAL_EX] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input1 Tensor Index + // 1 -> input2 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + operation::Comparison::Param param; + param.comparison_type = operation::Comparison::ComparisonType::NotEqual; + + // Output operand type must be boolean + replaceDataType(operands, outputs.at(0), DataType::BOOL8); + + return new operation::Comparison{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_LOGICAL_AND] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + return new operation::LogicalAnd{inputs, outputs}; + }; + + // ANEURALNETWORKS_LOGICAL_AND_EX is deprecated + // TODO Remove ANEURALNETWORKS_LOGICAL_AND_EX + _map[ANEURALNETWORKS_LOGICAL_AND_EX] = [](const 
OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + // This operation's operands must be boolean type. + replaceDataType(operands, inputs.at(0), DataType::BOOL8); + replaceDataType(operands, inputs.at(1), DataType::BOOL8); + replaceDataType(operands, outputs.at(0), DataType::BOOL8); + + return new operation::LogicalAnd{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_RSQRT] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + return new operation::RSQRT{inputs, outputs}; + }; + + // ANEURALNETWORKS_RSQRT_EX is deprecated + // TODO Remove ANEURALNETWORKS_RSQRT_EX + _map[ANEURALNETWORKS_RSQRT_EX] = _map[ANEURALNETWORKS_RSQRT]; + + _map[ANEURALNETWORKS_RELU] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + return new operation::ReLU{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_RESIZE_BILINEAR] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 3 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> IFM Index + // 1 -> Height 
Index + // 2 -> Width Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + operation::ResizeBilinear::Param param; + param.height_out = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<int32_t>(); + param.width_out = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int32_t>(); + + return new operation::ResizeBilinear{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_RELU1] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + return new operation::ReLU1{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_RELU6] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + return new operation::ReLU6{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_RNN] = [](const OperationFactory::Param &init_param, Operands &operands) { + assert(init_param.input_count == 6 && init_param.output_count == 2); + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + // 1 -> Weights Tensor Index + // 2 -> Recurrent Weights Tensor Index + // 3 -> Bias Tensor Index + // 4 -> Hidden state (in) Index + // 5 -> Activation Index + + OperandIndexSequence inputs; + for (uint32_t n = 0; n < init_param.input_count - 1; ++n) + { + inputs.append(OperandIndex{init_param.inputs[n]}); + } + OperandIndexSequence outputs; + for (uint32_t n = 0; n < init_param.output_count; ++n) + { + outputs.append(OperandIndex{init_param.outputs[n]}); + } + + operation::RNN::Param param; + const auto activation_index = 
OperandIndex{init_param.inputs[5]}; + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + + return new operation::RNN{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_FLOOR] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // 0 -> input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + return new operation::Floor{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_SPACE_TO_BATCH_ND] = [](const OperationFactory::Param &init_param, + Operands &) { + assert(init_param.input_count == 3 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + // 1 -> Block size Index + // 2 -> Paddings Index + OperandIndexSequence inputs; + for (uint32_t n = 0; n < init_param.input_count; ++n) + { + inputs.append(OperandIndex{init_param.inputs[n]}); + } + + return new operation::SpaceToBatchND{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_SPACE_TO_DEPTH] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + // 1 -> Block size Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + operation::SpaceToDepth::Param param; + param.block_size = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>(); + + return new operation::SpaceToDepth{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_L2_POOL_2D] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 10 || init_param.input_count == 7); + 
assert(init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> IFM Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + operation::L2Pool2D::Param param; + + if (init_param.input_count == 7) // Imlicit Padding case + { + // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index + // 2 -> Horizontal (over width) Stride Index + // 3 -> Vertial (over height) Stride Index + // 4 -> Filter Width Index + // 5 -> Filter Height Index + // 6 -> FuseCode (activation) Index + const auto padding_index = OperandIndex{init_param.inputs[1]}; + const auto hstride_index = OperandIndex{init_param.inputs[2]}; + const auto vstride_index = OperandIndex{init_param.inputs[3]}; + const auto kw_index = OperandIndex{init_param.inputs[4]}; + const auto kh_index = OperandIndex{init_param.inputs[5]}; + const auto activation_index = OperandIndex{init_param.inputs[6]}; + + param.padding.type = + NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>()); + param.stride = makeStride(operands, hstride_index, vstride_index); + param.kw = getUint32Scalar(operands, kw_index); + param.kh = getUint32Scalar(operands, kh_index); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + } + else // Explicit Padding case + { + // 1 -> Padding_left index + // 2 -> Padding_right index + // 3 -> Padding_top index + // 4 -> Padding_bottom index + // 5 -> Horizontal (over width) Stride Index + // 6 -> Vertial (over height) Stride Index + // 7 -> Filter Width Index + // 8 -> Filter Height Index + // 9 -> FuseCode (activation) Index + const auto padding_left_index = OperandIndex{init_param.inputs[1]}; + const auto padding_right_index = OperandIndex{init_param.inputs[2]}; + const auto padding_top_index = OperandIndex{init_param.inputs[3]}; + const auto padding_bottom_index = 
OperandIndex{init_param.inputs[4]}; + const auto hstride_index = OperandIndex{init_param.inputs[5]}; + const auto vstride_index = OperandIndex{init_param.inputs[6]}; + const auto kw_index = OperandIndex{init_param.inputs[7]}; + const auto kh_index = OperandIndex{init_param.inputs[8]}; + const auto activation_index = OperandIndex{init_param.inputs[9]}; + + param.padding.type = PaddingType::EXPLICIT; + param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index, + padding_top_index, padding_bottom_index); + param.stride = makeStride(operands, hstride_index, vstride_index); + param.kw = getUint32Scalar(operands, kw_index); + param.kh = getUint32Scalar(operands, kh_index); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + } + + return new operation::L2Pool2D{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_EMBEDDING_LOOKUP] = [](const OperationFactory::Param &init_param, + Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Lookups Index + // 1 -> Values Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + return new operation::EmbeddingLookup{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_L2_NORMALIZATION] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // 0 -> input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + operation::L2Normalization::Param param; + param.rank = operands.at(inputs.at(0)).shape().rank(); + + return new operation::L2Normalization{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_HASHTABLE_LOOKUP] = [](const OperationFactory::Param 
&init_param, + Operands &) { + assert(init_param.input_count == 3 && init_param.output_count == 2); + + // Each output should be interpreted as follows: + // + // 0 -> Output Index + // 1 -> Hits Index + OperandIndexSequence outputs{init_param.outputs[0], init_param.outputs[1]}; + + // Each input should be interpreted as follows: + // + // 0 -> Lookups Index + // 1 -> Keys Index + // 2 -> Values Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]}; + + return new operation::HashtableLookup{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_PRELU] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input Tensor Index + // 1 -> alpha Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + return new operation::PReLU{inputs, outputs}; + }; + + // ANEURALNETWORKS_PRELU_EX is deprecated + // TODO Remove ANEURALNETWORKS_PRELU_EX + _map[ANEURALNETWORKS_PRELU_EX] = _map[ANEURALNETWORKS_PRELU]; + + _map[ANEURALNETWORKS_TRANSPOSE_CONV_EX] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 6 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Output Shape Index + // 1 -> Weights Index + // 2 -> Input Tensor Index + // 3 -> Padding Type + // 4 -> Stride width + // 5 -> Stride height + + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]}; + + operation::TransposeConv::Param param; + + const auto padding_index = OperandIndex{init_param.inputs[3]}; + const auto hstride_index = OperandIndex{init_param.inputs[4]}; + const auto vstride_index = OperandIndex{init_param.inputs[5]}; + + 
param.padding.type = + NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>()); + param.stride = makeStride(operands, hstride_index, vstride_index); + + return new operation::TransposeConv{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_SQRT] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // 0 -> input Tensor Index + + OperandIndexSequence inputs{init_param.inputs[0]}; + return new operation::SQRT{inputs, outputs}; + }; + + // ANEURALNETWORKS_SQRT_EX is deprecated + // TODO Remove ANEURALNETWORKS_SQRT_EX + _map[ANEURALNETWORKS_SQRT_EX] = _map[ANEURALNETWORKS_SQRT]; + + _map[ANEURALNETWORKS_LOGICAL_OR] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + return new operation::LogicalOr{inputs, outputs}; + }; + + // ANEURALNETWORKS_LOGICAL_OR_EX is deprecated + // TODO Remove ANEURALNETWORKS_LOGICAL_OR_EX + _map[ANEURALNETWORKS_LOGICAL_OR_EX] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + // This operation's operands must be boolean type. 
+ replaceDataType(operands, inputs.at(0), DataType::BOOL8); + replaceDataType(operands, inputs.at(1), DataType::BOOL8); + replaceDataType(operands, outputs.at(0), DataType::BOOL8); + + return new operation::LogicalOr{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_LOGICAL_NOT] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + return new operation::LogicalNot{inputs, outputs}; + }; + + // ANEURALNETWORKS_LOGICAL_NOT_EX is deprecated + // TODO Remove ANEURALNETWORKS_LOGICAL_NOT_EX + _map[ANEURALNETWORKS_LOGICAL_NOT_EX] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + // This operation's operands must be boolean type. 
+ replaceDataType(operands, inputs.at(0), DataType::BOOL8); + replaceDataType(operands, outputs.at(0), DataType::BOOL8); + + return new operation::LogicalNot{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_LSTM] = [](const OperationFactory::Param &init_param, Operands &operands) { + assert(init_param.input_count == 23 && init_param.output_count == 4); + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + // 1 -> Input to Input Tensor Index + // 2 -> Input to Forget Tensor Index + // 3 -> Input to Cell Tensor Index + // 4 -> Input to Output Tensor Index + // 5 -> Recurrent to Input Weights Tensor Index + // 6 -> Recurrent to Forget Weights Tensor Index + // 7 -> Recurrent to Cell Weights Tensor Index + // 8 -> Recurrent to Output Weights Tensor Index + // 9 -> Cell to Input Weights Tensor Index + // 10 -> Cell to Forget Weights Tensor Index + // 11 -> Cell to Output Weights Tensor Index + // 12 -> Input Gate Bias Tensor Index + // 13 -> Forget Gate Bias Tensor Index + // 14 -> Cell Bias Tensor Index + // 15 -> Output Gate Bias Tensor Index + // 16 -> Projection Weights Tensor Index + // 17 -> Projection Bias Tensor Index + // 18 -> Output State In Tensor Index + // 19 -> Cell State In Tensor Index + OperandIndexSequence inputs; + for (uint32_t n = 0; n < init_param.input_count - 3; ++n) + { + inputs.append(OperandIndex{init_param.inputs[n]}); + } + + // Each output should be interpreted as follows: + // + // 0 -> Scratch Buffer Tensor Index + // 1 -> Output State Out Tensor Index + // 2 -> Cell State Out Tensor Index + // 3 -> Output Tensor Index + OperandIndexSequence outputs; + for (uint32_t n = 0; n < init_param.output_count; ++n) + { + outputs.append(OperandIndex{init_param.outputs[n]}); + } + + operation::LSTM::Param param; + const auto activation_index = OperandIndex{init_param.inputs[20]}; + switch (operands.at(activation_index).asScalar<int32_t>()) + { + case 0: + param.activation = Activation::NONE; + break; + case 1: + 
param.activation = Activation::RELU; + break; + case 2: + param.activation = Activation::RELU1; + break; + case 3: + param.activation = Activation::RELU6; + break; + case 4: + param.activation = Activation::TANH; + break; + case 6: + param.activation = Activation::SIGMOID; + break; + default: + throw std::runtime_error("Unsupported activation type"); + break; + } + param.cell_threshold = operands.at(OperandIndex{init_param.inputs[21]}).asScalar<float>(); + param.projection_threshold = operands.at(OperandIndex{init_param.inputs[22]}).asScalar<float>(); + + return new operation::LSTM{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_EQUAL] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + operation::Comparison::Param param; + param.comparison_type = operation::Comparison::ComparisonType::Equal; + + return new operation::Comparison{inputs, outputs, param}; + }; + + // ANEURALNETWORKS_EQUAL_EX is deprecated + // TODO Remove ANEURALNETWORKS_EQUAL_EX + _map[ANEURALNETWORKS_EQUAL_EX] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + operation::Comparison::Param param; + param.comparison_type = operation::Comparison::ComparisonType::Equal; + + // Output operand type must be boolean + replaceDataType(operands, outputs.at(0), DataType::BOOL8); + + return new operation::Comparison{inputs, outputs, 
param};
  };

  _map[ANEURALNETWORKS_SQUARED_DIFFERENCE_EX] = [](const OperationFactory::Param &init_param,
                                                   Operands &) {
    assert(init_param.input_count == 2 && init_param.output_count == 1);

    OperandIndexSequence outputs{init_param.outputs[0]};

    // Each input should be interpreted as follows:
    //
    // 0 -> LHS Tensor Index
    // 1 -> RHS Tensor Index
    OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};

    return new operation::SquaredDifference{inputs, outputs};
  };

  _map[ANEURALNETWORKS_TOPK_V2] = [](const OperationFactory::Param &init_param,
                                     Operands &operands) {
    assert(init_param.input_count == 2 && init_param.output_count == 2);

    // Each output should be interpreted as follows:
    //
    // 0 -> Index for Output Values
    // 1 -> Index for Output Indices
    OperandIndexSequence outputs{init_param.outputs[0], init_param.outputs[1]};

    // Each input should be interpreted as follows:
    //
    // 0 -> Index for Input Data
    // 1 -> Index for K
    // K is a compile-time constant operand, so it is folded into the
    // operation parameter instead of being kept as a graph input.
    OperandIndexSequence inputs{init_param.inputs[0]};

    operation::TopKV2::Param param;
    param.k = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();

    return new operation::TopKV2{inputs, outputs, param};
  };

  // ANEURALNETWORKS_TOPK_V2_EX is deprecated
  // TODO Remove ANEURALNETWORKS_TOPK_V2_EX
  _map[ANEURALNETWORKS_TOPK_V2_EX] = _map[ANEURALNETWORKS_TOPK_V2];

  _map[ANEURALNETWORKS_GATHER] = [](const OperationFactory::Param &init_param, Operands &operands) {
    assert(init_param.input_count == 3 && init_param.output_count == 1);

    OperandIndexSequence outputs{init_param.outputs[0]};

    // Each input should be interpreted as follows:
    //
    // 0 -> input Tensor Index
    // 1 -> axis Index
    // 2 -> indices Tensor Index
    OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[2]};

    operation::Gather::Param param;
    param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<int32_t>();
    param.rank = operands.at(inputs.at(0)).shape().rank();

    return new operation::Gather{inputs, outputs, param};
  };

  // ANEURALNETWORKS_GATHER_EX is deprecated
  // TODO Remove ANEURALNETWORKS_GATHER_EX
  _map[ANEURALNETWORKS_GATHER_EX] = _map[ANEURALNETWORKS_GATHER];

  _map[ANEURALNETWORKS_NEG] = [](const OperationFactory::Param &init_param, Operands &) {
    assert(init_param.input_count == 1 && init_param.output_count == 1);

    OperandIndexSequence outputs{init_param.outputs[0]};

    // Each input should be interpreted as follows:
    //
    // 0 -> Input Tensor Index
    OperandIndexSequence inputs{init_param.inputs[0]};

    return new operation::Neg{inputs, outputs};
  };

  // ANEURALNETWORKS_NEG_EX is deprecated
  // TODO Remove ANEURALNETWORKS_NEG_EX
  _map[ANEURALNETWORKS_NEG_EX] = _map[ANEURALNETWORKS_NEG];

  _map[ANEURALNETWORKS_ABS] = [](const OperationFactory::Param &init_param, Operands &) {
    assert(init_param.input_count == 1 && init_param.output_count == 1);

    OperandIndexSequence outputs{init_param.outputs[0]};

    // Each input should be interpreted as follows:
    //
    // 0 -> Input Tensor Index
    OperandIndexSequence inputs{init_param.inputs[0]};

    return new operation::Abs{inputs, outputs};
  };

  // ANEURALNETWORKS_ABS_EX is deprecated
  // TODO Remove ANEURALNETWORKS_ABS_EX
  _map[ANEURALNETWORKS_ABS_EX] = _map[ANEURALNETWORKS_ABS];

  _map[ANEURALNETWORKS_ARGMAX] = [](const OperationFactory::Param &init_param, Operands &operands) {
    assert(init_param.input_count == 2 && init_param.output_count == 1);

    OperandIndexSequence outputs{init_param.outputs[0]};

    // Each input should be interpreted as follows:
    //
    // 0 -> Input Tensor Index
    // 1 -> Axis Tensor Index
    OperandIndexSequence inputs{init_param.inputs[0]};

    operation::ArgMax::Param param;
    param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
    param.rank = operands.at(inputs.at(0)).shape().rank();

    return new operation::ArgMax{inputs, outputs, param};
  };

  // ANEURALNETWORKS_ARGMAX_EX is deprecated
  // TODO Remove ANEURALNETWORKS_ARGMAX_EX
  _map[ANEURALNETWORKS_ARGMAX_EX] = _map[ANEURALNETWORKS_ARGMAX];

  _map[ANEURALNETWORKS_DEQUANTIZE] = [](const OperationFactory::Param &init_param, Operands &) {
    assert(init_param.input_count == 1 && init_param.output_count == 1);

    OperandIndexSequence outputs{init_param.outputs[0]};

    // Each input should be interpreted as follows:
    //
    // 0 -> Input Tensor Index
    OperandIndexSequence inputs{init_param.inputs[0]};

    return new operation::Dequantize{inputs, outputs};
  };

  _map[ANEURALNETWORKS_MEAN] = [](const OperationFactory::Param &init_param, Operands &operands) {
    assert(init_param.input_count == 3 && init_param.output_count == 1);

    OperandIndexSequence outputs{init_param.outputs[0]};

    // Each input should be interpreted as follows:
    //
    // 0 -> ifm Tensor Index
    // 1 -> axis Tensor Index
    // 2 -> keep_dims Index
    OperandIndexSequence inputs{init_param.inputs[0]};
    std::vector<std::int32_t> axes =
        operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>();

    operation::Mean::Param param;
    param.axes.assign(axes.cbegin(), axes.cend());
    // keep_dims is read as an int32 scalar; any non-zero value means "keep".
    param.keep_dims = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int32_t>() != 0;
    param.rank = operands.at(inputs.at(0)).shape().rank();

    return new operation::Mean{inputs, outputs, param};
  };

  _map[ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION] = [](const OperationFactory::Param &init_param,
                                                          Operands &operands) {
    assert(init_param.input_count == 5 && init_param.output_count == 1);

    OperandIndexSequence outputs{init_param.outputs[0]};

    // Each input should be interpreted as follows:
    //
    // 0 -> Input Tensor Index
    // 1 -> radius scalar (int32)
    // 2 -> bias scalar (float)
    // 3 -> alpha scalar (float)
    // 4 -> beta scalar (float)
    OperandIndexSequence inputs{init_param.inputs[0]};

    operation::LocalResponseNormalization::Param param;
    param.radius = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
    param.bias = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<float>();
    param.alpha = operands.at(OperandIndex{init_param.inputs[3]}).asScalar<float>();
    param.beta = operands.at(OperandIndex{init_param.inputs[4]}).asScalar<float>();

    return new operation::LocalResponseNormalization{inputs, outputs, param};
  };

  _map[ANEURALNETWORKS_DEPTH_TO_SPACE] = [](const OperationFactory::Param &init_param,
                                            Operands &operands) {
    assert(init_param.input_count == 2 && init_param.output_count == 1);

    OperandIndexSequence outputs{init_param.outputs[0]};

    // Each input should be interpreted as follows:
    //
    // 0 -> Input Tensor Index
    // 1 -> Block size Index
    OperandIndexSequence inputs{init_param.inputs[0]};

    operation::DepthToSpace::Param param;
    param.block_size = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();

    return new operation::DepthToSpace{inputs, outputs, param};
  };

  _map[ANEURALNETWORKS_PACK_EX] = [](const OperationFactory::Param &init_param,
                                     Operands &operands) {
    assert(init_param.input_count >= 3 && init_param.output_count == 1);

    OperandIndexSequence outputs{init_param.outputs[0]};
    // Inputs are: N tensors to pack, followed by the `num` scalar and the
    // `axis` scalar (hence input_count - 2 actual tensor inputs).
    OperandIndexSequence inputs;
    for (uint32_t n = 0; n < init_param.input_count - 2; ++n)
    {
      inputs.append(OperandIndex{init_param.inputs[n]});
    }

    operation::Pack::Param param;
    const auto num_index = OperandIndex{init_param.inputs[init_param.input_count - 2]};
    const auto axis_index = OperandIndex{init_param.inputs[init_param.input_count - 1]};
    param.num = operands.at(num_index).asScalar<int32_t>();
    param.axis = operands.at(axis_index).asScalar<int32_t>();
    // Rank is taken from the OUTPUT here (inputs have one dimension less).
    param.rank = operands.at(outputs.at(0)).shape().rank();

    return new operation::Pack{inputs, outputs, param};
  };

  _map[ANEURALNETWORKS_REDUCE_MIN] = [](const OperationFactory::Param &init_param,
                                        Operands &operands) {
    assert(init_param.input_count == 3 && init_param.output_count == 1);

    OperandIndexSequence outputs{init_param.outputs[0]};

    // Each input should be interpreted as follows:
    //
    // 1 -> Axis Tensor Index
    // 2 -> keep_dims Index
    OperandIndexSequence inputs{init_param.inputs[0]};
    std::vector<std::int32_t> axes =
        operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>();

    operation::ReduceMin::Param param;
    param.axes.assign(axes.cbegin(), axes.cend());
    // NOTE(review): keep_dims is read as int8_t here while ANEURALNETWORKS_MEAN
    // reads it as int32_t — confirm the operand type (NNAPI bool8 vs int32).
    param.keep_dims = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int8_t>() != 0;
    param.rank = operands.at(inputs.at(0)).shape().rank();

    return new operation::ReduceMin{inputs, outputs, param};
  };

  // ANEURALNETWORKS_REDUCE_MIN_EX is deprecated
  // TODO Remove ANEURALNETWORKS_REDUCE_MIN_EX
  _map[ANEURALNETWORKS_REDUCE_MIN_EX] = _map[ANEURALNETWORKS_REDUCE_MIN];

  _map[ANEURALNETWORKS_SPLIT] = [](const OperationFactory::Param &init_param, Operands &operands) {
    assert(init_param.input_count == 3);
    assert(init_param.output_count >= 1); // At least one output tensor and axis

    // Inputs are: input tensor, axis scalar, num_splits scalar.
    OperandIndexSequence inputs{init_param.inputs[0]};
    OperandIndexSequence outputs;
    for (uint32_t n = 0; n < init_param.output_count; ++n)
    {
      outputs.append(OperandIndex{init_param.outputs[n]});
    }

    operation::Split::Param param;
    param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
    param.num_splits = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<std::int32_t>();
    param.rank = operands.at(inputs.at(0)).shape().rank();

    return new operation::Split{inputs, outputs, param};
  };

  // ANEURALNETWORKS_SPLIT_EX is deprecated
  // TODO Remove ANEURALNETWORKS_SPLIT_EX
  _map[ANEURALNETWORKS_SPLIT_EX] = _map[ANEURALNETWORKS_SPLIT];

  _map[ANEURALNETWORKS_UNPACK_EX] = [](const OperationFactory::Param &init_param,
                                       Operands &operands) {
    assert(init_param.input_count == 3 && init_param.output_count >= 1);

    // Inputs are: input tensor, num scalar, axis scalar.
    OperandIndexSequence inputs{init_param.inputs[0]};
    OperandIndexSequence outputs;
    for (uint32_t n = 0; n < init_param.output_count; ++n)
    {
      outputs.append(OperandIndex{init_param.outputs[n]});
    }

    operation::Unpack::Param param;
    const auto num_index = OperandIndex{init_param.inputs[1]};
    const auto axis_index = OperandIndex{init_param.inputs[2]};
    param.num = operands.at(num_index).asScalar<int32_t>();
    param.axis = operands.at(axis_index).asScalar<int32_t>();
    param.rank = operands.at(inputs.at(0)).shape().rank();

    return new operation::Unpack{inputs, outputs, param};
  };

  _map[ANEURALNETWORKS_PAD] = [](const OperationFactory::Param &init_param, Operands &operands) {
    // NOTE(review): output_count >= 1 looks lax for PAD (expected exactly 1) —
    // confirm against the other unary registrations which assert == 1.
    assert(init_param.input_count == 2 && init_param.output_count >= 1);

    // Inputs are: input tensor, paddings tensor.
    OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
    OperandIndexSequence outputs{init_param.outputs[0]};

    operation::Pad::Param param;
    param.rank = operands.at(inputs.at(0)).shape().rank();

    return new operation::Pad{inputs, outputs, param};
  };

  _map[ANEURALNETWORKS_MINIMUM] = [](const OperationFactory::Param &init_param, Operands &) {
    assert(init_param.input_count == 2 && init_param.output_count == 1);

    OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
    OperandIndexSequence outputs{init_param.outputs[0]};

    return new operation::Min{inputs, outputs};
  };

  _map[ANEURALNETWORKS_MAXIMUM] = [](const OperationFactory::Param &init_param, Operands &) {
    assert(init_param.input_count == 2 && init_param.output_count == 1);

    OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
    OperandIndexSequence outputs{init_param.outputs[0]};

    return new operation::Max{inputs, outputs};
  };

  _map[ANEURALNETWORKS_ONE_HOT_EX] = [](const OperationFactory::Param &init_param,
                                        Operands &operands) {
    assert(init_param.input_count == 5);
    assert(init_param.output_count == 1);
    // Each input should be interpreted as follows:
    //
    // 0 -> indices tensor
    // 1 -> depth scalar
    // 2 -> on_value scalar
    // 3 -> off_value scalar
    // 4 -> axis scalar
    OperandIndexSequence inputs{init_param.inputs[0]};
    OperandIndexSequence outputs{init_param.outputs[0]};

    operation::OneHot::Param param;
    param.depth = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
    param.on_value = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<float>();
    param.off_value = operands.at(OperandIndex{init_param.inputs[3]}).asScalar<float>();
    param.axis = operands.at(OperandIndex{init_param.inputs[4]}).asScalar<std::int32_t>();

    return new operation::OneHot{inputs, outputs, param};
  };

  _map[ANEURALNETWORKS_SIN] = [](const OperationFactory::Param &init_param, Operands &) {
    assert(init_param.input_count == 1 && init_param.output_count == 1);

    OperandIndexSequence inputs{init_param.inputs[0]};
    OperandIndexSequence outputs{init_param.outputs[0]};

    return new operation::Sin{inputs, outputs};
  };

  _map[ANEURALNETWORKS_SHAPE_EX] = [](const OperationFactory::Param &init_param, Operands &) {
    assert(init_param.input_count == 1 && init_param.output_count == 1);

    OperandIndexSequence inputs{init_param.inputs[0]};
    OperandIndexSequence outputs{init_param.outputs[0]};

    return new operation::Shape{inputs, outputs};
  };
}

// Create an IR operation node for `type` by dispatching to the generator
// registered in the constructor above. Ownership of the returned pointer
// passes to the caller.
// Throws std::runtime_error when `type` has no registered generator.
Operation *OperationFactory::create(ANeuralNetworksOperationType type,
                                    const OperationFactory::Param &param, Operands &operands)
{
  auto it = _map.find(type);
  if (it == _map.end())
  {
    throw std::runtime_error("Unsupported operation type: " + std::to_string(type));
  }
  return it->second(param, operands);
}
diff --git a/runtime/onert/frontend/nnapi/wrapper/OperationFactory.h b/runtime/onert/frontend/nnapi/wrapper/OperationFactory.h
new file mode 100644
index 000000000..367cf74db
--- /dev/null
+++ b/runtime/onert/frontend/nnapi/wrapper/OperationFactory.h
@@ -0,0 +1,60 @@
/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __OPERATION_FACTORY_H__ +#define __OPERATION_FACTORY_H__ + +#include <unordered_map> + +#include "ir/Operands.h" +#include "ir/Operation.h" +#include "NeuralNetworks.h" +#include "NeuralNetworksEx.h" + +/** + * @brief A class to create a onert operation object from NN API input parameters + */ +class OperationFactory +{ +public: + struct Param + { + uint32_t input_count; + const uint32_t *inputs; + uint32_t output_count; + const uint32_t *outputs; + }; + +public: + using Generator = + std::function<onert::ir::Operation *(const OperationFactory::Param &, onert::ir::Operands &)>; + +public: + static OperationFactory &get(); + +private: + OperationFactory(); + +public: + onert::ir::Operation *create(ANeuralNetworksOperationType, const OperationFactory::Param ¶m, + onert::ir::Operands &operands); + // TODO add "register" method for separating registration, possibly supporting custom-ops + +private: + std::unordered_map<ANeuralNetworksOperationType, Generator> _map; +}; + +#endif // __OPERATION_FACTORY_H__ |