author    Chunseok Lee <chunseok.lee@samsung.com>  2020-03-04 18:09:24 +0900
committer Chunseok Lee <chunseok.lee@samsung.com>  2020-03-04 18:09:24 +0900
commit    302e6564a7a76109e1178207e44e45a58631c477 (patch)
tree      6cc4bd95e5e438331fc2c53234af4ed0e0f3bc20 /runtime/libs/rua
parent    bd11b24234d7d43dfe05a81c520aa01ffad06e42 (diff)
Diffstat (limited to 'runtime/libs/rua')
-rw-r--r--  runtime/libs/rua/CMakeLists.txt                   |   4
-rw-r--r--  runtime/libs/rua/README.md                        |   4
-rw-r--r--  runtime/libs/rua/anchor/CMakeLists.txt            |   9
-rw-r--r--  runtime/libs/rua/anchor/include/rua/Anchor.h      |  38
-rw-r--r--  runtime/libs/rua/anchor/src/Anchor.cpp            |  33
-rw-r--r--  runtime/libs/rua/core/CMakeLists.txt              |   3
-rw-r--r--  runtime/libs/rua/core/include/rua/Service.h       | 158
-rw-r--r--  runtime/libs/rua/dyn/CMakeLists.txt               |   8
-rw-r--r--  runtime/libs/rua/dyn/include/rua/DynamicBinder.h  |  35
-rw-r--r--  runtime/libs/rua/dyn/src/DynamicBinder.cpp        | 353
-rw-r--r--  runtime/libs/rua/shim/CMakeLists.txt              |   4
-rw-r--r--  runtime/libs/rua/shim/include/rua/Shim.h          | 192
12 files changed, 841 insertions, 0 deletions
diff --git a/runtime/libs/rua/CMakeLists.txt b/runtime/libs/rua/CMakeLists.txt
new file mode 100644
index 000000000..07ad9ea26
--- /dev/null
+++ b/runtime/libs/rua/CMakeLists.txt
@@ -0,0 +1,4 @@
+add_subdirectory(core)
+add_subdirectory(dyn)
+add_subdirectory(anchor)
+add_subdirectory(shim)
diff --git a/runtime/libs/rua/README.md b/runtime/libs/rua/README.md
new file mode 100644
index 000000000..aea4ce033
--- /dev/null
+++ b/runtime/libs/rua/README.md
@@ -0,0 +1,4 @@
+# rua
+
+_rua_ is a **RU**ntime **A**bstraction layer which allows us to switch between multiple
+Android NN runtimes at execution time (not at loading time).
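A minimal sketch of what this switching looks like from client code, assuming the anchor and dyn components introduced below in this patch:

    #include <rua/Anchor.h>

    // The active Android NN runtime is resolved per call rather than fixed at
    // load time. By default Anchor points at the dlopen/dlsym-based binder
    // (rua::DynamicBinder, below); Anchor::set() can redirect every later call
    // to a different RuntimeService implementation.
    const rua::RuntimeService *runtime = rua::Anchor::get();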
diff --git a/runtime/libs/rua/anchor/CMakeLists.txt b/runtime/libs/rua/anchor/CMakeLists.txt
new file mode 100644
index 000000000..6e65641f4
--- /dev/null
+++ b/runtime/libs/rua/anchor/CMakeLists.txt
@@ -0,0 +1,9 @@
+file(GLOB_RECURSE SOURCES "src/*.cpp")
+
+add_library(nnfw_lib_rua_anchor STATIC ${SOURCES})
+set_target_properties(nnfw_lib_rua_anchor PROPERTIES POSITION_INDEPENDENT_CODE ON)
+target_include_directories(nnfw_lib_rua_anchor PUBLIC include)
+target_link_libraries(nnfw_lib_rua_anchor PUBLIC nnfw_lib_rua_core)
+target_link_libraries(nnfw_lib_rua_anchor PRIVATE nnfw_lib_rua_dyn)
+target_link_libraries(nnfw_lib_rua_anchor PRIVATE nnfw_common)
+target_link_libraries(nnfw_lib_rua_anchor PRIVATE nnfw_coverage)
diff --git a/runtime/libs/rua/anchor/include/rua/Anchor.h b/runtime/libs/rua/anchor/include/rua/Anchor.h
new file mode 100644
index 000000000..f6056ab4e
--- /dev/null
+++ b/runtime/libs/rua/anchor/include/rua/Anchor.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NNFW_RUA_ANCHOR_H__
+#define __NNFW_RUA_ANCHOR_H__
+
+#include <rua/Service.h>
+
+namespace rua
+{
+
+/**
+ * @brief Global Runtime Abstraction Context
+ *
+ * "set" will have global effect (within each process).
+ */
+struct Anchor
+{
+ static const RuntimeService *get(void);
+ static void set(const RuntimeService *svc);
+};
+
+} // namespace rua
+
+#endif // __NNFW_RUA_ANCHOR_H__
diff --git a/runtime/libs/rua/anchor/src/Anchor.cpp b/runtime/libs/rua/anchor/src/Anchor.cpp
new file mode 100644
index 000000000..a78cca19e
--- /dev/null
+++ b/runtime/libs/rua/anchor/src/Anchor.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "rua/Anchor.h"
+#include <rua/DynamicBinder.h>
+
+namespace
+{
+
+const rua::RuntimeService *anchored_service = rua::DynamicBinder::get();
+
+} // namespace
+
+namespace rua
+{
+
+const RuntimeService *Anchor::get(void) { return anchored_service; }
+void Anchor::set(const RuntimeService *service) { anchored_service = service; }
+
+} // namespace rua
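A minimal sketch of overriding the process-wide default shown above, assuming `traced_service` points at some other rua::RuntimeService implementation (a hypothetical tracing wrapper, not part of this patch):

    #include <rua/Anchor.h>

    void enable_tracing(const rua::RuntimeService *traced_service)
    {
      // Takes effect globally within this process: every subsequent
      // rua::Anchor::get() returns traced_service instead of the
      // DynamicBinder-provided default.
      rua::Anchor::set(traced_service);
    }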
diff --git a/runtime/libs/rua/core/CMakeLists.txt b/runtime/libs/rua/core/CMakeLists.txt
new file mode 100644
index 000000000..f7d41f657
--- /dev/null
+++ b/runtime/libs/rua/core/CMakeLists.txt
@@ -0,0 +1,3 @@
+add_library(nnfw_lib_rua_core INTERFACE)
+target_include_directories(nnfw_lib_rua_core INTERFACE include)
+target_link_libraries(nnfw_lib_rua_core INTERFACE nnfw_lib_nnapi)
diff --git a/runtime/libs/rua/core/include/rua/Service.h b/runtime/libs/rua/core/include/rua/Service.h
new file mode 100644
index 000000000..a79524a8a
--- /dev/null
+++ b/runtime/libs/rua/core/include/rua/Service.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file Service.h
+ * @brief Core abstraction that RUA depends on.
+ */
+#ifndef __NNFW_RUA_SERVICE_H__
+#define __NNFW_RUA_SERVICE_H__
+
+#include "NeuralNetworks.h"
+
+struct ANeuralNetworksMemory;
+struct ANeuralNetworksEvent;
+
+struct ANeuralNetworksModel;
+struct ANeuralNetworksCompilation;
+struct ANeuralNetworksExecution;
+
+namespace rua
+{
+
+/**
+ * @brief A wrapper for the ANeuralNetworksMemory API
+ */
+struct MemoryService
+{
+ virtual ~MemoryService() = default;
+
+ virtual int createFromFd(size_t size, int protect, int fd, size_t offset,
+ ANeuralNetworksMemory **memory) const = 0;
+
+ virtual void free(ANeuralNetworksMemory *memory) const = 0;
+};
+
+/**
+ * @brief A wrapper for the ANeuralNetworksModel API
+ */
+struct ModelService
+{
+ virtual ~ModelService() = default;
+
+ virtual int create(ANeuralNetworksModel **model) const = 0;
+
+ virtual int addOperand(ANeuralNetworksModel *model,
+ const ANeuralNetworksOperandType *type) const = 0;
+
+ virtual int setOperandValue(ANeuralNetworksModel *model, int32_t index, const void *buffer,
+ size_t length) const = 0;
+
+ virtual int setOperandValueFromMemory(ANeuralNetworksModel *model, int32_t index,
+ const ANeuralNetworksMemory *memory, size_t offset,
+ size_t length) const = 0;
+
+ virtual int addOperation(ANeuralNetworksModel *model, ANeuralNetworksOperationType type,
+ uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount,
+ const uint32_t *outputs) const = 0;
+
+ virtual int identifyInputsAndOutputs(ANeuralNetworksModel *model, uint32_t inputCount,
+ const uint32_t *inputs, uint32_t outputCount,
+ const uint32_t *outputs) const = 0;
+
+ virtual int relaxComputationFloat32toFloat16(ANeuralNetworksModel *model, bool allow) const = 0;
+
+ virtual int finish(ANeuralNetworksModel *model) const = 0;
+
+ virtual void free(ANeuralNetworksModel *model) const = 0;
+};
+
+/**
+ * @brief A wrapper for the ANeuralNetworksCompilation API
+ */
+struct CompilationService
+{
+ virtual ~CompilationService() = default;
+
+ virtual int create(ANeuralNetworksModel *model,
+ ANeuralNetworksCompilation **compilation) const = 0;
+
+ virtual int setPreference(ANeuralNetworksCompilation *compilation, int32_t preference) const = 0;
+ virtual int finish(ANeuralNetworksCompilation *compilation) const = 0;
+
+ virtual void free(ANeuralNetworksCompilation *compilation) const = 0;
+};
+
+/**
+ * @brief A wrapper for the ANeuralNetworksExecution API
+ */
+struct ExecutionService
+{
+ virtual ~ExecutionService() = default;
+
+ virtual int create(ANeuralNetworksCompilation *compilation,
+ ANeuralNetworksExecution **execution) const = 0;
+
+ virtual void free(ANeuralNetworksExecution *execution) const = 0;
+
+ virtual int setInput(ANeuralNetworksExecution *execution, int32_t index,
+ const ANeuralNetworksOperandType *type, const void *buffer,
+ size_t length) const = 0;
+
+ virtual int setInputFromMemory(ANeuralNetworksExecution *execution, int32_t index,
+ const ANeuralNetworksOperandType *type,
+ const ANeuralNetworksMemory *memory, size_t offset,
+ size_t length) const = 0;
+
+ virtual int setOutput(ANeuralNetworksExecution *execution, int32_t index,
+ const ANeuralNetworksOperandType *type, void *buffer,
+ size_t length) const = 0;
+
+ virtual int setOutputFromMemory(ANeuralNetworksExecution *execution, int32_t index,
+ const ANeuralNetworksOperandType *type,
+ const ANeuralNetworksMemory *memory, size_t offset,
+ size_t length) const = 0;
+
+ virtual int startCompute(ANeuralNetworksExecution *execution,
+ ANeuralNetworksEvent **event) const = 0;
+};
+
+/**
+ * @brief A wrapper for the ANeuralNetworksEvent API
+ */
+struct EventService
+{
+ virtual int wait(ANeuralNetworksEvent *event) const = 0;
+ virtual void free(ANeuralNetworksEvent *event) const = 0;
+};
+
+/**
+ * @brief A wrapper for the Android NN runtime itself
+ */
+struct RuntimeService
+{
+ virtual ~RuntimeService() = default;
+
+ virtual const MemoryService *memory(void) const = 0;
+ virtual const ModelService *model(void) const = 0;
+ virtual const CompilationService *compilation(void) const = 0;
+ virtual const ExecutionService *execution(void) const = 0;
+ virtual const EventService *event(void) const = 0;
+};
+
+} // namespace rua
+
+#endif // __NNFW_RUA_SERVICE_H__
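A minimal usage sketch of these interfaces, assuming the service is obtained from rua::Anchor::get() (added by this patch) and with most error handling elided:

    #include <rua/Anchor.h>

    void build_and_compile_empty_model()
    {
      const rua::RuntimeService *svc = rua::Anchor::get();

      ANeuralNetworksModel *model = nullptr;
      if (svc->model()->create(&model) != ANEURALNETWORKS_NO_ERROR)
      {
        return; // creation failed
      }

      // ... describe the graph via svc->model()->addOperand(...),
      //     svc->model()->addOperation(...), etc. ...
      svc->model()->finish(model);

      ANeuralNetworksCompilation *compilation = nullptr;
      if (svc->compilation()->create(model, &compilation) == ANEURALNETWORKS_NO_ERROR)
      {
        svc->compilation()->finish(compilation);
        svc->compilation()->free(compilation);
      }

      // Ownership mirrors the NNAPI rules: free in reverse order of creation.
      svc->model()->free(model);
    }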
diff --git a/runtime/libs/rua/dyn/CMakeLists.txt b/runtime/libs/rua/dyn/CMakeLists.txt
new file mode 100644
index 000000000..3f9ac8928
--- /dev/null
+++ b/runtime/libs/rua/dyn/CMakeLists.txt
@@ -0,0 +1,8 @@
+file(GLOB_RECURSE SOURCES "src/*.cpp")
+
+add_library(nnfw_lib_rua_dyn STATIC ${SOURCES})
+set_target_properties(nnfw_lib_rua_dyn PROPERTIES POSITION_INDEPENDENT_CODE ON)
+target_include_directories(nnfw_lib_rua_dyn PUBLIC include)
+target_link_libraries(nnfw_lib_rua_dyn PUBLIC nnfw_lib_rua_core)
+target_link_libraries(nnfw_lib_rua_dyn PRIVATE nnfw_common)
+target_link_libraries(nnfw_lib_rua_dyn PRIVATE nnfw_coverage)
diff --git a/runtime/libs/rua/dyn/include/rua/DynamicBinder.h b/runtime/libs/rua/dyn/include/rua/DynamicBinder.h
new file mode 100644
index 000000000..8ce0c42f8
--- /dev/null
+++ b/runtime/libs/rua/dyn/include/rua/DynamicBinder.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NNFW_RUA_DYNAMIC_BINDER_H__
+#define __NNFW_RUA_DYNAMIC_BINDER_H__
+
+#include <rua/Service.h>
+
+namespace rua
+{
+
+/**
+ * @brief Bind Android NN runtime implementation via dlopen & dlsym
+ */
+struct DynamicBinder
+{
+ static const rua::RuntimeService *get(void);
+};
+
+} // namespace rua
+
+#endif // __NNFW_RUA_DYNAMIC_BINDER_H__
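The actual dlopen/dlsym plumbing lives in NeuralNetworksLoadHelpers.h, which DynamicBinder.cpp below relies on but which is not part of this diff. As an assumption-level illustration only (names, flags, and failure handling here are illustrative, not the real LOAD_FUNCTION / EXECUTE_FUNCTION_RETURN macros), each bound entry point boils down to roughly:

    #include <dlfcn.h>
    #include <rua/Service.h> // brings in NeuralNetworks.h and the opaque NNAPI types

    typedef int (*ANeuralNetworksModel_create_fn)(ANeuralNetworksModel **model);

    // Illustrative only: open libneuralnetworks.so once, resolve the symbol once,
    // then forward the call; report failure if the symbol could not be bound.
    inline int illustrative_model_create(ANeuralNetworksModel **model)
    {
      static void *handle = dlopen("libneuralnetworks.so", RTLD_LAZY | RTLD_LOCAL);
      static auto fn = reinterpret_cast<ANeuralNetworksModel_create_fn>(
          handle ? dlsym(handle, "ANeuralNetworksModel_create") : nullptr);
      return fn ? fn(model) : ANEURALNETWORKS_OP_FAILED;
    }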
diff --git a/runtime/libs/rua/dyn/src/DynamicBinder.cpp b/runtime/libs/rua/dyn/src/DynamicBinder.cpp
new file mode 100644
index 000000000..68dae6262
--- /dev/null
+++ b/runtime/libs/rua/dyn/src/DynamicBinder.cpp
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "rua/DynamicBinder.h"
+
+#include "NeuralNetworksLoadHelpers.h"
+
+using namespace rua;
+
+//
+// Memory
+//
+namespace
+{
+
+typedef int (*ANeuralNetworksMemory_createFromFd_fn)(size_t size, int protect, int fd,
+ size_t offset, ANeuralNetworksMemory **memory);
+
+typedef void (*ANeuralNetworksMemory_free_fn)(ANeuralNetworksMemory *memory);
+
+struct MemoryServiceImpl final : public MemoryService
+{
+ int createFromFd(size_t size, int protect, int fd, size_t offset,
+ ANeuralNetworksMemory **memory) const override
+ {
+ LOAD_FUNCTION(ANeuralNetworksMemory_createFromFd);
+ EXECUTE_FUNCTION_RETURN(size, protect, fd, offset, memory);
+ }
+
+ void free(ANeuralNetworksMemory *memory) const override
+ {
+ LOAD_FUNCTION(ANeuralNetworksMemory_free);
+ EXECUTE_FUNCTION(memory);
+ }
+};
+
+} // namespace
+
+//
+// Event
+//
+namespace
+{
+
+typedef int (*ANeuralNetworksEvent_wait_fn)(ANeuralNetworksEvent *event);
+
+typedef void (*ANeuralNetworksEvent_free_fn)(ANeuralNetworksEvent *event);
+
+struct EventServiceImpl final : public EventService
+{
+
+ int wait(ANeuralNetworksEvent *event) const override
+ {
+ LOAD_FUNCTION(ANeuralNetworksEvent_wait);
+ EXECUTE_FUNCTION_RETURN(event);
+ }
+
+ void free(ANeuralNetworksEvent *event) const override
+ {
+ LOAD_FUNCTION(ANeuralNetworksEvent_free);
+ EXECUTE_FUNCTION(event);
+ }
+};
+
+} // namespace
+
+//
+// Model
+//
+namespace
+{
+
+typedef int (*ANeuralNetworksModel_create_fn)(ANeuralNetworksModel **model);
+
+typedef int (*ANeuralNetworksModel_finish_fn)(ANeuralNetworksModel *model);
+
+typedef void (*ANeuralNetworksModel_free_fn)(ANeuralNetworksModel *model);
+
+typedef int (*ANeuralNetworksModel_addOperand_fn)(ANeuralNetworksModel *model,
+ const ANeuralNetworksOperandType *type);
+
+typedef int (*ANeuralNetworksModel_setOperandValue_fn)(ANeuralNetworksModel *model, int32_t index,
+ const void *buffer, size_t length);
+
+typedef int (*ANeuralNetworksModel_setOperandValueFromMemory_fn)(
+ ANeuralNetworksModel *model, int32_t index, const ANeuralNetworksMemory *memory, size_t offset,
+ size_t length);
+
+typedef int (*ANeuralNetworksModel_addOperation_fn)(ANeuralNetworksModel *model,
+ ANeuralNetworksOperationType type,
+ uint32_t inputCount, const uint32_t *inputs,
+ uint32_t outputCount, const uint32_t *outputs);
+
+typedef int (*ANeuralNetworksModel_identifyInputsAndOutputs_fn)(ANeuralNetworksModel *model,
+ uint32_t inputCount,
+ const uint32_t *inputs,
+ uint32_t outputCount,
+ const uint32_t *outputs);
+
+typedef int (*ANeuralNetworksModel_relaxComputationFloat32toFloat16_fn)(ANeuralNetworksModel *model,
+ bool allow);
+
+struct ModelServiceImpl final : public ModelService
+{
+ int create(ANeuralNetworksModel **model) const override
+ {
+ LOAD_FUNCTION(ANeuralNetworksModel_create);
+ EXECUTE_FUNCTION_RETURN(model);
+ }
+
+ int addOperand(ANeuralNetworksModel *model, const ANeuralNetworksOperandType *type) const override
+ {
+ LOAD_FUNCTION(ANeuralNetworksModel_addOperand);
+ EXECUTE_FUNCTION_RETURN(model, type);
+ }
+ int setOperandValue(ANeuralNetworksModel *model, int32_t index, const void *buffer,
+ size_t length) const override
+ {
+ LOAD_FUNCTION(ANeuralNetworksModel_setOperandValue);
+ EXECUTE_FUNCTION_RETURN(model, index, buffer, length);
+ }
+
+ int setOperandValueFromMemory(ANeuralNetworksModel *model, int32_t index,
+ const ANeuralNetworksMemory *memory, size_t offset,
+ size_t length) const override
+ {
+ LOAD_FUNCTION(ANeuralNetworksModel_setOperandValueFromMemory);
+ EXECUTE_FUNCTION_RETURN(model, index, memory, offset, length);
+ }
+
+ int addOperation(ANeuralNetworksModel *model, ANeuralNetworksOperationType type,
+ uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount,
+ const uint32_t *outputs) const override
+ {
+ LOAD_FUNCTION(ANeuralNetworksModel_addOperation);
+ EXECUTE_FUNCTION_RETURN(model, type, inputCount, inputs, outputCount, outputs);
+ }
+
+ int identifyInputsAndOutputs(ANeuralNetworksModel *model, uint32_t inputCount,
+ const uint32_t *inputs, uint32_t outputCount,
+ const uint32_t *outputs) const override
+ {
+ LOAD_FUNCTION(ANeuralNetworksModel_identifyInputsAndOutputs);
+ EXECUTE_FUNCTION_RETURN(model, inputCount, inputs, outputCount, outputs);
+ }
+
+ int relaxComputationFloat32toFloat16(ANeuralNetworksModel *model, bool allow) const override
+ {
+ LOAD_FUNCTION(ANeuralNetworksModel_relaxComputationFloat32toFloat16);
+ EXECUTE_FUNCTION_RETURN(model, allow);
+ }
+
+ int finish(ANeuralNetworksModel *model) const override
+ {
+ LOAD_FUNCTION(ANeuralNetworksModel_finish);
+ EXECUTE_FUNCTION_RETURN(model);
+ }
+
+ void free(ANeuralNetworksModel *model) const override
+ {
+ LOAD_FUNCTION(ANeuralNetworksModel_free);
+ EXECUTE_FUNCTION(model);
+ }
+};
+
+} // namespace
+
+//
+// Compilation
+//
+namespace
+{
+
+typedef int (*ANeuralNetworksCompilation_create_fn)(ANeuralNetworksModel *model,
+ ANeuralNetworksCompilation **compilation);
+
+typedef void (*ANeuralNetworksCompilation_free_fn)(ANeuralNetworksCompilation *compilation);
+
+typedef int (*ANeuralNetworksCompilation_setPreference_fn)(ANeuralNetworksCompilation *compilation,
+ int32_t preference);
+
+typedef int (*ANeuralNetworksCompilation_finish_fn)(ANeuralNetworksCompilation *compilation);
+
+struct CompilationServiceImpl : public CompilationService
+{
+
+ int create(ANeuralNetworksModel *model, ANeuralNetworksCompilation **compilation) const override
+ {
+ LOAD_FUNCTION(ANeuralNetworksCompilation_create);
+ EXECUTE_FUNCTION_RETURN(model, compilation);
+ }
+
+ int setPreference(ANeuralNetworksCompilation *compilation, int32_t preference) const override
+ {
+ LOAD_FUNCTION(ANeuralNetworksCompilation_setPreference);
+ EXECUTE_FUNCTION_RETURN(compilation, preference);
+ }
+
+ int finish(ANeuralNetworksCompilation *compilation) const override
+ {
+ LOAD_FUNCTION(ANeuralNetworksCompilation_finish);
+ EXECUTE_FUNCTION_RETURN(compilation);
+ }
+
+ void free(ANeuralNetworksCompilation *compilation) const override
+ {
+ LOAD_FUNCTION(ANeuralNetworksCompilation_free);
+ EXECUTE_FUNCTION(compilation);
+ }
+};
+
+} // namespace
+
+//
+// Execution
+//
+namespace
+{
+
+typedef int (*ANeuralNetworksExecution_create_fn)(ANeuralNetworksCompilation *compilation,
+ ANeuralNetworksExecution **execution);
+
+typedef void (*ANeuralNetworksExecution_free_fn)(ANeuralNetworksExecution *execution);
+
+typedef int (*ANeuralNetworksExecution_setInput_fn)(ANeuralNetworksExecution *execution,
+ int32_t index,
+ const ANeuralNetworksOperandType *type,
+ const void *buffer, size_t length);
+
+typedef int (*ANeuralNetworksExecution_setInputFromMemory_fn)(
+ ANeuralNetworksExecution *execution, int32_t index, const ANeuralNetworksOperandType *type,
+ const ANeuralNetworksMemory *memory, size_t offset, size_t length);
+
+typedef int (*ANeuralNetworksExecution_setOutput_fn)(ANeuralNetworksExecution *execution,
+ int32_t index,
+ const ANeuralNetworksOperandType *type,
+ void *buffer, size_t length);
+
+typedef int (*ANeuralNetworksExecution_setOutputFromMemory_fn)(
+ ANeuralNetworksExecution *execution, int32_t index, const ANeuralNetworksOperandType *type,
+ const ANeuralNetworksMemory *memory, size_t offset, size_t length);
+
+typedef int (*ANeuralNetworksExecution_startCompute_fn)(ANeuralNetworksExecution *execution,
+ ANeuralNetworksEvent **event);
+
+struct ExecutionServiceImpl : public ExecutionService
+{
+
+ int create(ANeuralNetworksCompilation *compilation,
+ ANeuralNetworksExecution **execution) const override
+ {
+ LOAD_FUNCTION(ANeuralNetworksExecution_create);
+ EXECUTE_FUNCTION_RETURN(compilation, execution);
+ }
+
+ int setInput(ANeuralNetworksExecution *execution, int32_t index,
+ const ANeuralNetworksOperandType *type, const void *buffer,
+ size_t length) const override
+ {
+ LOAD_FUNCTION(ANeuralNetworksExecution_setInput);
+ EXECUTE_FUNCTION_RETURN(execution, index, type, buffer, length);
+ }
+
+ int setInputFromMemory(ANeuralNetworksExecution *execution, int32_t index,
+ const ANeuralNetworksOperandType *type,
+ const ANeuralNetworksMemory *memory, size_t offset,
+ size_t length) const override
+ {
+ LOAD_FUNCTION(ANeuralNetworksExecution_setInputFromMemory);
+ EXECUTE_FUNCTION_RETURN(execution, index, type, memory, offset, length);
+ }
+
+ int setOutput(ANeuralNetworksExecution *execution, int32_t index,
+ const ANeuralNetworksOperandType *type, void *buffer, size_t length) const override
+ {
+ LOAD_FUNCTION(ANeuralNetworksExecution_setOutput);
+ EXECUTE_FUNCTION_RETURN(execution, index, type, buffer, length);
+ }
+
+ int setOutputFromMemory(ANeuralNetworksExecution *execution, int32_t index,
+ const ANeuralNetworksOperandType *type,
+ const ANeuralNetworksMemory *memory, size_t offset,
+ size_t length) const override
+ {
+ LOAD_FUNCTION(ANeuralNetworksExecution_setOutputFromMemory);
+ EXECUTE_FUNCTION_RETURN(execution, index, type, memory, offset, length);
+ }
+
+ int startCompute(ANeuralNetworksExecution *execution, ANeuralNetworksEvent **event) const override
+ {
+ LOAD_FUNCTION(ANeuralNetworksExecution_startCompute);
+ EXECUTE_FUNCTION_RETURN(execution, event);
+ }
+
+ void free(ANeuralNetworksExecution *execution) const override
+ {
+ LOAD_FUNCTION(ANeuralNetworksExecution_free);
+ EXECUTE_FUNCTION(execution);
+ }
+};
+
+} // namespace
+
+//
+// Runtime
+//
+namespace
+{
+
+class RuntimeImpl final : public RuntimeService
+{
+public:
+ const MemoryService *memory(void) const override { return &_memory; }
+ const EventService *event(void) const override { return &_event; }
+
+ const ModelService *model(void) const override { return &_model; }
+ const CompilationService *compilation(void) const override { return &_compilation; }
+ const ExecutionService *execution(void) const override { return &_execution; }
+
+private:
+ MemoryServiceImpl _memory;
+ EventServiceImpl _event;
+
+ ModelServiceImpl _model;
+ CompilationServiceImpl _compilation;
+ ExecutionServiceImpl _execution;
+};
+
+} // namespace
+
+namespace rua
+{
+
+const RuntimeService *DynamicBinder::get(void)
+{
+ static RuntimeImpl runtime;
+ return &runtime;
+}
+
+} // namespace rua
diff --git a/runtime/libs/rua/shim/CMakeLists.txt b/runtime/libs/rua/shim/CMakeLists.txt
new file mode 100644
index 000000000..814db5f7f
--- /dev/null
+++ b/runtime/libs/rua/shim/CMakeLists.txt
@@ -0,0 +1,4 @@
+add_library(nnfw_lib_rua_shim INTERFACE)
+target_include_directories(nnfw_lib_rua_shim INTERFACE include)
+target_link_libraries(nnfw_lib_rua_shim INTERFACE nnfw_lib_rua_core)
+target_link_libraries(nnfw_lib_rua_shim INTERFACE nnfw_lib_rua_anchor)
diff --git a/runtime/libs/rua/shim/include/rua/Shim.h b/runtime/libs/rua/shim/include/rua/Shim.h
new file mode 100644
index 000000000..07a4bb2fd
--- /dev/null
+++ b/runtime/libs/rua/shim/include/rua/Shim.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NNFW_RUA_SHIM_H__
+#define __NNFW_RUA_SHIM_H__
+
+#include <rua/Anchor.h>
+
+//
+// Memory
+//
+inline int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset,
+ ANeuralNetworksMemory **memory)
+{
+ return rua::Anchor::get()->memory()->createFromFd(size, protect, fd, offset, memory);
+}
+
+inline void ANeuralNetworksMemory_free(ANeuralNetworksMemory *memory)
+{
+ return rua::Anchor::get()->memory()->free(memory);
+}
+
+//
+// Event
+//
+inline int ANeuralNetworksEvent_wait(ANeuralNetworksEvent *event)
+{
+ return rua::Anchor::get()->event()->wait(event);
+}
+
+inline void ANeuralNetworksEvent_free(ANeuralNetworksEvent *event)
+{
+ return rua::Anchor::get()->event()->free(event);
+}
+
+//
+// Model
+//
+inline int ANeuralNetworksModel_create(ANeuralNetworksModel **model)
+{
+ return rua::Anchor::get()->model()->create(model);
+}
+
+inline int ANeuralNetworksModel_addOperand(ANeuralNetworksModel *model,
+ const ANeuralNetworksOperandType *type)
+{
+ return rua::Anchor::get()->model()->addOperand(model, type);
+}
+
+inline int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel *model, int32_t index,
+ const void *buffer, size_t length)
+{
+ return rua::Anchor::get()->model()->setOperandValue(model, index, buffer, length);
+}
+
+inline int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel *model,
+ int32_t index,
+ const ANeuralNetworksMemory *memory,
+ size_t offset, size_t length)
+{
+ return rua::Anchor::get()->model()->setOperandValueFromMemory(model, index, memory, offset,
+ length);
+}
+
+inline int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
+ ANeuralNetworksOperationType type, uint32_t inputCount,
+ const uint32_t *inputs, uint32_t outputCount,
+ const uint32_t *outputs)
+{
+ return rua::Anchor::get()->model()->addOperation(model, type, inputCount, inputs, outputCount,
+ outputs);
+}
+
+inline int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel *model,
+ uint32_t inputCount,
+ const uint32_t *inputs,
+ uint32_t outputCount,
+ const uint32_t *outputs)
+{
+ return rua::Anchor::get()->model()->identifyInputsAndOutputs(model, inputCount, inputs,
+ outputCount, outputs);
+}
+
+inline int ANeuralNetworksModel_relaxComputationFloat32toFloat16(ANeuralNetworksModel *model,
+ bool allow)
+{
+ return rua::Anchor::get()->model()->relaxComputationFloat32toFloat16(model, allow);
+}
+
+inline int ANeuralNetworksModel_finish(ANeuralNetworksModel *model)
+{
+ return rua::Anchor::get()->model()->finish(model);
+}
+
+inline void ANeuralNetworksModel_free(ANeuralNetworksModel *model)
+{
+ return rua::Anchor::get()->model()->free(model);
+}
+
+//
+// Compilation
+//
+inline int ANeuralNetworksCompilation_create(ANeuralNetworksModel *model,
+ ANeuralNetworksCompilation **compilation)
+{
+ return rua::Anchor::get()->compilation()->create(model, compilation);
+}
+
+inline int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation *compilation,
+ int32_t preference)
+{
+ return rua::Anchor::get()->compilation()->setPreference(compilation, preference);
+}
+
+inline int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation *compilation)
+{
+ return rua::Anchor::get()->compilation()->finish(compilation);
+}
+
+inline void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation *compilation)
+{
+ return rua::Anchor::get()->compilation()->free(compilation);
+}
+
+//
+// Execution
+//
+inline int ANeuralNetworksExecution_create(ANeuralNetworksCompilation *compilation,
+ ANeuralNetworksExecution **execution)
+{
+ return rua::Anchor::get()->execution()->create(compilation, execution);
+}
+
+inline int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution *execution, int32_t index,
+ const ANeuralNetworksOperandType *type,
+ const void *buffer, size_t length)
+{
+ return rua::Anchor::get()->execution()->setInput(execution, index, type, buffer, length);
+}
+
+inline int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution *execution,
+ int32_t index,
+ const ANeuralNetworksOperandType *type,
+ const ANeuralNetworksMemory *memory,
+ size_t offset, size_t length)
+{
+ return rua::Anchor::get()->execution()->setInputFromMemory(execution, index, type, memory, offset,
+ length);
+}
+
+inline int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution *execution, int32_t index,
+ const ANeuralNetworksOperandType *type, void *buffer,
+ size_t length)
+{
+ return rua::Anchor::get()->execution()->setOutput(execution, index, type, buffer, length);
+}
+
+inline int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution *execution,
+ int32_t index,
+ const ANeuralNetworksOperandType *type,
+ const ANeuralNetworksMemory *memory,
+ size_t offset, size_t length)
+{
+ return rua::Anchor::get()->execution()->setOutputFromMemory(execution, index, type, memory,
+ offset, length);
+}
+
+inline int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution *execution,
+ ANeuralNetworksEvent **event)
+{
+ return rua::Anchor::get()->execution()->startCompute(execution, event);
+}
+
+inline void ANeuralNetworksExecution_free(ANeuralNetworksExecution *execution)
+{
+ return rua::Anchor::get()->execution()->free(execution);
+}
+
+#endif // __NNFW_RUA_SHIM_H__
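Because these shims carry the exact NNAPI signatures, code written against the familiar C entry points is routed through whatever runtime Anchor currently holds simply by compiling it against this header. A minimal sketch:

    #include <rua/Shim.h>

    // Looks like plain NNAPI, but every call below resolves to the inline shims
    // above and therefore to the runtime returned by rua::Anchor::get().
    ANeuralNetworksModel *make_empty_model()
    {
      ANeuralNetworksModel *model = nullptr;
      if (ANeuralNetworksModel_create(&model) != ANEURALNETWORKS_NO_ERROR)
      {
        return nullptr;
      }
      ANeuralNetworksModel_finish(model);
      return model;
    }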