Diffstat (limited to 'runtimes/neurun/src')
-rw-r--r--  runtimes/neurun/src/backend/BackendManager.cc | 88
-rw-r--r--  runtimes/neurun/src/backend/BackendManager.h | 73
-rw-r--r--  runtimes/neurun/src/backend/CMakeLists.txt | 2
-rw-r--r--  runtimes/neurun/src/backend/IBackendConfig.h | 39
-rw-r--r--  runtimes/neurun/src/backend/IInitializerGenerator.h | 46
-rw-r--r--  runtimes/neurun/src/backend/IObject.h | 42
-rw-r--r--  runtimes/neurun/src/backend/IStageGenerator.h | 68
-rw-r--r--  runtimes/neurun/src/backend/ITensorBuilder.h | 57
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/BackendConfig.cc | 32
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/BackendConfig.h | 45
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/CMakeLists.txt | 17
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/InitializerGenerator.cc | 144
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/InitializerGenerator.h | 50
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/StageGenerator.cc | 538
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/StageGenerator.h | 58
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc | 79
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/TensorBuilder.h | 57
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/feature/View.h | 110
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/kernel/View.h | 87
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/operand/Object.cc | 42
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/operand/Object.h | 60
-rw-r--r--  runtimes/neurun/src/backend/cpu/BackendConfig.cc | 33
-rw-r--r--  runtimes/neurun/src/backend/cpu/BackendConfig.h | 45
-rw-r--r--  runtimes/neurun/src/backend/cpu/CMakeLists.txt | 19
-rw-r--r--  runtimes/neurun/src/backend/cpu/InitializerGenerator.cc | 208
-rw-r--r--  runtimes/neurun/src/backend/cpu/InitializerGenerator.h | 50
-rw-r--r--  runtimes/neurun/src/backend/cpu/MemoryAllocator.cc | 17
-rw-r--r--  runtimes/neurun/src/backend/cpu/MemoryAllocator.h | 123
-rw-r--r--  runtimes/neurun/src/backend/cpu/StageGenerator.cc | 536
-rw-r--r--  runtimes/neurun/src/backend/cpu/StageGenerator.h | 59
-rw-r--r--  runtimes/neurun/src/backend/cpu/TensorBuilder.cc | 73
-rw-r--r--  runtimes/neurun/src/backend/cpu/TensorBuilder.h | 57
-rw-r--r--  runtimes/neurun/src/backend/cpu/operand/Object.cc | 36
-rw-r--r--  runtimes/neurun/src/backend/cpu/operand/Object.h | 60
-rw-r--r--  runtimes/neurun/src/backend/cpu/operand/Tensor.cc | 33
-rw-r--r--  runtimes/neurun/src/backend/cpu/operand/Tensor.h | 72
-rw-r--r--  runtimes/neurun/src/codegen/BackendResolver.cc | 27
-rw-r--r--  runtimes/neurun/src/codegen/BackendResolver.h | 82
-rw-r--r--  runtimes/neurun/src/codegen/IPlanBuilder.h | 43
-rw-r--r--  runtimes/neurun/src/codegen/Plan.cc | 27
-rw-r--r--  runtimes/neurun/src/codegen/Plan.h | 58
-rw-r--r--  runtimes/neurun/src/codegen/PlanBuilder.cc | 75
-rw-r--r--  runtimes/neurun/src/codegen/PlanBuilder.h | 86
-rw-r--r--  runtimes/neurun/src/codegen/Planner.cc | 253
-rw-r--r--  runtimes/neurun/src/codegen/Planner.h | 67
-rw-r--r--  runtimes/neurun/src/codegen/operand/Context.cc | 35
-rw-r--r--  runtimes/neurun/src/codegen/operand/Context.h | 64
-rw-r--r--  runtimes/neurun/src/codegen/operation/Sequence.cc | 30
-rw-r--r--  runtimes/neurun/src/codegen/operation/Sequence.h | 55
-rw-r--r--  runtimes/neurun/src/exec/Sink.h | 123
-rw-r--r--  runtimes/neurun/src/exec/Source.h | 126
-rw-r--r--  runtimes/neurun/src/frontend/compilation.cc | 73
-rw-r--r--  runtimes/neurun/src/frontend/event.cc | 31
-rw-r--r--  runtimes/neurun/src/frontend/execution.cc | 235
-rw-r--r--  runtimes/neurun/src/frontend/memory.cc | 45
-rw-r--r--  runtimes/neurun/src/frontend/model.cc | 434
-rw-r--r--  runtimes/neurun/src/frontend/wrapper/compilation.cc | 66
-rw-r--r--  runtimes/neurun/src/frontend/wrapper/compilation.h | 43
-rw-r--r--  runtimes/neurun/src/frontend/wrapper/event.h | 24
-rw-r--r--  runtimes/neurun/src/frontend/wrapper/execution.h | 69
-rw-r--r--  runtimes/neurun/src/frontend/wrapper/memory.cc | 31
-rw-r--r--  runtimes/neurun/src/frontend/wrapper/memory.h | 38
-rw-r--r--  runtimes/neurun/src/frontend/wrapper/model.cc | 40
-rw-r--r--  runtimes/neurun/src/frontend/wrapper/model.h | 41
-rw-r--r--  runtimes/neurun/src/graph/Graph.cc | 315
-rw-r--r--  runtimes/neurun/src/graph/Graph.h | 129
-rw-r--r--  runtimes/neurun/src/graph/Index.h | 75
-rw-r--r--  runtimes/neurun/src/graph/dumper/Dumper.cc | 118
-rw-r--r--  runtimes/neurun/src/graph/dumper/Dumper.h | 50
-rw-r--r--  runtimes/neurun/src/graph/operand/Data.h | 78
-rw-r--r--  runtimes/neurun/src/graph/operand/DataType.h | 43
-rw-r--r--  runtimes/neurun/src/graph/operand/Index.h | 51
-rw-r--r--  runtimes/neurun/src/graph/operand/IndexSet.cc | 56
-rw-r--r--  runtimes/neurun/src/graph/operand/IndexSet.h | 61
-rw-r--r--  runtimes/neurun/src/graph/operand/Layout.h | 54
-rw-r--r--  runtimes/neurun/src/graph/operand/LayoutSet.cc | 69
-rw-r--r--  runtimes/neurun/src/graph/operand/LayoutSet.h | 61
-rw-r--r--  runtimes/neurun/src/graph/operand/LowerInfo.cc | 30
-rw-r--r--  runtimes/neurun/src/graph/operand/LowerInfo.h | 80
-rw-r--r--  runtimes/neurun/src/graph/operand/Object.cc | 117
-rw-r--r--  runtimes/neurun/src/graph/operand/Object.h | 116
-rw-r--r--  runtimes/neurun/src/graph/operand/Set.cc | 68
-rw-r--r--  runtimes/neurun/src/graph/operand/Set.h | 60
-rw-r--r--  runtimes/neurun/src/graph/operand/Shape.cc | 73
-rw-r--r--  runtimes/neurun/src/graph/operand/Shape.h | 59
-rw-r--r--  runtimes/neurun/src/graph/operand/Shape4DConvert.h | 57
-rw-r--r--  runtimes/neurun/src/graph/operand/TypeInfo.cc | 35
-rw-r--r--  runtimes/neurun/src/graph/operand/TypeInfo.h | 62
-rw-r--r--  runtimes/neurun/src/graph/operation/AvgPool2D.cc | 82
-rw-r--r--  runtimes/neurun/src/graph/operation/AvgPool2D.h | 72
-rw-r--r--  runtimes/neurun/src/graph/operation/Concat.cc | 69
-rw-r--r--  runtimes/neurun/src/graph/operation/Concat.h | 61
-rw-r--r--  runtimes/neurun/src/graph/operation/Conv2D.cc | 79
-rw-r--r--  runtimes/neurun/src/graph/operation/Conv2D.h | 69
-rw-r--r--  runtimes/neurun/src/graph/operation/FullyConnected.cc | 69
-rw-r--r--  runtimes/neurun/src/graph/operation/FullyConnected.h | 62
-rw-r--r--  runtimes/neurun/src/graph/operation/Index.h | 35
-rw-r--r--  runtimes/neurun/src/graph/operation/IndexList.cc | 40
-rw-r--r--  runtimes/neurun/src/graph/operation/IndexList.h | 55
-rw-r--r--  runtimes/neurun/src/graph/operation/LowerInfo.cc | 33
-rw-r--r--  runtimes/neurun/src/graph/operation/LowerInfo.h | 45
-rw-r--r--  runtimes/neurun/src/graph/operation/MaxPool2D.cc | 82
-rw-r--r--  runtimes/neurun/src/graph/operation/MaxPool2D.h | 72
-rw-r--r--  runtimes/neurun/src/graph/operation/NOP.cc | 36
-rw-r--r--  runtimes/neurun/src/graph/operation/NOP.h | 47
-rw-r--r--  runtimes/neurun/src/graph/operation/Node.cc | 41
-rw-r--r--  runtimes/neurun/src/graph/operation/Node.h | 73
-rw-r--r--  runtimes/neurun/src/graph/operation/NodeVisitor.h | 56
-rw-r--r--  runtimes/neurun/src/graph/operation/Op.lst | 30
-rw-r--r--  runtimes/neurun/src/graph/operation/Permute.cc | 41
-rw-r--r--  runtimes/neurun/src/graph/operation/Permute.h | 33
-rw-r--r--  runtimes/neurun/src/graph/operation/Reshape.cc | 67
-rw-r--r--  runtimes/neurun/src/graph/operation/Reshape.h | 51
-rw-r--r--  runtimes/neurun/src/graph/operation/Set.cc | 67
-rw-r--r--  runtimes/neurun/src/graph/operation/Set.h | 62
-rw-r--r--  runtimes/neurun/src/graph/operation/Softmax.cc | 67
-rw-r--r--  runtimes/neurun/src/graph/operation/Softmax.h | 62
-rw-r--r--  runtimes/neurun/src/graph/verifier/IVerifier.cc | 72
-rw-r--r--  runtimes/neurun/src/graph/verifier/IVerifier.h | 62
-rw-r--r--  runtimes/neurun/src/internal/Convert.cc | 59
-rw-r--r--  runtimes/neurun/src/internal/Convert.h | 40
-rw-r--r--  runtimes/neurun/src/internal/Padding.cc | 72
-rw-r--r--  runtimes/neurun/src/internal/Padding.h | 48
-rw-r--r--  runtimes/neurun/src/internal/nnapi/feature/Reader.h | 75
-rw-r--r--  runtimes/neurun/src/internal/nnapi/feature/Utils.h | 60
-rw-r--r--  runtimes/neurun/src/internal/nnapi/feature/View.h | 92
-rw-r--r--  runtimes/neurun/src/internal/nnapi/kernel/Reader.h | 70
-rw-r--r--  runtimes/neurun/src/internal/nnapi/kernel/View.h | 88
-rw-r--r--  runtimes/neurun/src/kernel/CMakeLists.txt | 2
-rw-r--r--  runtimes/neurun/src/kernel/acl_cl/CMakeLists.txt | 15
-rw-r--r--  runtimes/neurun/src/kernel/acl_cl/ConcatLayer.cc | 158
-rw-r--r--  runtimes/neurun/src/kernel/acl_cl/ConcatLayer.h | 67
-rw-r--r--  runtimes/neurun/src/kernel/acl_cl/TensorConvertFromCommonLayer.cc | 94
-rw-r--r--  runtimes/neurun/src/kernel/acl_cl/TensorConvertFromCommonLayer.h | 67
-rw-r--r--  runtimes/neurun/src/kernel/acl_cl/TensorConvertToCommonLayer.cc | 94
-rw-r--r--  runtimes/neurun/src/kernel/acl_cl/TensorConvertToCommonLayer.h | 67
-rw-r--r--  runtimes/neurun/src/kernel/cpu/AvgPoolLayer.cc | 118
-rw-r--r--  runtimes/neurun/src/kernel/cpu/AvgPoolLayer.h | 78
-rw-r--r--  runtimes/neurun/src/kernel/cpu/CMakeLists.txt | 14
-rw-r--r--  runtimes/neurun/src/kernel/cpu/ConcatLayer.cc | 109
-rw-r--r--  runtimes/neurun/src/kernel/cpu/ConcatLayer.h | 66
-rw-r--r--  runtimes/neurun/src/kernel/cpu/ConvolutionLayer.cc | 202
-rw-r--r--  runtimes/neurun/src/kernel/cpu/ConvolutionLayer.h | 79
-rw-r--r--  runtimes/neurun/src/kernel/cpu/FullyConnectedLayer.cc | 139
-rw-r--r--  runtimes/neurun/src/kernel/cpu/FullyConnectedLayer.h | 69
-rw-r--r--  runtimes/neurun/src/kernel/cpu/MaxPoolLayer.cc | 118
-rw-r--r--  runtimes/neurun/src/kernel/cpu/MaxPoolLayer.h | 78
-rw-r--r--  runtimes/neurun/src/kernel/cpu/OperationUtils.cc | 230
-rw-r--r--  runtimes/neurun/src/kernel/cpu/OperationUtils.h | 103
-rw-r--r--  runtimes/neurun/src/kernel/cpu/ReshapeLayer.cc | 57
-rw-r--r--  runtimes/neurun/src/kernel/cpu/ReshapeLayer.h | 58
-rw-r--r--  runtimes/neurun/src/kernel/cpu/SoftMaxLayer.cc | 128
-rw-r--r--  runtimes/neurun/src/kernel/cpu/SoftMaxLayer.h | 64
-rw-r--r--  runtimes/neurun/src/kernel/cpu/TensorConvertFromCommonLayer.cc | 90
-rw-r--r--  runtimes/neurun/src/kernel/cpu/TensorConvertFromCommonLayer.h | 67
-rw-r--r--  runtimes/neurun/src/kernel/cpu/TensorConvertToCommonLayer.cc | 90
-rw-r--r--  runtimes/neurun/src/kernel/cpu/TensorConvertToCommonLayer.h | 67
-rw-r--r--  runtimes/neurun/src/library_info.cc | 17
-rw-r--r--  runtimes/neurun/src/linear/Linear.cc | 73
-rw-r--r--  runtimes/neurun/src/linear/Linear.h | 71
-rw-r--r--  runtimes/neurun/src/logging.h | 53
161 files changed, 12591 insertions, 0 deletions
diff --git a/runtimes/neurun/src/backend/BackendManager.cc b/runtimes/neurun/src/backend/BackendManager.cc
new file mode 100644
index 000000000..fb7d69108
--- /dev/null
+++ b/runtimes/neurun/src/backend/BackendManager.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "BackendManager.h"
+
+#include "backend/acl_cl/BackendConfig.h"
+#include "backend/acl_cl/TensorBuilder.h"
+#include "backend/acl_cl/InitializerGenerator.h"
+#include "backend/acl_cl/StageGenerator.h"
+#include "backend/cpu/BackendConfig.h"
+#include "backend/cpu/TensorBuilder.h"
+#include "backend/cpu/InitializerGenerator.h"
+#include "backend/cpu/StageGenerator.h"
+
+namespace neurun
+{
+namespace backend
+{
+
+Backend::Backend(const std::shared_ptr<neurun::backend::IBackendConfig> &backend_config,
+ const std::shared_ptr<neurun::backend::IInitializerGenerator> &initializer_gen,
+ const std::shared_ptr<neurun::backend::IStageGenerator> &stage_gen)
+ : _config(backend_config), _initializer_gen(initializer_gen), _stage_gen(stage_gen)
+{
+ backend_config->initialize();
+}
+
+const std::shared_ptr<neurun::backend::IBackendConfig> Backend::config() const { return _config; }
+
+const std::shared_ptr<neurun::backend::IInitializerGenerator> Backend::initializer_gen() const
+{
+ return _initializer_gen;
+}
+
+const std::shared_ptr<neurun::backend::IStageGenerator> Backend::stage_gen() const
+{
+ return _stage_gen;
+}
+
+const std::shared_ptr<neurun::backend::ITensorBuilder> Backend::tensor_builder() const
+{
+ return _stage_gen->tensor_builder();
+}
+
+BackendManager::BackendManager(const neurun::graph::operand::Set &operands)
+{
+ // Add arm_compute backend
+ {
+ using namespace ::neurun::backend::acl_cl;
+ auto acl_backend_initializer = std::make_shared<BackendConfig>();
+ auto acl_tensor_builder = std::make_shared<TensorBuilder>();
+ auto acl_initializer_gen = std::make_shared<InitializerGenerator>(operands);
+ auto acl_stage_gen = std::make_shared<StageGenerator>(operands, acl_tensor_builder);
+
+ // TODO Do not use magic string for backend id
+ _gen_map["acl_cl"] = {acl_backend_initializer, acl_initializer_gen, acl_stage_gen};
+ }
+
+ // Add CPU backend
+ {
+ using namespace ::neurun::backend::cpu;
+ auto cpu_backend_initializer = std::make_shared<BackendConfig>();
+ auto cpu_tensor_builder = std::make_shared<TensorBuilder>();
+ auto cpu_initializer_gen = std::make_shared<InitializerGenerator>(operands);
+ auto cpu_stage_gen = std::make_shared<StageGenerator>(operands, cpu_tensor_builder);
+
+ // TODO Do not use magic string for backend id
+ _gen_map["cpu"] = {cpu_backend_initializer, cpu_initializer_gen, cpu_stage_gen};
+ }
+}
+
+Backend BackendManager::get(const std::string &key) { return _gen_map.at(key); }
+
+} // namespace backend
+} // namespace neurun
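
Taken together, BackendManager binds each backend id to a (config, initializer generator, stage generator) triple, and Backend's constructor eagerly calls initialize() on the config. A minimal usage sketch follows; the operand set and the hard-coded "cpu" key are placeholders for illustration, not part of this change:

    // Sketch only: assumes an already-populated graph operand set.
    neurun::backend::BackendManager manager{operands};

    // Backend ids are still magic strings at this point (see the TODOs above).
    neurun::backend::Backend backend = manager.get("cpu");

    backend.config();          // shared IBackendConfig, already initialize()d
    backend.stage_gen();       // shared IStageGenerator for this backend
    backend.tensor_builder();  // convenience for stage_gen()->tensor_builder()

Since get() forwards to std::map::at, an unknown backend id throws std::out_of_range rather than returning an empty Backend.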
diff --git a/runtimes/neurun/src/backend/BackendManager.h b/runtimes/neurun/src/backend/BackendManager.h
new file mode 100644
index 000000000..6f862ffe6
--- /dev/null
+++ b/runtimes/neurun/src/backend/BackendManager.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_BACKEND_MANAGER_H__
+#define __NEURUN_BACKEND_BACKEND_MANAGER_H__
+
+#include <memory>
+#include <map>
+
+#include "graph/operand/Set.h"
+
+namespace neurun
+{
+namespace backend
+{
+
+struct IBackendConfig;
+struct IInitializerGenerator;
+struct IStageGenerator;
+struct ITensorBuilder;
+
+class Backend
+{
+public:
+ Backend(const std::shared_ptr<neurun::backend::IBackendConfig> &backend_config,
+ const std::shared_ptr<neurun::backend::IInitializerGenerator> &initializer_gen,
+ const std::shared_ptr<neurun::backend::IStageGenerator> &stage_gen);
+
+ Backend(void) : _config(nullptr), _initializer_gen(nullptr), _stage_gen(nullptr)
+ {
+ // DO NOTHING
+ }
+
+public:
+ const std::shared_ptr<neurun::backend::IBackendConfig> config() const;
+ const std::shared_ptr<neurun::backend::IInitializerGenerator> initializer_gen() const;
+ const std::shared_ptr<neurun::backend::IStageGenerator> stage_gen() const;
+ const std::shared_ptr<neurun::backend::ITensorBuilder> tensor_builder() const;
+
+private:
+ std::shared_ptr<neurun::backend::IBackendConfig> _config;
+ std::shared_ptr<neurun::backend::IInitializerGenerator> _initializer_gen;
+ std::shared_ptr<neurun::backend::IStageGenerator> _stage_gen;
+};
+
+class BackendManager
+{
+public:
+ BackendManager(const neurun::graph::operand::Set &operands);
+
+ Backend get(const std::string &key);
+
+private:
+ std::map<std::string, Backend> _gen_map;
+};
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_BACKEND_MANAGER_H__
diff --git a/runtimes/neurun/src/backend/CMakeLists.txt b/runtimes/neurun/src/backend/CMakeLists.txt
new file mode 100644
index 000000000..a39823102
--- /dev/null
+++ b/runtimes/neurun/src/backend/CMakeLists.txt
@@ -0,0 +1,2 @@
+add_subdirectory(cpu)
+add_subdirectory(acl_cl)
diff --git a/runtimes/neurun/src/backend/IBackendConfig.h b/runtimes/neurun/src/backend/IBackendConfig.h
new file mode 100644
index 000000000..a6c7ce517
--- /dev/null
+++ b/runtimes/neurun/src/backend/IBackendConfig.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INTERNAL_IBACKEND_CONFIG_H__
+#define __INTERNAL_IBACKEND_CONFIG_H__
+
+#include "graph/operand/Layout.h"
+
+namespace neurun
+{
+namespace backend
+{
+
+struct IBackendConfig
+{
+ virtual ~IBackendConfig() = default;
+
+ virtual void initialize() = 0;
+ // NOTE Assume backend has only one type of operand layout
+ virtual graph::operand::Layout getOperandLayout() = 0;
+};
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __INTERNAL_IBACKEND_CONFIG_H__
diff --git a/runtimes/neurun/src/backend/IInitializerGenerator.h b/runtimes/neurun/src/backend/IInitializerGenerator.h
new file mode 100644
index 000000000..83cf87a52
--- /dev/null
+++ b/runtimes/neurun/src/backend/IInitializerGenerator.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INTERNAL_IINITIALIZER_GENERATOR_H__
+#define __INTERNAL_IINITIALIZER_GENERATOR_H__
+
+#include "arm_compute/core/ITensor.h"
+
+#include "graph/operation/Conv2D.h"
+#include "graph/operation/FullyConnected.h"
+
+using Initializer = std::function<void(::arm_compute::ITensor &)>;
+
+namespace neurun
+{
+namespace backend
+{
+
+struct IInitializerGenerator
+{
+ virtual ~IInitializerGenerator() = default;
+
+ virtual Initializer generateWeight(const graph::operation::Conv2D::Implicit::Node &node) = 0;
+ virtual Initializer generateWeight(const graph::operation::FullyConnected::Node &node) = 0;
+
+ virtual Initializer generateBias(const graph::operation::Conv2D::Implicit::Node &node) = 0;
+ virtual Initializer generateBias(const graph::operation::FullyConnected::Node &node) = 0;
+};
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __INTERNAL_IINITIALIZER_GENERATOR_H__
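
An Initializer is just a std::function that fills a single already-allocated tensor: the generator captures shapes and raw constant data by value and defers the actual copy until the backend tensor exists. A hedged sketch of the calling convention (the variable names are illustrative; the real call sites live in the codegen plan-building code later in this diff):

    // Sketch: gen is an IInitializerGenerator&, conv is a Conv2D node, and
    // tensor is the backend tensor allocated for the kernel operand.
    Initializer init = gen.generateWeight(conv);
    init(tensor); // copies (and reorders) the NNAPI constant data into tensor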
diff --git a/runtimes/neurun/src/backend/IObject.h b/runtimes/neurun/src/backend/IObject.h
new file mode 100644
index 000000000..f7d511095
--- /dev/null
+++ b/runtimes/neurun/src/backend/IObject.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_OPERAND_I_OBJECT_H__
+#define __NEURUN_BACKEND_OPERAND_I_OBJECT_H__
+
+#include <functional>
+
+#include <arm_compute/core/ITensor.h>
+
+namespace neurun
+{
+namespace backend
+{
+namespace operand
+{
+
+struct IObject
+{
+ virtual ~IObject() = default;
+ virtual ::arm_compute::ITensor *ptr(void) const = 0;
+ virtual void access(const std::function<void(::arm_compute::ITensor &tensor)> &fn) const = 0;
+};
+
+} // namespace operand
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_OPERAND_I_OBJECT_H__
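
IObject separates raw pointer access (ptr) from scoped access (access): taking a functor lets an OpenCL-backed object map the tensor to host memory before invoking the callback and unmap it afterwards, while a CPU-backed object can simply invoke the functor directly. A hedged caller-side sketch (obj is illustrative):

    // Sketch: obj is a backend::operand::IObject&.
    obj.access([](::arm_compute::ITensor &tensor) {
      // Inside this scope the tensor memory is guaranteed host-visible.
      auto *data = reinterpret_cast<float *>(tensor.buffer());
      data[0] = 1.0f;
    });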
diff --git a/runtimes/neurun/src/backend/IStageGenerator.h b/runtimes/neurun/src/backend/IStageGenerator.h
new file mode 100644
index 000000000..05959e2b1
--- /dev/null
+++ b/runtimes/neurun/src/backend/IStageGenerator.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INTERNAL_ISTAGE_GENERATOR_H__
+#define __INTERNAL_ISTAGE_GENERATOR_H__
+
+#include <memory>
+#include <functional>
+
+#include <arm_compute/runtime/IFunction.h>
+
+#include "backend/ITensorBuilder.h"
+#include "graph/operation/Conv2D.h"
+#include "graph/operation/MaxPool2D.h"
+#include "graph/operation/AvgPool2D.h"
+#include "graph/operation/Concat.h"
+#include "graph/operation/FullyConnected.h"
+#include "graph/operation/Reshape.h"
+#include "graph/operation/Softmax.h"
+#include "graph/operation/NOP.h"
+
+struct IExecutionBuilder
+{
+ virtual ~IExecutionBuilder() = default;
+
+ virtual void append(std::unique_ptr<::arm_compute::IFunction> &&f) = 0;
+};
+
+using Stage = std::function<void(IExecutionBuilder &)>;
+
+namespace neurun
+{
+namespace backend
+{
+
+struct IStageGenerator
+{
+ virtual ~IStageGenerator() = default;
+
+ virtual std::shared_ptr<ITensorBuilder> tensor_builder() = 0;
+
+ virtual Stage generate(const graph::operation::Conv2D::Implicit::Node &node) = 0;
+ virtual Stage generate(const graph::operation::MaxPool2D::Implicit::Node &node) = 0;
+ virtual Stage generate(const graph::operation::AvgPool2D::Implicit::Node &node) = 0;
+ virtual Stage generate(const graph::operation::Concat::Node &node) = 0;
+ virtual Stage generate(const graph::operation::FullyConnected::Node &node) = 0;
+ virtual Stage generate(const graph::operation::Reshape::Node &node) = 0;
+ virtual Stage generate(const graph::operation::Softmax::Node &node) = 0;
+ virtual Stage generate(const graph::operation::NOP::Node &node) = 0;
+};
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __INTERNAL_ISTAGE_GENERATOR_H__
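
A Stage is deliberately two-phase: generate() only captures operand indexes and parameters, and the returned closure runs later, once tensors exist, configuring kernels and appending finished IFunctions to an IExecutionBuilder. A minimal sketch of a builder that collects and then runs those functions (an assumption for illustration; the runtime's own builder lives in the codegen plan builder added in this change):

    // Sketch only: a trivial IExecutionBuilder.
    #include <vector>

    class VectorExecutionBuilder final : public IExecutionBuilder
    {
    public:
      void append(std::unique_ptr<::arm_compute::IFunction> &&f) override
      {
        _functions.emplace_back(std::move(f));
      }

      void run(void) const
      {
        for (const auto &fn : _functions)
          fn->run(); // execute in append order
      }

    private:
      std::vector<std::unique_ptr<::arm_compute::IFunction>> _functions;
    };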
diff --git a/runtimes/neurun/src/backend/ITensorBuilder.h b/runtimes/neurun/src/backend/ITensorBuilder.h
new file mode 100644
index 000000000..c3a07ffeb
--- /dev/null
+++ b/runtimes/neurun/src/backend/ITensorBuilder.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INTERNAL_ITENSOR_BUILDER_H__
+#define __INTERNAL_ITENSOR_BUILDER_H__
+
+#include <map>
+#include <arm_compute/core/TensorInfo.h>
+
+#include "graph/operand/Index.h"
+#include "codegen/Plan.h"
+
+namespace neurun
+{
+namespace backend
+{
+
+struct ITensorBuilder
+{
+ virtual ~ITensorBuilder(void) = default;
+ virtual void mark(const ::neurun::graph::operand::Index &ind) = 0;
+ // TODO Add an interface for adding subsumption info
+ virtual void prepare(codegen::Plan &plan,
+ const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx) = 0;
+ virtual void allocate(void) = 0;
+};
+
+} // namespace backend
+} // namespace neurun
+
+#include <set>
+#include <memory>
+
+namespace neurun
+{
+namespace backend
+{
+
+using TensorBuilderSet = std::set<std::shared_ptr<backend::ITensorBuilder>>;
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __INTERNAL_ITENSOR_BUILDER_H__
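
The implicit contract of ITensorBuilder is a three-phase lifecycle, which the implementations below guard with asserts: every operand index is mark()ed while no tensors exist yet, prepare() then materializes one backend tensor per marked index and registers it in the Plan, and allocate() finally commits backing memory. A hedged sketch of a driver for this contract (plan and tensor_info_ctx are assumed inputs; the real driver is the codegen plan builder in this same change):

    // Sketch: tb is a std::shared_ptr<backend::ITensorBuilder>.
    tb->mark(operand_index);            // 1. declare which operands it owns
    tb->prepare(plan, tensor_info_ctx); // 2. create tensors, register in plan
    tb->allocate();                     // 3. commit device/host memory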
diff --git a/runtimes/neurun/src/backend/acl_cl/BackendConfig.cc b/runtimes/neurun/src/backend/acl_cl/BackendConfig.cc
new file mode 100644
index 000000000..6b3e6b3a3
--- /dev/null
+++ b/runtimes/neurun/src/backend/acl_cl/BackendConfig.cc
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <arm_compute/runtime/CL/CLScheduler.h>
+
+#include "backend/acl_cl/BackendConfig.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_cl
+{
+
+void BackendConfig::initialize() { arm_compute::CLScheduler::get().default_init(); }
+
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/src/backend/acl_cl/BackendConfig.h b/runtimes/neurun/src/backend/acl_cl/BackendConfig.h
new file mode 100644
index 000000000..8eec9e795
--- /dev/null
+++ b/runtimes/neurun/src/backend/acl_cl/BackendConfig.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ACL_CL_BACKEND_CONFIG_H__
+#define __NEURUN_BACKEND_ACL_CL_BACKEND_CONFIG_H__
+
+#include "backend/IBackendConfig.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_cl
+{
+
+class BackendConfig : public IBackendConfig
+{
+public:
+ BackendConfig()
+ {
+ // DO NOTHING
+ }
+
+ virtual void initialize() override;
+ virtual graph::operand::Layout getOperandLayout() { return graph::operand::Layout::NCHW; }
+};
+
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ACL_CL_BACKEND_CONFIG_H__
diff --git a/runtimes/neurun/src/backend/acl_cl/CMakeLists.txt b/runtimes/neurun/src/backend/acl_cl/CMakeLists.txt
new file mode 100644
index 000000000..d64c23a80
--- /dev/null
+++ b/runtimes/neurun/src/backend/acl_cl/CMakeLists.txt
@@ -0,0 +1,17 @@
+file(GLOB_RECURSE SOURCES "*.cc")
+
+add_library(${LIB_NEURUN_BACKEND_ACL_CL} STATIC ${SOURCES})
+
+target_include_directories(${LIB_NEURUN_BACKEND_ACL_CL} PUBLIC ${NNFW_INCLUDE_DIR})
+target_include_directories(${LIB_NEURUN_BACKEND_ACL_CL} PUBLIC ${NEURUN_INCLUDE_DIR})
+target_include_directories(${LIB_NEURUN_BACKEND_ACL_CL} PUBLIC ${CMAKE_SOURCE_DIR}/externals/tensorflow) # TODO Remove this file. We should not need this.
+
+target_link_libraries(${LIB_NEURUN_BACKEND_ACL_CL} arm_compute)
+target_link_libraries(${LIB_NEURUN_BACKEND_ACL_CL} nnfw_support_nnapi)
+target_link_libraries(${LIB_NEURUN_BACKEND_ACL_CL} ${LIB_NEURUN_KERNEL_ACL_CL})
+
+target_compile_options(${LIB_NEURUN_BACKEND_ACL_CL} PRIVATE -Wall -Wextra -Werror)
+
+set_target_properties(${LIB_NEURUN_BACKEND_ACL_CL} PROPERTIES POSITION_INDEPENDENT_CODE ON)
+set_target_properties(${LIB_NEURUN_BACKEND_ACL_CL} PROPERTIES OUTPUT_NAME backend_acl_cl)
+install(TARGETS ${LIB_NEURUN_BACKEND_ACL_CL} DESTINATION lib/neurun)
diff --git a/runtimes/neurun/src/backend/acl_cl/InitializerGenerator.cc b/runtimes/neurun/src/backend/acl_cl/InitializerGenerator.cc
new file mode 100644
index 000000000..9a681b3de
--- /dev/null
+++ b/runtimes/neurun/src/backend/acl_cl/InitializerGenerator.cc
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "backend/acl_cl/InitializerGenerator.h"
+
+#include <arm_compute/core/Coordinates.h>
+
+#include "backend/acl_cl/kernel/View.h"
+#include "internal/nnapi/kernel/Reader.h"
+#include "util/kernel/IndexIterator.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_cl
+{
+
+InitializerGenerator::InitializerGenerator(const neurun::graph::operand::Set &ctx) : _ctx(ctx)
+{
+ // DO NOTHING
+}
+
+Initializer
+InitializerGenerator::generateWeight(const graph::operation::Conv2D::Implicit::Node &node)
+{
+ const ::neurun::graph::operand::Index ker_index{node.getInputs().at(1)};
+
+ const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
+ auto ker_base = _ctx.at(ker_index).data().base();
+ auto ker_size = _ctx.at(ker_index).data().size();
+
+ return [ker_shape, ker_base, ker_size](::arm_compute::ITensor &tensor) {
+ const ::internal::nnapi::kernel::Reader<float> from{ker_shape, ker_base, ker_size};
+ ::internal::arm_compute::kernel::View<float> into{&tensor};
+
+ ::nnfw::util::kernel::iterate(ker_shape)
+ << [&](uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(nth, ch, row, col);
+ into.at(nth, ch, row, col) = value;
+ };
+ };
+}
+
+Initializer InitializerGenerator::generateWeight(const graph::operation::FullyConnected::Node &node)
+{
+ const ::neurun::graph::operand::Index weight_index{node.getInputs().at(1)};
+ const ::neurun::graph::operand::Index input_index{node.getInputs().at(0)};
+
+ const auto num_output = _ctx.at(weight_index).shape().dim(0);
+ auto weight_base = _ctx.at(weight_index).data().base();
+ auto weight_size = _ctx.at(weight_index).data().size();
+
+ // NOTE We assume that input is a feature map
+ // TODO Remove this restriction!
+ const auto ifm_shape = _ctx.at(input_index).shape().asFeature();
+
+ return [num_output, ifm_shape, weight_base, weight_size](::arm_compute::ITensor &tensor) {
+ const ::nnfw::util::kernel::Shape ker_shape{num_output, ifm_shape.C, ifm_shape.H, ifm_shape.W};
+ const ::internal::nnapi::kernel::Reader<float> from{ker_shape, weight_base, weight_size};
+
+ ::nnfw::util::kernel::iterate(ker_shape)
+ << [&](uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(nth, ch, row, col);
+
+ uint32_t offset = 0;
+
+ // ARM Compute Library uses 'NCHW' ordering
+ offset += nth * ifm_shape.C * ifm_shape.H * ifm_shape.W;
+ offset += ch * ifm_shape.H * ifm_shape.W;
+ offset += row * ifm_shape.W;
+ offset += col;
+
+ const ::arm_compute::Coordinates coordinate{offset};
+
+ auto into = reinterpret_cast<float *>(tensor.ptr_to_element(coordinate));
+
+ *into = value;
+ };
+ };
+}
+
+Initializer InitializerGenerator::generateBias(const graph::operation::Conv2D::Implicit::Node &node)
+{
+ // TODO Refactor so we can reuse the common code
+
+ const ::neurun::graph::operand::Index bias_index{node.getInputs().at(2)};
+
+ auto bias_base = _ctx.at(bias_index).data().base();
+ const auto bias_size = _ctx.at(bias_index).shape().asVector();
+
+ return [bias_base, bias_size](::arm_compute::ITensor &tensor) {
+ for (int32_t n = 0; n < bias_size; ++n)
+ {
+ const ::arm_compute::Coordinates coordinate{n};
+
+ float *into = reinterpret_cast<float *>(tensor.ptr_to_element(coordinate));
+
+ const float *from = reinterpret_cast<const float *>(bias_base) + n;
+ const auto value = *from;
+
+ *into = value;
+ }
+ };
+}
+
+Initializer InitializerGenerator::generateBias(const graph::operation::FullyConnected::Node &node)
+{
+ const ::neurun::graph::operand::Index bias_index{node.getInputs().at(2)};
+
+ auto bias_base = _ctx.at(bias_index).data().base();
+ const auto bias_size = _ctx.at(bias_index).shape().asVector();
+
+ return [bias_base, bias_size](::arm_compute::ITensor &tensor) {
+ for (int32_t n = 0; n < bias_size; ++n)
+ {
+ const ::arm_compute::Coordinates coordinate{n};
+
+ float *into = reinterpret_cast<float *>(tensor.ptr_to_element(coordinate));
+
+ const float *from = reinterpret_cast<const float *>(bias_base) + n;
+ const auto value = *from;
+
+ *into = value;
+ }
+ };
+}
+
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
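
The FullyConnected weight initializer above flattens a (nth, ch, row, col) coordinate into a single linear offset because the reshaped weight is laid out row-major in NCHW order; the four offset += lines are just the expanded form of

    offset = ((nth * C + ch) * H + row) * W + col

For example, with C = 3 and H = W = 2, element (nth = 1, ch = 2, row = 1, col = 0) lands at ((1 * 3 + 2) * 2 + 1) * 2 + 0 = 22.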
diff --git a/runtimes/neurun/src/backend/acl_cl/InitializerGenerator.h b/runtimes/neurun/src/backend/acl_cl/InitializerGenerator.h
new file mode 100644
index 000000000..78b7efb5e
--- /dev/null
+++ b/runtimes/neurun/src/backend/acl_cl/InitializerGenerator.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ACL_CL_INITIALIZER_GENERATOR_H__
+#define __NEURUN_BACKEND_ACL_CL_INITIALIZER_GENERATOR_H__
+
+#include "backend/IInitializerGenerator.h"
+
+#include "graph/operand/Set.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_cl
+{
+
+class InitializerGenerator : public IInitializerGenerator
+{
+public:
+ InitializerGenerator(const neurun::graph::operand::Set &ctx);
+
+ Initializer generateWeight(const graph::operation::Conv2D::Implicit::Node &node) override;
+ Initializer generateWeight(const graph::operation::FullyConnected::Node &node) override;
+
+ Initializer generateBias(const graph::operation::Conv2D::Implicit::Node &node) override;
+ Initializer generateBias(const graph::operation::FullyConnected::Node &node) override;
+
+private:
+ const neurun::graph::operand::Set &_ctx;
+};
+
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ACL_CL_INITIALIZER_GENERATOR_H__
diff --git a/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc b/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
new file mode 100644
index 000000000..c63698fd8
--- /dev/null
+++ b/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
@@ -0,0 +1,538 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "backend/acl_cl/StageGenerator.h"
+
+#include <arm_compute/runtime/CL/functions/CLConvolutionLayer.h>
+#include <arm_compute/runtime/CL/functions/CLPoolingLayer.h>
+#include <arm_compute/runtime/CL/functions/CLActivationLayer.h>
+#include <arm_compute/runtime/CL/functions/CLReshapeLayer.h>
+#include <arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h>
+#include <arm_compute/runtime/CL/functions/CLSoftmaxLayer.h>
+
+#include "kernel/acl_cl/ConcatLayer.h"
+
+#include "internal/Padding.h"
+
+#include "graph/operand/Index.h"
+
+#include "logging.h"
+
+#include "NeuralNetworks.h"
+
+#include "support/nnapi/Utils.h"
+
+template <typename T> std::unique_ptr<T> make_layer(void) { return std::unique_ptr<T>{new T}; }
+
+::arm_compute::PadStrideInfo asPadStringInfo(const ::internal::Padding &padding,
+ const ::internal::Stride &stride)
+{
+ return ::arm_compute::PadStrideInfo{stride.horizontal,
+ stride.vertical,
+ padding.left,
+ padding.right,
+ padding.top,
+ padding.bottom,
+ ::arm_compute::DimensionRoundingType::FLOOR};
+}
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_cl
+{
+
+//
+// ActivationBuilder
+//
+class ActivationBuilder
+{
+public:
+ ActivationBuilder(IExecutionBuilder &builder) : _builder(builder)
+ {
+ // DO NOTHING
+ }
+
+private:
+ void appendReLU(::arm_compute::ICLTensor *tensor);
+
+public:
+ void append(FuseCode code, ::arm_compute::ICLTensor *tensor);
+
+private:
+ IExecutionBuilder &_builder;
+};
+
+void ActivationBuilder::appendReLU(::arm_compute::ICLTensor *ifm_alloc)
+{
+ const ::arm_compute::ActivationLayerInfo act_info{
+ ::arm_compute::ActivationLayerInfo::ActivationFunction::RELU};
+
+ auto fn = make_layer<::arm_compute::CLActivationLayer>();
+
+ fn->configure(ifm_alloc, nullptr, act_info);
+
+ _builder.append(std::move(fn));
+}
+
+void ActivationBuilder::append(FuseCode code, ::arm_compute::ICLTensor *ifm_alloc)
+{
+ switch (code)
+ {
+ case ANEURALNETWORKS_FUSED_NONE:
+ {
+ // DO NOTHING
+ break;
+ }
+ case ANEURALNETWORKS_FUSED_RELU:
+ {
+ appendReLU(ifm_alloc);
+ break;
+ }
+ default:
+ {
+ throw std::runtime_error("Not supported, yet");
+ }
+ }
+}
+
+//
+// StageGenerator
+//
+StageGenerator::StageGenerator(const neurun::graph::operand::Set &ctx,
+ const std::shared_ptr<TensorBuilder> &tensor_builder)
+ : _ctx(ctx), _tensor_builder(tensor_builder)
+{
+ // DO NOTHING
+}
+
+Stage StageGenerator::generate(const graph::operation::Conv2D::Implicit::Node &node)
+{
+ const ::neurun::graph::operand::Index ofm_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index ifm_index{node.getInputs().at(0)};
+ const ::neurun::graph::operand::Index ker_index{node.getInputs().at(1)};
+ const ::neurun::graph::operand::Index bias_index{node.getInputs().at(2)};
+
+ const ::neurun::graph::operand::Index vstride_index{node.param().vstride_index};
+ const ::neurun::graph::operand::Index hstride_index{node.param().hstride_index};
+
+ const ::neurun::graph::operand::Index padding_index{node.param().padding_index};
+ const ::neurun::graph::operand::Index activation_index{node.param().activation_index};
+
+ const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
+ const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+ const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
+
+ const PaddingCode padding_type =
+ static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
+
+ assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
+ (ANEURALNETWORKS_PADDING_VALID == padding_type));
+
+ ::internal::Stride stride;
+
+ stride.vertical = _ctx.at(vstride_index).asScalar<int32_t>();
+ stride.horizontal = _ctx.at(hstride_index).asScalar<int32_t>();
+
+ // Construct operation parameters
+ struct Param
+ {
+ int ofm_index;
+ int ifm_index;
+ int ker_index;
+ int bias_index;
+
+ ::internal::Padding padding;
+ ::internal::Stride stride;
+
+ FuseCode activation;
+ };
+
+ Param param;
+
+ param.ofm_index = ofm_index.asInt();
+ param.ifm_index = ifm_index.asInt();
+ param.ker_index = ker_index.asInt();
+ param.bias_index = bias_index.asInt();
+
+ param.stride = stride;
+ param.padding =
+ (padding_type == ANEURALNETWORKS_PADDING_SAME)
+ ? ::internal::same_padding(ifm_shape, ofm_shape, stride, ker_shape.W, ker_shape.H)
+ : ::internal::valid_padding();
+
+ param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
+
+ auto tensors = _tensor_builder;
+
+ return [tensors, param](IExecutionBuilder &builder) {
+ auto ofm_alloc = tensors->at(::neurun::graph::operand::Index{param.ofm_index}).get();
+ auto ifm_alloc = tensors->at(::neurun::graph::operand::Index{param.ifm_index}).get();
+ auto ker_alloc = tensors->at(::neurun::graph::operand::Index{param.ker_index}).get();
+ auto bias_alloc = tensors->at(::neurun::graph::operand::Index{param.bias_index}).get();
+
+ const auto conv_info = asPadStringInfo(param.padding, param.stride);
+
+ std::unique_ptr<::arm_compute::CLConvolutionLayer> fn{new ::arm_compute::CLConvolutionLayer};
+
+ fn->configure(ifm_alloc, ker_alloc, bias_alloc, ofm_alloc, conv_info);
+
+ builder.append(std::move(fn));
+
+ ActivationBuilder{builder}.append(param.activation, ofm_alloc);
+ };
+}
+
+Stage StageGenerator::generate(const graph::operation::MaxPool2D::Implicit::Node &node)
+{
+ const ::neurun::graph::operand::Index ofm_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index ifm_index{node.getInputs().at(0)};
+
+ const ::neurun::graph::operand::Index kh_index{node.param().kh_index};
+ const ::neurun::graph::operand::Index kw_index{node.param().kw_index};
+
+ const ::neurun::graph::operand::Index vstride_index{node.param().vstride_index};
+ const ::neurun::graph::operand::Index hstride_index{node.param().hstride_index};
+
+ const ::neurun::graph::operand::Index padding_index{node.param().padding_index};
+
+ const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
+ const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+
+ const int32_t kh = _ctx.at(kh_index).asScalar<int32_t>();
+ const int32_t kw = _ctx.at(kw_index).asScalar<int32_t>();
+
+ const int32_t vstride = _ctx.at(vstride_index).asScalar<int32_t>();
+ const int32_t hstride = _ctx.at(hstride_index).asScalar<int32_t>();
+
+ const PaddingCode padding_type =
+ static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
+
+ // Construct operation parameters
+ struct Param
+ {
+ int ofm_index;
+ int ifm_index;
+
+ uint32_t kw;
+ uint32_t kh;
+
+ ::internal::Padding padding;
+ ::internal::Stride stride;
+
+ // TODO Add 'activation' field
+ };
+
+ Param param;
+
+ param.ofm_index = ofm_index.asInt();
+ param.ifm_index = ifm_index.asInt();
+
+ param.kh = kh;
+ param.kw = kw;
+
+ param.stride.vertical = vstride;
+ param.stride.horizontal = hstride;
+
+ param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
+ ? ::internal::same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
+ : ::internal::valid_padding();
+
+ VERBOSE(MaxPool2D) << "IFM_H: " << ifm_shape.H << std::endl;
+ VERBOSE(MaxPool2D) << "IFM_W: " << ifm_shape.W << std::endl;
+ VERBOSE(MaxPool2D) << "OFM_H: " << ofm_shape.H << std::endl;
+ VERBOSE(MaxPool2D) << "OFM_W: " << ofm_shape.W << std::endl;
+ VERBOSE(MaxPool2D) << "KER_H: " << kh << std::endl;
+ VERBOSE(MaxPool2D) << "KER_W: " << kw << std::endl;
+ VERBOSE(MaxPool2D) << "STRIDE_H: " << vstride << std::endl;
+ VERBOSE(MaxPool2D) << "STRIDE_W: " << hstride << std::endl;
+ VERBOSE(MaxPool2D) << "PAD(T): " << param.padding.top << std::endl;
+ VERBOSE(MaxPool2D) << "PAD(B): " << param.padding.bottom << std::endl;
+ VERBOSE(MaxPool2D) << "PAD(L): " << param.padding.left << std::endl;
+ VERBOSE(MaxPool2D) << "PAD(R): " << param.padding.right << std::endl;
+
+ auto tensors = _tensor_builder;
+
+ return [tensors, param](IExecutionBuilder &builder) {
+ auto ofm_alloc = tensors->at(::neurun::graph::operand::Index{param.ofm_index}).get();
+ auto ifm_alloc = tensors->at(::neurun::graph::operand::Index{param.ifm_index}).get();
+
+ ::arm_compute::PoolingLayerInfo info{::arm_compute::PoolingType::MAX,
+ ::arm_compute::Size2D{param.kw, param.kh},
+ asPadStringInfo(param.padding, param.stride)};
+
+ std::unique_ptr<::arm_compute::CLPoolingLayer> fn{new ::arm_compute::CLPoolingLayer};
+
+ fn->configure(ifm_alloc, ofm_alloc, info);
+
+ builder.append(std::move(fn));
+ };
+}
+
+Stage StageGenerator::generate(const graph::operation::AvgPool2D::Implicit::Node &node)
+{
+ const ::neurun::graph::operand::Index ofm_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index ifm_index{node.getInputs().at(0)};
+
+ const ::neurun::graph::operand::Index kh_index{node.param().kh_index};
+ const ::neurun::graph::operand::Index kw_index{node.param().kw_index};
+
+ const ::neurun::graph::operand::Index vstride_index{node.param().vstride_index};
+ const ::neurun::graph::operand::Index hstride_index{node.param().hstride_index};
+
+ const ::neurun::graph::operand::Index padding_index{node.param().padding_index};
+
+ const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
+ const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+
+ const int32_t kh = _ctx.at(kh_index).asScalar<int32_t>();
+ const int32_t kw = _ctx.at(kw_index).asScalar<int32_t>();
+
+ const int32_t vstride = _ctx.at(vstride_index).asScalar<int32_t>();
+ const int32_t hstride = _ctx.at(hstride_index).asScalar<int32_t>();
+
+ const PaddingCode padding_type =
+ static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
+
+ assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
+ (ANEURALNETWORKS_PADDING_VALID == padding_type));
+
+ // Construct operation parameters
+ struct Param
+ {
+ int ofm_index;
+ int ifm_index;
+
+ uint32_t kw;
+ uint32_t kh;
+
+ ::internal::Padding padding;
+ ::internal::Stride stride;
+
+ // TODO Add 'activation' field
+ };
+
+ Param param;
+
+ param.ofm_index = ofm_index.asInt();
+ param.ifm_index = ifm_index.asInt();
+
+ param.kh = kh;
+ param.kw = kw;
+
+ param.stride.vertical = vstride;
+ param.stride.horizontal = hstride;
+
+ param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
+ ? ::internal::same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
+ : ::internal::valid_padding();
+
+ VERBOSE(AvgPool2D) << "IFM_H: " << ifm_shape.H << std::endl;
+ VERBOSE(AvgPool2D) << "IFM_W: " << ifm_shape.W << std::endl;
+ VERBOSE(AvgPool2D) << "OFM_H: " << ofm_shape.H << std::endl;
+ VERBOSE(AvgPool2D) << "OFM_W: " << ofm_shape.W << std::endl;
+ VERBOSE(AvgPool2D) << "KER_H: " << kh << std::endl;
+ VERBOSE(AvgPool2D) << "KER_W: " << kw << std::endl;
+ VERBOSE(AvgPool2D) << "STRIDE_H: " << vstride << std::endl;
+ VERBOSE(AvgPool2D) << "STRIDE_W: " << hstride << std::endl;
+ VERBOSE(AvgPool2D) << "PAD: " << ::nnfw::support::nnapi::to_string(padding_type) << std::endl;
+ VERBOSE(AvgPool2D) << "PAD(T): " << param.padding.top << std::endl;
+ VERBOSE(AvgPool2D) << "PAD(B): " << param.padding.bottom << std::endl;
+ VERBOSE(AvgPool2D) << "PAD(L): " << param.padding.left << std::endl;
+ VERBOSE(AvgPool2D) << "PAD(R): " << param.padding.right << std::endl;
+
+ auto tensors = _tensor_builder;
+
+ return [tensors, param](IExecutionBuilder &builder) {
+ auto ofm_alloc = tensors->at(::neurun::graph::operand::Index{param.ofm_index}).get();
+ auto ifm_alloc = tensors->at(::neurun::graph::operand::Index{param.ifm_index}).get();
+
+ ::arm_compute::PoolingLayerInfo info{
+ ::arm_compute::PoolingType::AVG, ::arm_compute::Size2D{param.kw, param.kh},
+ asPadStringInfo(param.padding, param.stride), true /* exclude_padding */};
+
+ std::unique_ptr<::arm_compute::CLPoolingLayer> fn{new ::arm_compute::CLPoolingLayer};
+
+ fn->configure(ifm_alloc, ofm_alloc, info);
+
+ builder.append(std::move(fn));
+ };
+}
+
+Stage StageGenerator::generate(const graph::operation::Concat::Node &node)
+{
+ const ::neurun::graph::operand::Index ofm_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index axis_index{node.param().axis_index};
+
+ struct Param
+ {
+ int32_t output_index;
+ std::vector<int32_t> input_indexes;
+
+ int32_t axis;
+ };
+
+ Param param;
+
+ param.output_index = ofm_index.asInt();
+ for (const auto &e : node.getInputs())
+ {
+ param.input_indexes.emplace_back(e.asInt());
+ }
+ param.axis = _ctx.at(axis_index).asScalar<int32_t>();
+
+ auto tensors = _tensor_builder;
+
+ return [tensors, param](IExecutionBuilder &builder) {
+ auto output_alloc = tensors->at(::neurun::graph::operand::Index{param.output_index}).get();
+
+ std::vector<::arm_compute::ICLTensor *> input_allocs;
+ for (auto ifm_ind : param.input_indexes)
+ {
+ input_allocs.emplace_back(tensors->at(::neurun::graph::operand::Index{ifm_ind}).get());
+ }
+
+ std::unique_ptr<::neurun::kernel::acl_cl::ConcatLayer> fn{
+ new ::neurun::kernel::acl_cl::ConcatLayer};
+
+ fn->configure(input_allocs, param.axis, output_alloc);
+
+ builder.append(std::move(fn));
+ };
+}
+
+Stage StageGenerator::generate(const graph::operation::FullyConnected::Node &node)
+{
+ const ::neurun::graph::operand::Index output_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index input_index{node.getInputs().at(0)};
+ const ::neurun::graph::operand::Index weight_index{node.getInputs().at(1)};
+ const ::neurun::graph::operand::Index bias_index{node.getInputs().at(2)};
+ const ::neurun::graph::operand::Index activation_index{node.param().activation_index};
+
+ // Construct operation parameters
+ struct Param
+ {
+ int output_index;
+
+ int input_index;
+ int weight_index;
+ int bias_index;
+
+ FuseCode activation;
+ };
+
+ Param param;
+
+ param.output_index = output_index.asInt();
+ param.input_index = input_index.asInt();
+ param.weight_index = weight_index.asInt();
+ param.bias_index = bias_index.asInt();
+
+ param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
+
+ auto tensors = _tensor_builder;
+
+ return [tensors, param](IExecutionBuilder &builder) {
+ auto output_alloc = tensors->at(::neurun::graph::operand::Index{param.output_index}).get();
+ auto input_alloc = tensors->at(::neurun::graph::operand::Index{param.input_index}).get();
+ auto weight_alloc = tensors->at(::neurun::graph::operand::Index{param.weight_index}).get();
+ auto bias_alloc = tensors->at(::neurun::graph::operand::Index{param.bias_index}).get();
+
+ auto fn = make_layer<::arm_compute::CLFullyConnectedLayer>();
+
+ fn->configure(input_alloc, weight_alloc, bias_alloc, output_alloc);
+
+ builder.append(std::move(fn));
+
+ ActivationBuilder{builder}.append(param.activation, output_alloc);
+ };
+}
+
+Stage StageGenerator::generate(const graph::operation::Reshape::Node &node)
+{
+ const ::neurun::graph::operand::Index output_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index input_index{node.getInputs().at(0)};
+
+ struct Param
+ {
+ int output_index;
+ int input_index;
+ };
+
+ Param param;
+
+ param.output_index = output_index.asInt();
+ param.input_index = input_index.asInt();
+
+ auto tensors = _tensor_builder;
+
+ return [tensors, param](IExecutionBuilder &builder) {
+ auto output_alloc = tensors->at(::neurun::graph::operand::Index{param.output_index}).get();
+ auto input_alloc = tensors->at(::neurun::graph::operand::Index{param.input_index}).get();
+
+ auto fn = make_layer<::arm_compute::CLReshapeLayer>();
+
+ fn->configure(input_alloc, output_alloc);
+
+ builder.append(std::move(fn));
+ };
+}
+
+Stage StageGenerator::generate(const graph::operation::Softmax::Node &node)
+{
+ const ::neurun::graph::operand::Index output_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index input_index{node.getInputs().at(0)};
+ const ::neurun::graph::operand::Index scale_index{node.param().scale_index};
+
+ assert(_ctx.at(scale_index).shape().rank() == 0);
+
+ struct Param
+ {
+ int output_index;
+ int input_index;
+ float scale;
+ };
+
+ Param param;
+
+ param.output_index = output_index.asInt();
+ param.input_index = input_index.asInt();
+ param.scale = _ctx.at(scale_index).asScalar<float>();
+
+ auto tensors = _tensor_builder;
+
+ return [tensors, param](IExecutionBuilder &builder) {
+ auto output_alloc = tensors->at(::neurun::graph::operand::Index{param.output_index}).get();
+ auto input_alloc = tensors->at(::neurun::graph::operand::Index{param.input_index}).get();
+
+ auto fn = make_layer<::arm_compute::CLSoftmaxLayer>();
+
+ fn->configure(input_alloc, output_alloc, param.scale);
+
+ builder.append(std::move(fn));
+ };
+}
+
+Stage StageGenerator::generate(const graph::operation::NOP::Node & /* node */)
+{
+ // DO NOTHING
+ return nullptr;
+}
+
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
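
Every generator above follows the same shape: scalar parameters are read out of _ctx eagerly, packed into a local POD Param, and captured by value together with the tensor builder, so the returned closure never touches the operand set again. One consumption detail is worth a hedged sketch (gen, node, and builder are illustrative):

    // Sketch: gen is a StageGenerator, node a lowered operation node.
    Stage stage = gen.generate(node); // cheap: only captures Param
    if (stage)        // NOP deliberately yields a null Stage (see above)
      stage(builder); // expensive: configures CL kernels and appends them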
diff --git a/runtimes/neurun/src/backend/acl_cl/StageGenerator.h b/runtimes/neurun/src/backend/acl_cl/StageGenerator.h
new file mode 100644
index 000000000..921604649
--- /dev/null
+++ b/runtimes/neurun/src/backend/acl_cl/StageGenerator.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ACL_CL_STAGE_GENERATOR_H__
+#define __NEURUN_BACKEND_ACL_CL_STAGE_GENERATOR_H__
+
+#include "backend/IStageGenerator.h"
+
+#include "graph/operand/Set.h"
+#include "backend/acl_cl/TensorBuilder.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_cl
+{
+
+class StageGenerator : public IStageGenerator
+{
+public:
+ StageGenerator(const neurun::graph::operand::Set &ctx,
+ const std::shared_ptr<TensorBuilder> &tensor_builder);
+
+ virtual std::shared_ptr<ITensorBuilder> tensor_builder() override { return _tensor_builder; }
+
+ virtual Stage generate(const graph::operation::Conv2D::Implicit::Node &node) override;
+ virtual Stage generate(const graph::operation::MaxPool2D::Implicit::Node &node) override;
+ virtual Stage generate(const graph::operation::AvgPool2D::Implicit::Node &node) override;
+ virtual Stage generate(const graph::operation::Concat::Node &node) override;
+ virtual Stage generate(const graph::operation::FullyConnected::Node &node) override;
+ virtual Stage generate(const graph::operation::Reshape::Node &node) override;
+ virtual Stage generate(const graph::operation::Softmax::Node &node) override;
+ virtual Stage generate(const graph::operation::NOP::Node &node) override;
+
+private:
+ const neurun::graph::operand::Set &_ctx;
+ std::shared_ptr<TensorBuilder> _tensor_builder;
+};
+
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ACL_CL_STAGE_GENERATOR_H__
diff --git a/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc b/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc
new file mode 100644
index 000000000..05943c26a
--- /dev/null
+++ b/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "backend/acl_cl/TensorBuilder.h"
+
+#include <cassert>
+
+#include "operand/Object.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_cl
+{
+
+TensorBuilder::TensorBuilder()
+{
+ // DO NOTHING
+}
+
+void TensorBuilder::mark(const ::neurun::graph::operand::Index &ind)
+{
+ assert(_tensors.size() == 0);
+
+ _inds.insert(ind);
+}
+
+void TensorBuilder::prepare(codegen::Plan &plan,
+ const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx)
+{
+ assert(_tensors.size() == 0);
+
+ // TODO Handle SubTensor(subsumption)
+ // Currently this TensorBuilder does not have subsumption info yet
+
+ for (auto ind_int : _inds)
+ {
+ ::neurun::graph::operand::Index ind{ind_int};
+ auto tensor = std::make_shared<::arm_compute::CLTensor>();
+ tensor->allocator()->init(tensor_info_ctx.at(ind.asInt()));
+ plan.operands().set(ind, std::make_shared<operand::Object>(tensor));
+ _tensors[ind] = tensor;
+ }
+}
+
+void TensorBuilder::allocate(void)
+{
+ assert(_inds.size() == _tensors.size());
+
+ for (const auto &tensor_entry : _tensors)
+ {
+ auto tensor = tensor_entry.second;
+ tensor->allocator()->allocate();
+ }
+}
+
+std::shared_ptr<::arm_compute::CLTensor>
+TensorBuilder::at(const ::neurun::graph::operand::Index &ind)
+{
+ return _tensors.at(ind);
+}
+
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
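Taken together, the three overrides above form a two-phase lifecycle: operands are mark()-ed while the plan is built, prepare() creates one CLTensor per marked index, and allocate() commits CL memory in a single pass. A usage sketch, assuming a codegen::Plan and a filled tensor_info_ctx map already exist:

    using neurun::graph::operand::Index;

    auto builder = std::make_shared<neurun::backend::acl_cl::TensorBuilder>();
    builder->mark(Index{0});                 // while visiting the graph
    builder->mark(Index{1});
    builder->prepare(plan, tensor_info_ctx); // one CLTensor per marked operand
    builder->allocate();                     // single CL allocation pass
    auto cl_tensor = builder->at(Index{0});  // backing ::arm_compute::CLTensor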
diff --git a/runtimes/neurun/src/backend/acl_cl/TensorBuilder.h b/runtimes/neurun/src/backend/acl_cl/TensorBuilder.h
new file mode 100644
index 000000000..0a0f4e9ca
--- /dev/null
+++ b/runtimes/neurun/src/backend/acl_cl/TensorBuilder.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ACL_CL_TENSOR_BUILDER_H__
+#define __NEURUN_BACKEND_ACL_CL_TENSOR_BUILDER_H__
+
+#include "backend/ITensorBuilder.h"
+
+#include <map>
+#include <memory>
+#include <unordered_map>
+#include <unordered_set>
+
+#include <arm_compute/runtime/CL/CLTensor.h>
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_cl
+{
+
+class Plan;
+
+class TensorBuilder : public ITensorBuilder
+{
+public:
+ TensorBuilder();
+
+ virtual void mark(const ::neurun::graph::operand::Index &ind) override;
+ virtual void prepare(codegen::Plan &plan,
+ const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx) override;
+ virtual void allocate(void) override;
+
+ std::shared_ptr<::arm_compute::CLTensor> at(const ::neurun::graph::operand::Index &ind);
+
+private:
+ std::unordered_set<graph::operand::Index> _inds;
+ std::unordered_map<graph::operand::Index, std::shared_ptr<::arm_compute::CLTensor>> _tensors;
+};
+
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ACL_CL_TENSOR_BUILDER_H__
diff --git a/runtimes/neurun/src/backend/acl_cl/feature/View.h b/runtimes/neurun/src/backend/acl_cl/feature/View.h
new file mode 100644
index 000000000..12025ce01
--- /dev/null
+++ b/runtimes/neurun/src/backend/acl_cl/feature/View.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INTERNAL_ARM_COMPUTE_FEATURE_VIEW_H__
+#define __INTERNAL_ARM_COMPUTE_FEATURE_VIEW_H__
+
+#include "util/feature/Reader.h"
+
+#include <arm_compute/core/ITensor.h>
+
+#include <cassert>
+
+namespace internal
+{
+namespace arm_compute
+{
+namespace feature
+{
+
+template <typename T> class View;
+
+template <> class View<float> final : public nnfw::util::feature::Reader<float>
+{
+public:
+ View(::arm_compute::ITensor *tensor) : _tensor{tensor}
+ {
+ assert(tensor->info()->data_type() == ::arm_compute::DataType::F32);
+
+ // TODO Validate whether tensor is a feature map, or not
+
+ _shape.C = tensor->info()->dimension(2);
+ _shape.H = tensor->info()->dimension(1);
+ _shape.W = tensor->info()->dimension(0);
+ }
+
+public:
+ const ::nnfw::util::feature::Shape &shape(void) const { return _shape; }
+
+public:
+ float at(uint32_t ch, uint32_t row, uint32_t col) const override
+ {
+ const auto offset = feature_index_to_byte_offset(ch, row, col);
+
+ float *ptr = reinterpret_cast<float *>(_tensor->buffer() + offset);
+
+ return *ptr;
+ }
+ float at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const override
+ {
+ const auto offset = feature_index_to_byte_offset(batch, ch, row, col);
+
+ float *ptr = reinterpret_cast<float *>(_tensor->buffer() + offset);
+
+ return *ptr;
+ }
+
+public:
+ float &at(uint32_t ch, uint32_t row, uint32_t col)
+ {
+ const auto offset = feature_index_to_byte_offset(ch, row, col);
+
+ float *ptr = reinterpret_cast<float *>(_tensor->buffer() + offset);
+
+ return *ptr;
+ }
+ float &at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col)
+ {
+ const auto offset = feature_index_to_byte_offset(batch, ch, row, col);
+
+ float *ptr = reinterpret_cast<float *>(_tensor->buffer() + offset);
+
+ return *ptr;
+ }
+
+private:
+ size_t feature_index_to_byte_offset(uint32_t ch, uint32_t row, uint32_t col) const
+ {
+ // ARM Compute uses CHW ordering
+ return _tensor->info()->offset_element_in_bytes(::arm_compute::Coordinates{col, row, ch});
+ }
+ size_t feature_index_to_byte_offset(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const
+ {
+ // ARM Compute uses CHW ordering
+ return _tensor->info()->offset_element_in_bytes(
+ ::arm_compute::Coordinates{col, row, ch, batch});
+ }
+
+private:
+ ::nnfw::util::feature::Shape _shape;
+ ::arm_compute::ITensor *_tensor;
+};
+
+} // namespace feature
+} // namespace arm_compute
+} // namespace internal
+
+#endif // __INTERNAL_ARM_COMPUTE_FEATURE_VIEW_H__
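The View above only computes byte offsets; it assumes the CL tensor is already mapped to host memory (see operand::Object::access later in this patch). A sketch of writing through it, with the Coordinates order matching the CHW layout noted in the comments:

    #include "backend/acl_cl/feature/View.h"

    // Sketch: `tensor` must already be mapped to host memory when this runs.
    void fill_ones(::arm_compute::ITensor &tensor)
    {
      ::internal::arm_compute::feature::View<float> view{&tensor};
      const auto shape = view.shape();  // C/H/W captured at construction
      for (uint32_t ch = 0; ch < static_cast<uint32_t>(shape.C); ++ch)
        for (uint32_t row = 0; row < static_cast<uint32_t>(shape.H); ++row)
          for (uint32_t col = 0; col < static_cast<uint32_t>(shape.W); ++col)
            view.at(ch, row, col) = 1.0f; // offset via Coordinates{col, row, ch}
    }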
diff --git a/runtimes/neurun/src/backend/acl_cl/kernel/View.h b/runtimes/neurun/src/backend/acl_cl/kernel/View.h
new file mode 100644
index 000000000..aec9a8892
--- /dev/null
+++ b/runtimes/neurun/src/backend/acl_cl/kernel/View.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INTERNAL_ARM_COMPUTE_KERNEL_VIEW_H__
+#define __INTERNAL_ARM_COMPUTE_KERNEL_VIEW_H__
+
+#include "util/kernel/Shape.h"
+#include "util/kernel/Reader.h"
+
+#include <arm_compute/core/ITensor.h>
+
+#include <cassert>
+
+namespace internal
+{
+namespace arm_compute
+{
+namespace kernel
+{
+
+template <typename T> class View;
+
+template <> class View<float> final : public nnfw::util::kernel::Reader<float>
+{
+public:
+ View(::arm_compute::ITensor *tensor) : _tensor{tensor}
+ {
+ assert(tensor->info()->data_type() == ::arm_compute::DataType::F32);
+
+ _shape.N = tensor->info()->dimension(3);
+ _shape.C = tensor->info()->dimension(2);
+ _shape.H = tensor->info()->dimension(1);
+ _shape.W = tensor->info()->dimension(0);
+ }
+
+public:
+ const ::nnfw::util::kernel::Shape &shape(void) const { return _shape; }
+
+public:
+ float at(uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) const override
+ {
+ const auto offset = kernel_index_to_byte_offset(nth, ch, row, col);
+
+ float *ptr = reinterpret_cast<float *>(_tensor->buffer() + offset);
+
+ return *ptr;
+ }
+
+public:
+ float &at(uint32_t nth, uint32_t ch, uint32_t row, uint32_t col)
+ {
+ const auto offset = kernel_index_to_byte_offset(nth, ch, row, col);
+
+ float *ptr = reinterpret_cast<float *>(_tensor->buffer() + offset);
+
+ return *ptr;
+ }
+
+private:
+ size_t kernel_index_to_byte_offset(uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) const
+ {
+ return _tensor->info()->offset_element_in_bytes(::arm_compute::Coordinates{col, row, ch, nth});
+ }
+
+private:
+ ::nnfw::util::kernel::Shape _shape;
+ ::arm_compute::ITensor *_tensor;
+};
+
+} // namespace kernel
+} // namespace arm_compute
+} // namespace internal
+
+#endif // __INTERNAL_ARM_COMPUTE_KERNEL_VIEW_H__
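This kernel-shaped View pairs naturally with the nnfw iterate helper that the initializer generators in this patch use to walk weights. A brief sketch (again, `tensor` must be mapped):

    ::internal::arm_compute::kernel::View<float> view{&tensor};
    ::nnfw::util::kernel::iterate(view.shape())
        << [&](uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) {
             view.at(nth, ch, row, col) *= 0.5f; // e.g. scale each weight in place
           };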
diff --git a/runtimes/neurun/src/backend/acl_cl/operand/Object.cc b/runtimes/neurun/src/backend/acl_cl/operand/Object.cc
new file mode 100644
index 000000000..98b96a11a
--- /dev/null
+++ b/runtimes/neurun/src/backend/acl_cl/operand/Object.cc
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Object.h"
+
+#include <arm_compute/runtime/CL/CLScheduler.h>
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_cl
+{
+namespace operand
+{
+
+void Object::access(const std::function<void(::arm_compute::ITensor &tensor)> &fn) const
+{
+ auto &queue = ::arm_compute::CLScheduler::get().queue();
+
+ _tensor->map(queue);
+ fn(*_tensor);
+ _tensor->unmap(queue);
+}
+
+} // namespace operand
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
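access() brackets the functor with map()/unmap() on the CL scheduler queue, so callers never touch unmapped CL memory. Combined with the feature View above, a typical (sketched) initialization of an acl_cl operand Object looks like:

    // Sketch: `object` wraps an ICLTensor; the lambda runs with memory mapped.
    object.access([](::arm_compute::ITensor &tensor) {
      ::internal::arm_compute::feature::View<float> view{&tensor};
      view.at(0, 0, 0) = 1.0f; // plain host-side write while mapped
    });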
diff --git a/runtimes/neurun/src/backend/acl_cl/operand/Object.h b/runtimes/neurun/src/backend/acl_cl/operand/Object.h
new file mode 100644
index 000000000..da33c0549
--- /dev/null
+++ b/runtimes/neurun/src/backend/acl_cl/operand/Object.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ACL_CL_OPERAND_OBJECT_H__
+#define __NEURUN_BACKEND_ACL_CL_OPERAND_OBJECT_H__
+
+#include <memory>
+#include <arm_compute/core/CL/ICLTensor.h>
+
+#include "backend/IObject.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_cl
+{
+namespace operand
+{
+
+class Object : public backend::operand::IObject
+{
+public:
+ Object() = default;
+
+public:
+ Object(const std::shared_ptr<::arm_compute::ICLTensor> &tensor) : _tensor{tensor}
+ {
+ // DO NOTHING
+ }
+
+public:
+ ::arm_compute::ICLTensor *ptr(void) const override { return _tensor.get(); }
+
+private:
+ std::shared_ptr<::arm_compute::ICLTensor> _tensor;
+
+public:
+ void access(const std::function<void(::arm_compute::ITensor &tensor)> &fn) const override;
+};
+
+} // namespace operand
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ACL_CL_OPERAND_OBJECT_H__
diff --git a/runtimes/neurun/src/backend/cpu/BackendConfig.cc b/runtimes/neurun/src/backend/cpu/BackendConfig.cc
new file mode 100644
index 000000000..34fc3491a
--- /dev/null
+++ b/runtimes/neurun/src/backend/cpu/BackendConfig.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "backend/cpu/BackendConfig.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+
+void BackendConfig::initialize()
+{
+ // DO NOTHING
+}
+
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/src/backend/cpu/BackendConfig.h b/runtimes/neurun/src/backend/cpu/BackendConfig.h
new file mode 100644
index 000000000..109235bb1
--- /dev/null
+++ b/runtimes/neurun/src/backend/cpu/BackendConfig.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_CPU_BACKEND_CONFIG_H__
+#define __NEURUN_BACKEND_CPU_BACKEND_CONFIG_H__
+
+#include "backend/IBackendConfig.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+
+class BackendConfig : public IBackendConfig
+{
+public:
+ BackendConfig()
+ {
+ // DO NOTHING
+ }
+
+ virtual void initialize() override;
+ virtual graph::operand::Layout getOperandLayout() { return graph::operand::Layout::NHWC; }
+};
+
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_CPU_BACKEND_CONFIG_H__
diff --git a/runtimes/neurun/src/backend/cpu/CMakeLists.txt b/runtimes/neurun/src/backend/cpu/CMakeLists.txt
new file mode 100644
index 000000000..95e9af687
--- /dev/null
+++ b/runtimes/neurun/src/backend/cpu/CMakeLists.txt
@@ -0,0 +1,19 @@
+file(GLOB_RECURSE SOURCES "*.cc")
+
+add_library(${LIB_NEURUN_BACKEND_CPU} STATIC ${SOURCES})
+
+target_include_directories(${LIB_NEURUN_BACKEND_CPU} PUBLIC ${NNFW_INCLUDE_DIR})
+target_include_directories(${LIB_NEURUN_BACKEND_CPU} PUBLIC ${NEURUN_INCLUDE_DIR})
+target_include_directories(${LIB_NEURUN_BACKEND_CPU} PUBLIC ${CMAKE_SOURCE_DIR}/externals/tensorflow)
+
+target_link_libraries(${LIB_NEURUN_BACKEND_CPU} arm_compute) # TODO We should not need this
+target_link_libraries(${LIB_NEURUN_BACKEND_CPU} tensorflow-lite)
+target_link_libraries(${LIB_NEURUN_BACKEND_CPU} nnfw_util)
+target_link_libraries(${LIB_NEURUN_BACKEND_CPU} nnfw_support_nnapi)
+target_link_libraries(${LIB_NEURUN_BACKEND_CPU} ${LIB_NEURUN_KERNEL_CPU})
+
+target_compile_options(${LIB_NEURUN_BACKEND_CPU} PRIVATE -Wall -Wextra -Werror)
+
+set_target_properties(${LIB_NEURUN_BACKEND_CPU} PROPERTIES POSITION_INDEPENDENT_CODE ON)
+set_target_properties(${LIB_NEURUN_BACKEND_CPU} PROPERTIES OUTPUT_NAME backend_cpu)
+install(TARGETS ${LIB_NEURUN_BACKEND_CPU} DESTINATION lib/neurun)
diff --git a/runtimes/neurun/src/backend/cpu/InitializerGenerator.cc b/runtimes/neurun/src/backend/cpu/InitializerGenerator.cc
new file mode 100644
index 000000000..7b08c7131
--- /dev/null
+++ b/runtimes/neurun/src/backend/cpu/InitializerGenerator.cc
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "InitializerGenerator.h"
+
+#include "internal/nnapi/kernel/Reader.h"
+#include "internal/nnapi/kernel/View.h"
+#include "util/kernel/IndexIterator.h"
+
+#include "NeuralNetworks.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+
+InitializerGenerator::InitializerGenerator(const neurun::graph::operand::Set &ctx) : _ctx(ctx)
+{
+ // DO NOTHING
+}
+
+Initializer
+InitializerGenerator::generateWeight(const graph::operation::Conv2D::Implicit::Node &node)
+{
+ const ::neurun::graph::operand::Index ker_index{node.getInputs().at(1)};
+
+ const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
+ auto ker_base = _ctx.at(ker_index).data().base();
+ auto ker_size = _ctx.at(ker_index).data().size();
+
+ return [ker_shape, ker_base, ker_size](::arm_compute::ITensor &tensor) {
+ const ::internal::nnapi::kernel::Reader<float> from{ker_shape, ker_base, ker_size};
+ ::internal::nnapi::kernel::View<float> into{&tensor};
+
+ ::nnfw::util::kernel::iterate(ker_shape)
+ << [&](uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(nth, ch, row, col);
+ into.at(nth, row, col, ch) = value;
+ };
+ };
+}
+
+Initializer InitializerGenerator::generateWeight(const graph::operation::FullyConnected::Node &node)
+{
+ const ::neurun::graph::operand::Index weight_index{node.getInputs().at(1)};
+ const ::neurun::graph::operand::Index input_index{node.getInputs().at(0)};
+
+ const auto num_output = _ctx.at(weight_index).shape().dim(0);
+ auto weight_base = _ctx.at(weight_index).data().base();
+ auto weight_size = _ctx.at(weight_index).data().size();
+ auto weight_type = _ctx.at(weight_index).typeInfo().type();
+
+ // NOTE We assume that input is a feature map
+ // TODO Remove this restriction!
+ const auto ifm_shape = _ctx.at(input_index).shape().asFeature();
+
+ switch (weight_type)
+ {
+ case ::neurun::graph::operand::DataType::TENSOR_FLOAT32:
+ {
+ return [num_output, ifm_shape, weight_base, weight_size](::arm_compute::ITensor &tensor) {
+ const ::nnfw::util::kernel::Shape ker_shape{num_output, ifm_shape.C, ifm_shape.H,
+ ifm_shape.W};
+ const ::internal::nnapi::kernel::Reader<float> from{ker_shape, weight_base, weight_size};
+
+ ::nnfw::util::kernel::iterate(ker_shape)
+ << [&](uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(nth, ch, row, col);
+
+ uint32_t offset = 0;
+
+ // NNAPI uses NHWC ordering
+ offset += nth * ifm_shape.H * ifm_shape.W * ifm_shape.C;
+ offset += row * ifm_shape.W * ifm_shape.C;
+ offset += col * ifm_shape.C;
+ offset += ch;
+
+ const ::arm_compute::Coordinates coordinate{offset};
+
+ auto into = reinterpret_cast<float *>(tensor.ptr_to_element(coordinate));
+
+ *into = value;
+ };
+ };
+ }
+ case ::neurun::graph::operand::DataType::TENSOR_QUANT8_ASYMM:
+ {
+ return [num_output, ifm_shape, weight_base, weight_size](::arm_compute::ITensor &tensor) {
+ const ::nnfw::util::kernel::Shape ker_shape{num_output, ifm_shape.C, ifm_shape.H,
+ ifm_shape.W};
+ const ::internal::nnapi::kernel::Reader<uint8_t> from{ker_shape, weight_base, weight_size};
+ ::nnfw::util::kernel::iterate(ker_shape)
+ << [&](uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(nth, ch, row, col);
+ uint32_t offset = 0;
+
+ // NNAPI uses NHWC ordering
+ offset += nth * ifm_shape.H * ifm_shape.W * ifm_shape.C;
+ offset += row * ifm_shape.W * ifm_shape.C;
+ offset += col * ifm_shape.C;
+ offset += ch;
+
+ const ::arm_compute::Coordinates coordinate{offset};
+
+ auto into = reinterpret_cast<uint8_t *>(tensor.ptr_to_element(coordinate));
+
+ *into = value;
+ };
+ };
+ }
+ default:
+ {
+ throw std::runtime_error("Not supported weight type");
+ }
+ }
+}
+
+Initializer InitializerGenerator::generateBias(const graph::operation::Conv2D::Implicit::Node &node)
+{
+ // TODO Refactor so we can reuse the common code
+
+ const ::neurun::graph::operand::Index bias_index{node.getInputs().at(2)};
+
+ auto bias_base = _ctx.at(bias_index).data().base();
+ const auto bias_size = _ctx.at(bias_index).shape().asVector();
+
+ return [bias_base, bias_size](::arm_compute::ITensor &tensor) {
+ for (int32_t n = 0; n < bias_size; ++n)
+ {
+ const ::arm_compute::Coordinates coordinate{n};
+
+ float *into = reinterpret_cast<float *>(tensor.ptr_to_element(coordinate));
+
+ const float *from = reinterpret_cast<const float *>(bias_base) + n;
+ const auto value = *from;
+
+ *into = value;
+ }
+ };
+}
+
+Initializer InitializerGenerator::generateBias(const graph::operation::FullyConnected::Node &node)
+{
+ const ::neurun::graph::operand::Index bias_index{node.getInputs().at(2)};
+
+ auto bias_base = _ctx.at(bias_index).data().base();
+ auto bias_type = _ctx.at(bias_index).typeInfo().type();
+ const auto bias_size = _ctx.at(bias_index).shape().asVector();
+
+ switch (bias_type)
+ {
+ case ::neurun::graph::operand::DataType::TENSOR_FLOAT32:
+ {
+ return [bias_base, bias_size](::arm_compute::ITensor &tensor) {
+ for (int32_t n = 0; n < bias_size; ++n)
+ {
+ const ::arm_compute::Coordinates coordinate{n};
+
+ float *into = reinterpret_cast<float *>(tensor.ptr_to_element(coordinate));
+
+ const float *from = reinterpret_cast<const float *>(bias_base) + n;
+ const auto value = *from;
+
+ *into = value;
+ }
+ };
+ }
+ case ::neurun::graph::operand::DataType::TENSOR_QUANT8_ASYMM:
+ {
+ return [bias_base, bias_size](::arm_compute::ITensor &tensor) {
+ for (int32_t n = 0; n < bias_size; ++n)
+ {
+ const ::arm_compute::Coordinates coordinate{n};
+
+ uint8_t *into = reinterpret_cast<uint8_t *>(tensor.ptr_to_element(coordinate));
+
+ const uint8_t *from = reinterpret_cast<const uint8_t *>(bias_base) + n;
+ const auto value = *from;
+
+ *into = value;
+ }
+ };
+ }
+ default:
+ {
+ throw std::runtime_error("Not supported bias type");
+ }
+ }
+}
+
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
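Each switch branch above linearizes a (nth, ch, row, col) kernel index into a flat NHWC element offset before writing into the tensor. As a worked check of that arithmetic: with ifm_shape C=2, H=3, W=4, element (nth=1, ch=1, row=2, col=3) maps to 1*3*4*2 + 2*4*2 + 3*2 + 1 = 24 + 16 + 6 + 1 = 47. A hypothetical helper mirroring the computation in generateWeight():

    uint32_t nhwc_offset(uint32_t nth, uint32_t ch, uint32_t row, uint32_t col,
                         uint32_t C, uint32_t H, uint32_t W)
    {
      return nth * H * W * C // one full H*W*C feature map per output channel
             + row * W * C + col * C + ch;
    }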
diff --git a/runtimes/neurun/src/backend/cpu/InitializerGenerator.h b/runtimes/neurun/src/backend/cpu/InitializerGenerator.h
new file mode 100644
index 000000000..42d37f48b
--- /dev/null
+++ b/runtimes/neurun/src/backend/cpu/InitializerGenerator.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_CPU_INITIALIZER_GENERATOR_H__
+#define __NEURUN_BACKEND_CPU_INITIALIZER_GENERATOR_H__
+
+#include "backend/IInitializerGenerator.h"
+
+#include "graph/operand/Set.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+
+class InitializerGenerator : public IInitializerGenerator
+{
+public:
+ InitializerGenerator(const neurun::graph::operand::Set &ctx);
+
+ Initializer generateWeight(const graph::operation::Conv2D::Implicit::Node &node) override;
+ Initializer generateWeight(const graph::operation::FullyConnected::Node &node) override;
+
+ Initializer generateBias(const graph::operation::Conv2D::Implicit::Node &node) override;
+ Initializer generateBias(const graph::operation::FullyConnected::Node &node) override;
+
+private:
+ const neurun::graph::operand::Set &_ctx;
+};
+
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_CPU_INITIALIZER_GENERATOR_H__
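Since an Initializer is, as the lambdas above suggest, a functor over an ::arm_compute::ITensor, it composes directly with the operand Object::access() shown earlier. The sketch below is one plausible wiring; the actual plan builder may route it differently:

    neurun::backend::cpu::InitializerGenerator init_gen{ctx};
    auto init = init_gen.generateWeight(conv_node); // captures shape + data ptr
    weight_object.access(init);                     // reorders weights into place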
diff --git a/runtimes/neurun/src/backend/cpu/MemoryAllocator.cc b/runtimes/neurun/src/backend/cpu/MemoryAllocator.cc
new file mode 100644
index 000000000..13d2a7ffc
--- /dev/null
+++ b/runtimes/neurun/src/backend/cpu/MemoryAllocator.cc
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#include "internal/cpu/MemoryAllocator.h"
diff --git a/runtimes/neurun/src/backend/cpu/MemoryAllocator.h b/runtimes/neurun/src/backend/cpu/MemoryAllocator.h
new file mode 100644
index 000000000..e3550ac07
--- /dev/null
+++ b/runtimes/neurun/src/backend/cpu/MemoryAllocator.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INTERNAL_CPU_MEMORY_ALLOCATOR_H__
+#define __INTERNAL_CPU_MEMORY_ALLOCATOR_H__
+
+#include "arm_compute/runtime/ITensorAllocator.h"
+#include "arm_compute/runtime/Memory.h"
+
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+namespace arm_compute
+{
+class Coordinates;
+class TensorInfo;
+class Tensor;
+} // namespace arm_compute
+
+/** Basic implementation of a CPU memory tensor allocator. */
+class TensorAllocator : public ITensorAllocator
+{
+public:
+ /** Default constructor. */
+ TensorAllocator(Tensor *owner = nullptr);
+ /** Default destructor */
+ ~TensorAllocator();
+
+ /** Make ITensorAllocator's init methods available */
+ using ITensorAllocator::init;
+
+ /** Shares the same backing memory with another tensor allocator, while the tensor info might be
+ * different.
+ * In other words this can be used to create a sub-tensor from another tensor while sharing the
+ * same memory.
+ *
+ * @note Both TensorAllocators have to be of the same specialized type.
+ *
+ * @param[in] allocator The allocator that owns the backing memory to be shared. Ownership becomes
+ * shared afterwards.
+ * @param[in] coords The starting coordinates of the new tensor inside the parent tensor.
+ * @param[in] sub_info The new tensor information (e.g. shape etc)
+ */
+ void init(const TensorAllocator &allocator, const Coordinates &coords, TensorInfo sub_info);
+
+ /** Returns the pointer to the allocated data. */
+ uint8_t *data() const;
+
+ /** Allocate CPU memory of the size specified by the tensor's TensorInfo.
+ *
+ * @note The tensor must not already be allocated when calling this function.
+ *
+ */
+ void allocate() override;
+
+ /** Free allocated CPU memory.
+ *
+ * @note The tensor must have been allocated when calling this function.
+ *
+ */
+ void free() override;
+ /** Import an existing memory as a tensor's backing memory
+ *
+ * @warning If the tensor is flagged to be managed by a memory manager,
+ * this call will lead to an error.
+ * @warning Ownership of memory depends on the way the @ref Memory object was constructed
+ * @note Calling free on a tensor with imported memory will just clear
+ * the internal pointer value.
+ *
+ * @param[in] memory Memory to import
+ *
+ * @return error status
+ */
+ arm_compute::Status import_memory(Memory memory);
+ /** Associates the tensor with a memory group
+ *
+ * @param[in] associated_memory_group Memory group to associate the tensor with
+ */
+ void set_associated_memory_group(MemoryGroup *associated_memory_group);
+
+protected:
+ /** No-op for CPU memory
+ *
+ * @return A pointer to the beginning of the tensor's allocation.
+ */
+ uint8_t *lock() override;
+
+ /** No-op for CPU memory. */
+ void unlock() override;
+
+private:
+ MemoryGroup *_associated_memory_group; /**< Registered memory manager */
+ Memory _memory; /**< CPU memory */
+ Tensor *_owner; /**< Owner of the allocator */
+};
+
+namespace internal
+{
+namespace cpu
+{
+
+// TODO Decide the base class and implement this allocator; kept as an empty
+// placeholder for now so that the header stays well-formed.
+class MemoryAllocator
+{
+};
+
+} // namespace cpu
+} // namespace internal
+
+#endif // __INTERNAL_CPU_MEMORY_ALLOCATOR_H__
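The TensorAllocator declared above mirrors ACL's allocator interface; the sub-tensor init() overload is the interesting part, sharing a parent's backing memory. A hedged sketch of the documented usage (the tensors, infos, and coordinates here are assumed):

    TensorAllocator parent_alloc{&parent_tensor};
    parent_alloc.init(parent_info);
    parent_alloc.allocate(); // owns the backing memory

    TensorAllocator sub_alloc{&sub_tensor};
    sub_alloc.init(parent_alloc, Coordinates{0, 0, 0}, sub_info); // shares memory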
diff --git a/runtimes/neurun/src/backend/cpu/StageGenerator.cc b/runtimes/neurun/src/backend/cpu/StageGenerator.cc
new file mode 100644
index 000000000..b7a3fa24a
--- /dev/null
+++ b/runtimes/neurun/src/backend/cpu/StageGenerator.cc
@@ -0,0 +1,536 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "StageGenerator.h"
+
+#include <memory>
+#include <stdexcept>
+#include <vector>
+
+#include "internal/Padding.h"
+#include "kernel/cpu/OperationUtils.h"
+#include "kernel/cpu/ConvolutionLayer.h"
+#include "kernel/cpu/AvgPoolLayer.h"
+#include "kernel/cpu/MaxPoolLayer.h"
+#include "kernel/cpu/ConcatLayer.h"
+#include "kernel/cpu/FullyConnectedLayer.h"
+#include "kernel/cpu/ReshapeLayer.h"
+#include "kernel/cpu/SoftMaxLayer.h"
+
+#include "logging.h"
+
+#include "support/nnapi/Utils.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+
+StageGenerator::StageGenerator(const neurun::graph::operand::Set &operand_ctx,
+ const std::shared_ptr<TensorBuilder> &tensor_builder)
+ : _ctx(operand_ctx), _tensor_builder(tensor_builder)
+{
+ // DO NOTHING
+}
+
+Stage StageGenerator::generate(const graph::operation::Conv2D::Implicit::Node &node)
+{
+ const ::neurun::graph::operand::Index ofm_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index ifm_index{node.getInputs().at(0)};
+ const ::neurun::graph::operand::Index ker_index{node.getInputs().at(1)};
+ const ::neurun::graph::operand::Index bias_index{node.getInputs().at(2)};
+
+ const ::neurun::graph::operand::Index vstride_index{node.param().vstride_index};
+ const ::neurun::graph::operand::Index hstride_index{node.param().hstride_index};
+
+ const ::neurun::graph::operand::Index padding_index{node.param().padding_index};
+ const ::neurun::graph::operand::Index activation_index{node.param().activation_index};
+
+ const PaddingCode padding_type =
+ static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
+
+ assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
+ (ANEURALNETWORKS_PADDING_VALID == padding_type));
+
+ ::internal::Stride stride;
+
+ stride.vertical = _ctx.at(vstride_index).asScalar<int32_t>();
+ stride.horizontal = _ctx.at(hstride_index).asScalar<int32_t>();
+
+ // Construct operation parameters
+ struct Param
+ {
+ int ofm_index;
+ int ifm_index;
+ int ker_index;
+ int bias_index;
+
+ ::neurun::kernel::cpu::Shape ofm_shape;
+ ::neurun::kernel::cpu::Shape ifm_shape;
+ ::neurun::kernel::cpu::Shape ker_shape;
+ ::neurun::kernel::cpu::Shape bias_shape;
+
+ ::internal::Padding padding;
+ ::internal::Stride stride;
+
+ FuseCode activation;
+ };
+
+ Param param;
+
+ param.ofm_index = ofm_index.asInt();
+ param.ifm_index = ifm_index.asInt();
+ param.ker_index = ker_index.asInt();
+ param.bias_index = bias_index.asInt();
+
+ param.ofm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ofm_index));
+ param.ifm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ifm_index));
+ param.ker_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ker_index));
+ param.bias_shape = ::neurun::kernel::cpu::getShape(_ctx.at(bias_index));
+
+ param.stride = stride;
+ param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
+ ? ::internal::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+ _ctx.at(ofm_index).shape().asFeature(), stride,
+ _ctx.at(ker_index).shape().asKernel().W,
+ _ctx.at(ker_index).shape().asKernel().H)
+ : ::internal::valid_padding();
+
+ param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
+
+ auto tensors = _tensor_builder;
+
+ return [tensors, param](IExecutionBuilder &builder) {
+ auto ofm_alloc = tensors->at(::neurun::graph::operand::Index{param.ofm_index});
+ auto ifm_alloc = tensors->at(::neurun::graph::operand::Index{param.ifm_index});
+ auto ker_alloc = tensors->at(::neurun::graph::operand::Index{param.ker_index});
+ auto bias_alloc = tensors->at(::neurun::graph::operand::Index{param.bias_index});
+
+ std::unique_ptr<::neurun::kernel::cpu::ConvolutionLayer> fn{
+ new ::neurun::kernel::cpu::ConvolutionLayer};
+
+ fn->configure(ifm_alloc->buffer(), param.ifm_shape, ker_alloc->buffer(), param.ker_shape,
+ bias_alloc->buffer(), param.bias_shape, param.padding.left, param.padding.right,
+ param.padding.top, param.padding.bottom, param.stride.horizontal,
+ param.stride.vertical, param.activation, ofm_alloc->buffer(), param.ofm_shape);
+
+ builder.append(std::move(fn));
+ };
+}
+
+Stage StageGenerator::generate(const graph::operation::MaxPool2D::Implicit::Node &node)
+{
+ VERBOSE(MaxPool2D) << "generate CPU MaxPool2D" << std::endl;
+
+ const ::neurun::graph::operand::Index ofm_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index ifm_index{node.getInputs().at(0)};
+
+ const ::neurun::graph::operand::Index kh_index{node.param().kh_index};
+ const ::neurun::graph::operand::Index kw_index{node.param().kw_index};
+
+ const ::neurun::graph::operand::Index vstride_index{node.param().vstride_index};
+ const ::neurun::graph::operand::Index hstride_index{node.param().hstride_index};
+
+ const ::neurun::graph::operand::Index padding_index{node.param().padding_index};
+ const ::neurun::graph::operand::Index activation_index{node.param().activation_index};
+
+ const int32_t kh = _ctx.at(kh_index).asScalar<int32_t>();
+ const int32_t kw = _ctx.at(kw_index).asScalar<int32_t>();
+
+ const int32_t vstride = _ctx.at(vstride_index).asScalar<int32_t>();
+ const int32_t hstride = _ctx.at(hstride_index).asScalar<int32_t>();
+
+  const PaddingCode padding_type =
+      static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
+
+  assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
+         (ANEURALNETWORKS_PADDING_VALID == padding_type));
+
+ // Construct operation parameters
+ struct Param
+ {
+ int ofm_index;
+ int ifm_index;
+
+ uint32_t kw;
+ uint32_t kh;
+
+ ::neurun::kernel::cpu::Shape ofm_shape;
+ ::neurun::kernel::cpu::Shape ifm_shape;
+
+ ::internal::Padding padding;
+ ::internal::Stride stride;
+
+ FuseCode activation;
+ };
+
+ Param param;
+
+ param.ofm_index = ofm_index.asInt();
+ param.ifm_index = ifm_index.asInt();
+
+ param.kh = kh;
+ param.kw = kw;
+
+ param.ofm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ofm_index));
+ param.ifm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ifm_index));
+
+ param.stride.vertical = vstride;
+ param.stride.horizontal = hstride;
+
+ param.padding =
+ (padding_type == ANEURALNETWORKS_PADDING_SAME)
+ ? ::internal::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+ _ctx.at(ofm_index).shape().asFeature(), param.stride, kw, kh)
+ : ::internal::valid_padding();
+
+ param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
+
+ VERBOSE(MaxPool2D) << "IFM_H: " << _ctx.at(ifm_index).shape().asFeature().H << std::endl;
+ VERBOSE(MaxPool2D) << "IFM_W: " << _ctx.at(ifm_index).shape().asFeature().W << std::endl;
+ VERBOSE(MaxPool2D) << "OFM_H: " << _ctx.at(ofm_index).shape().asFeature().H << std::endl;
+ VERBOSE(MaxPool2D) << "OFM_W: " << _ctx.at(ofm_index).shape().asFeature().W << std::endl;
+ VERBOSE(MaxPool2D) << "KER_H: " << kh << std::endl;
+ VERBOSE(MaxPool2D) << "KER_W: " << kw << std::endl;
+ VERBOSE(MaxPool2D) << "STRIDE_H: " << vstride << std::endl;
+ VERBOSE(MaxPool2D) << "STRIDE_W: " << hstride << std::endl;
+ VERBOSE(MaxPool2D) << "PAD(T): " << param.padding.top << std::endl;
+ VERBOSE(MaxPool2D) << "PAD(B): " << param.padding.bottom << std::endl;
+ VERBOSE(MaxPool2D) << "PAD(L): " << param.padding.left << std::endl;
+ VERBOSE(MaxPool2D) << "PAD(R): " << param.padding.right << std::endl;
+
+ auto tensors = _tensor_builder;
+
+ return [tensors, param](IExecutionBuilder &builder) {
+ auto ofm_alloc = tensors->at(::neurun::graph::operand::Index{param.ofm_index}).get();
+ auto ifm_alloc = tensors->at(::neurun::graph::operand::Index{param.ifm_index}).get();
+
+ std::unique_ptr<::neurun::kernel::cpu::MaxPoolLayer> fn{
+ new ::neurun::kernel::cpu::MaxPoolLayer};
+
+ fn->configure(ifm_alloc->buffer(), param.ifm_shape, param.padding.left, param.padding.right,
+ param.padding.top, param.padding.bottom, param.stride.horizontal,
+ param.stride.vertical, param.kw, param.kh, param.activation, ofm_alloc->buffer(),
+ param.ofm_shape);
+
+ builder.append(std::move(fn));
+ };
+}
+
+Stage StageGenerator::generate(const graph::operation::AvgPool2D::Implicit::Node &node)
+{
+ VERBOSE(AvgPool2D) << "generate CPU AvgPool2D" << std::endl;
+
+ const ::neurun::graph::operand::Index ofm_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index ifm_index{node.getInputs().at(0)};
+
+ const ::neurun::graph::operand::Index kh_index{node.param().kh_index};
+ const ::neurun::graph::operand::Index kw_index{node.param().kw_index};
+
+ const ::neurun::graph::operand::Index vstride_index{node.param().vstride_index};
+ const ::neurun::graph::operand::Index hstride_index{node.param().hstride_index};
+
+ const ::neurun::graph::operand::Index padding_index{node.param().padding_index};
+ const ::neurun::graph::operand::Index activation_index{node.param().activation_index};
+
+ const int32_t kh = _ctx.at(kh_index).asScalar<int32_t>();
+ const int32_t kw = _ctx.at(kw_index).asScalar<int32_t>();
+
+ const int32_t vstride = _ctx.at(vstride_index).asScalar<int32_t>();
+ const int32_t hstride = _ctx.at(hstride_index).asScalar<int32_t>();
+
+ const PaddingCode padding_type =
+ static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
+
+ assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
+ (ANEURALNETWORKS_PADDING_VALID == padding_type));
+
+ // Construct operation parameters
+ struct Param
+ {
+ int ofm_index;
+ int ifm_index;
+
+ uint32_t kw;
+ uint32_t kh;
+
+ ::neurun::kernel::cpu::Shape ofm_shape;
+ ::neurun::kernel::cpu::Shape ifm_shape;
+
+ ::internal::Padding padding;
+ ::internal::Stride stride;
+
+ FuseCode activation;
+ };
+
+ Param param;
+
+ param.ofm_index = ofm_index.asInt();
+ param.ifm_index = ifm_index.asInt();
+
+ param.kh = kh;
+ param.kw = kw;
+
+ param.ofm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ofm_index));
+ param.ifm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ifm_index));
+
+ param.stride.vertical = vstride;
+ param.stride.horizontal = hstride;
+
+ param.padding =
+ (padding_type == ANEURALNETWORKS_PADDING_SAME)
+ ? ::internal::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+ _ctx.at(ofm_index).shape().asFeature(), param.stride, kw, kh)
+ : ::internal::valid_padding();
+
+ param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
+
+ VERBOSE(AvgPool2D) << "IFM_H: " << _ctx.at(ifm_index).shape().asFeature().H << std::endl;
+ VERBOSE(AvgPool2D) << "IFM_W: " << _ctx.at(ifm_index).shape().asFeature().W << std::endl;
+ VERBOSE(AvgPool2D) << "OFM_H: " << _ctx.at(ofm_index).shape().asFeature().H << std::endl;
+ VERBOSE(AvgPool2D) << "OFM_W: " << _ctx.at(ofm_index).shape().asFeature().W << std::endl;
+ VERBOSE(AvgPool2D) << "KER_H: " << kh << std::endl;
+ VERBOSE(AvgPool2D) << "KER_W: " << kw << std::endl;
+ VERBOSE(AvgPool2D) << "STRIDE_H: " << vstride << std::endl;
+ VERBOSE(AvgPool2D) << "STRIDE_W: " << hstride << std::endl;
+ VERBOSE(AvgPool2D) << "PAD: " << ::nnfw::support::nnapi::to_string(padding_type) << std::endl;
+ VERBOSE(AvgPool2D) << "PAD(T): " << param.padding.top << std::endl;
+ VERBOSE(AvgPool2D) << "PAD(B): " << param.padding.bottom << std::endl;
+ VERBOSE(AvgPool2D) << "PAD(L): " << param.padding.left << std::endl;
+ VERBOSE(AvgPool2D) << "PAD(R): " << param.padding.right << std::endl;
+
+ auto tensors = _tensor_builder;
+
+ return [tensors, param](IExecutionBuilder &builder) {
+ auto ofm_alloc = tensors->at(::neurun::graph::operand::Index{param.ofm_index}).get();
+ auto ifm_alloc = tensors->at(::neurun::graph::operand::Index{param.ifm_index}).get();
+
+ std::unique_ptr<::neurun::kernel::cpu::AvgPoolLayer> fn{
+ new ::neurun::kernel::cpu::AvgPoolLayer};
+
+ fn->configure(ifm_alloc->buffer(), param.ifm_shape, param.padding.left, param.padding.right,
+ param.padding.top, param.padding.bottom, param.stride.horizontal,
+ param.stride.vertical, param.kw, param.kh, param.activation, ofm_alloc->buffer(),
+ param.ofm_shape);
+
+ builder.append(std::move(fn));
+ };
+}
+
+Stage StageGenerator::generate(const graph::operation::Concat::Node &node)
+{
+ VERBOSE(Concat) << "generate CPU Concat" << std::endl;
+
+ const ::neurun::graph::operand::Index ofm_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index axis_index{node.param().axis_index};
+
+ struct Param
+ {
+ int32_t output_index;
+ std::vector<int32_t> input_indexes;
+
+ int32_t axis;
+
+ ::neurun::kernel::cpu::Shape ofm_shape;
+ std::vector<::neurun::kernel::cpu::Shape> ifm_shapes;
+ };
+
+ Param param;
+
+ param.output_index = ofm_index.asInt();
+ for (const auto &e : node.getInputs())
+ {
+ param.input_indexes.emplace_back(e.asInt());
+ }
+ param.axis = _ctx.at(axis_index).asScalar<int32_t>();
+
+ param.ofm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ofm_index));
+
+ for (auto e : node.getInputs())
+ {
+ param.ifm_shapes.emplace_back(::neurun::kernel::cpu::getShape(_ctx.at(e)));
+ }
+
+ auto tensors = _tensor_builder;
+
+ return [tensors, param](IExecutionBuilder &builder) {
+ auto output_alloc = tensors->at(::neurun::graph::operand::Index{param.output_index}).get();
+
+ std::vector<const uint8_t *> input_buffers;
+ for (auto ifm_ind : param.input_indexes)
+ {
+ input_buffers.emplace_back(
+ tensors->at(::neurun::graph::operand::Index{ifm_ind}).get()->buffer());
+ }
+
+ std::unique_ptr<::neurun::kernel::cpu::ConcatLayer> fn{new ::neurun::kernel::cpu::ConcatLayer};
+
+ fn->configure(input_buffers, param.ifm_shapes, param.axis, output_alloc->buffer(),
+ param.ofm_shape);
+
+ builder.append(std::move(fn));
+ };
+}
+
+Stage StageGenerator::generate(const graph::operation::FullyConnected::Node &node)
+{
+ VERBOSE(FullyConnected) << "generate CPU FullyConnected" << std::endl;
+
+ const ::neurun::graph::operand::Index output_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index input_index{node.getInputs().at(0)};
+ const ::neurun::graph::operand::Index weight_index{node.getInputs().at(1)};
+ const ::neurun::graph::operand::Index bias_index{node.getInputs().at(2)};
+ const ::neurun::graph::operand::Index activation_index{node.param().activation_index};
+
+ // Construct operation parameters
+ struct Param
+ {
+ int output_index;
+ int input_index;
+ int weight_index;
+ int bias_index;
+
+ ::neurun::kernel::cpu::Shape ofm_shape;
+ ::neurun::kernel::cpu::Shape ifm_shape;
+ ::neurun::kernel::cpu::Shape weight_shape;
+ ::neurun::kernel::cpu::Shape bias_shape;
+
+ FuseCode activation;
+ };
+
+ Param param;
+
+ param.output_index = output_index.asInt();
+ param.input_index = input_index.asInt();
+ param.weight_index = weight_index.asInt();
+ param.bias_index = bias_index.asInt();
+
+ param.ofm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(output_index));
+ param.ifm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(input_index));
+ param.weight_shape = ::neurun::kernel::cpu::getShape(_ctx.at(weight_index));
+ param.bias_shape = ::neurun::kernel::cpu::getShape(_ctx.at(bias_index));
+
+ param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
+
+ auto tensors = _tensor_builder;
+
+ return [tensors, param](IExecutionBuilder &builder) {
+ auto output_alloc = tensors->at(::neurun::graph::operand::Index{param.output_index}).get();
+ auto input_alloc = tensors->at(::neurun::graph::operand::Index{param.input_index}).get();
+ auto weight_alloc = tensors->at(::neurun::graph::operand::Index{param.weight_index}).get();
+ auto bias_alloc = tensors->at(::neurun::graph::operand::Index{param.bias_index}).get();
+
+ std::unique_ptr<::neurun::kernel::cpu::FullyConnectedLayer> fn{
+ new ::neurun::kernel::cpu::FullyConnectedLayer};
+
+ fn->configure(input_alloc->buffer(), param.ifm_shape, weight_alloc->buffer(),
+ param.weight_shape, bias_alloc->buffer(), param.bias_shape, param.activation,
+ output_alloc->buffer(), param.ofm_shape);
+
+ builder.append(std::move(fn));
+ };
+}
+
+Stage StageGenerator::generate(const graph::operation::Reshape::Node &node)
+{
+ const ::neurun::graph::operand::Index output_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index input_index{node.getInputs().at(0)};
+
+ struct Param
+ {
+ int output_index;
+ int input_index;
+
+ ::neurun::kernel::cpu::Shape ofm_shape;
+ ::neurun::kernel::cpu::Shape ifm_shape;
+ };
+
+ Param param;
+
+ param.output_index = output_index.asInt();
+ param.input_index = input_index.asInt();
+
+ param.ofm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(output_index));
+ param.ifm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(input_index));
+
+ auto tensors = _tensor_builder;
+
+ return [tensors, param](IExecutionBuilder &builder) {
+ auto output_alloc = tensors->at(::neurun::graph::operand::Index{param.output_index}).get();
+ auto input_alloc = tensors->at(::neurun::graph::operand::Index{param.input_index}).get();
+
+ std::unique_ptr<::neurun::kernel::cpu::ReshapeLayer> fn{
+ new ::neurun::kernel::cpu::ReshapeLayer};
+
+ fn->configure(input_alloc->buffer(), param.ifm_shape, output_alloc->buffer(), param.ofm_shape);
+
+ builder.append(std::move(fn));
+ };
+}
+
+Stage StageGenerator::generate(const graph::operation::Softmax::Node &node)
+{
+ VERBOSE(Softmax) << "generate CPU Softmax" << std::endl;
+
+ const ::neurun::graph::operand::Index output_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index input_index{node.getInputs().at(0)};
+ const ::neurun::graph::operand::Index scale_index{node.param().scale_index};
+
+ struct Param
+ {
+ int output_index;
+ int input_index;
+
+ ::neurun::kernel::cpu::Shape ofm_shape;
+ ::neurun::kernel::cpu::Shape ifm_shape;
+
+ float scale;
+ };
+
+ Param param;
+
+ param.output_index = output_index.asInt();
+ param.input_index = input_index.asInt();
+
+ param.ofm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(output_index));
+ param.ifm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(input_index));
+
+ param.scale = _ctx.at(scale_index).asScalar<float>();
+
+ auto tensors = _tensor_builder;
+
+ return [tensors, param](IExecutionBuilder &builder) {
+ auto output_alloc = tensors->at(::neurun::graph::operand::Index{param.output_index}).get();
+ auto input_alloc = tensors->at(::neurun::graph::operand::Index{param.input_index}).get();
+
+ std::unique_ptr<::neurun::kernel::cpu::SoftMaxLayer> fn{
+ new ::neurun::kernel::cpu::SoftMaxLayer};
+
+ fn->configure(input_alloc->buffer(), param.ifm_shape, param.scale, output_alloc->buffer(),
+ param.ofm_shape);
+
+ builder.append(std::move(fn));
+ };
+}
+
+Stage StageGenerator::generate(const graph::operation::NOP::Node & /* node */)
+{
+ // DO NOTHING
+ return nullptr;
+}
+
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
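Note the pattern every generate() above follows: indexes and shapes are copied into a plain Param value before the closure is built, so the returned Stage owns its data and keeps no references into _ctx, which need not outlive planning. A self-contained illustration of that capture pattern (all names here are hypothetical stand-ins):

    #include <functional>
    #include <memory>

    struct IExecutionBuilder;                   // stands in for the real interface
    using Stage = std::function<void(IExecutionBuilder &)>;

    struct Param { int ofm_index, ifm_index; }; // plain values only
    struct TensorBuilderStub {};                // stands in for TensorBuilder

    Stage make_stage(Param param, std::shared_ptr<TensorBuilderStub> tensors)
    {
      // Copies of `param` and `tensors` live inside the closure, so the Stage
      // remains valid after planning; nothing references the operand context.
      return [tensors, param](IExecutionBuilder &) { /* configure + append */ };
    }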
diff --git a/runtimes/neurun/src/backend/cpu/StageGenerator.h b/runtimes/neurun/src/backend/cpu/StageGenerator.h
new file mode 100644
index 000000000..acdd2c8b2
--- /dev/null
+++ b/runtimes/neurun/src/backend/cpu/StageGenerator.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_CPU_STAGE_GENERATOR_H__
+#define __NEURUN_BACKEND_CPU_STAGE_GENERATOR_H__
+
+#include "backend/IStageGenerator.h"
+
+#include "graph/operand/Set.h"
+#include "backend/cpu/operand/Tensor.h"
+#include "TensorBuilder.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+
+class StageGenerator : public IStageGenerator
+{
+public:
+ StageGenerator(const neurun::graph::operand::Set &ctx,
+ const std::shared_ptr<TensorBuilder> &tensor_builder);
+
+ virtual std::shared_ptr<ITensorBuilder> tensor_builder() override { return _tensor_builder; }
+
+ virtual Stage generate(const graph::operation::Conv2D::Implicit::Node &node) override;
+ virtual Stage generate(const graph::operation::MaxPool2D::Implicit::Node &node) override;
+ virtual Stage generate(const graph::operation::AvgPool2D::Implicit::Node &node) override;
+ virtual Stage generate(const graph::operation::Concat::Node &node) override;
+ virtual Stage generate(const graph::operation::FullyConnected::Node &node) override;
+ virtual Stage generate(const graph::operation::Reshape::Node &node) override;
+ virtual Stage generate(const graph::operation::Softmax::Node &node) override;
+ virtual Stage generate(const graph::operation::NOP::Node &node) override;
+
+private:
+ const neurun::graph::operand::Set &_ctx;
+ std::shared_ptr<TensorBuilder> _tensor_builder;
+};
+
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_CPU_STAGE_GENERATOR_H__
diff --git a/runtimes/neurun/src/backend/cpu/TensorBuilder.cc b/runtimes/neurun/src/backend/cpu/TensorBuilder.cc
new file mode 100644
index 000000000..1b972a830
--- /dev/null
+++ b/runtimes/neurun/src/backend/cpu/TensorBuilder.cc
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TensorBuilder.h"
+
+#include <cassert>
+
+#include "operand/Object.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+
+TensorBuilder::TensorBuilder()
+{
+ // DO NOTHING
+}
+
+void TensorBuilder::mark(const ::neurun::graph::operand::Index &ind)
+{
+ assert(_tensors.size() == 0);
+
+ _inds.insert(ind);
+}
+
+void TensorBuilder::prepare(codegen::Plan &plan,
+ const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx)
+{
+ assert(_tensors.size() == 0);
+
+ for (auto ind_int : _inds)
+ {
+ ::neurun::graph::operand::Index ind{ind_int};
+ auto tensor = std::make_shared<operand::Tensor>(tensor_info_ctx.at(ind.asInt()));
+    // TODO Defer allocation. Creating a Tensor object currently allocates its
+    // memory immediately; it should instead be allocated in allocate().
+ plan.operands().set(ind, std::make_shared<operand::Object>(tensor));
+ _tensors[ind] = tensor;
+ }
+}
+
+void TensorBuilder::allocate(void)
+{
+ assert(_inds.size() == _tensors.size());
+
+  // NOTE Nothing to do for now. Allocation is done in the prepare stage, which is wrong.
+  // See also: comment in `prepare()`
+}
+
+std::shared_ptr<operand::Tensor> TensorBuilder::at(const ::neurun::graph::operand::Index &ind)
+{
+ return _tensors.at(ind);
+}
+
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/src/backend/cpu/TensorBuilder.h b/runtimes/neurun/src/backend/cpu/TensorBuilder.h
new file mode 100644
index 000000000..f61a930fe
--- /dev/null
+++ b/runtimes/neurun/src/backend/cpu/TensorBuilder.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_CPU_TENSOR_BUILDER_H__
+#define __NEURUN_BACKEND_CPU_TENSOR_BUILDER_H__
+
+#include <map>
+#include <memory>
+#include <unordered_map>
+#include <unordered_set>
+
+#include "backend/ITensorBuilder.h"
+#include "backend/cpu/operand/Tensor.h"
+#include "graph/operand/Index.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+
+class Plan;
+
+class TensorBuilder : public ITensorBuilder
+{
+public:
+ TensorBuilder();
+
+ virtual void mark(const ::neurun::graph::operand::Index &ind) override;
+ virtual void prepare(codegen::Plan &plan,
+ const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx) override;
+ virtual void allocate(void) override;
+
+ std::shared_ptr<operand::Tensor> at(const ::neurun::graph::operand::Index &ind);
+
+private:
+ std::unordered_set<graph::operand::Index> _inds;
+ std::unordered_map<graph::operand::Index, std::shared_ptr<operand::Tensor>> _tensors;
+};
+
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_CPU_TENSOR_BUILDER_H__
diff --git a/runtimes/neurun/src/backend/cpu/operand/Object.cc b/runtimes/neurun/src/backend/cpu/operand/Object.cc
new file mode 100644
index 000000000..52b63fba7
--- /dev/null
+++ b/runtimes/neurun/src/backend/cpu/operand/Object.cc
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Object.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+namespace operand
+{
+
+void Object::access(const std::function<void(::arm_compute::ITensor &tensor)> &fn) const
+{
+ fn(*_tensor);
+}
+
+} // namespace operand
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/src/backend/cpu/operand/Object.h b/runtimes/neurun/src/backend/cpu/operand/Object.h
new file mode 100644
index 000000000..08f63f3dc
--- /dev/null
+++ b/runtimes/neurun/src/backend/cpu/operand/Object.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_CPU_OPERAND_OBJECT_H__
+#define __NEURUN_BACKEND_CPU_OPERAND_OBJECT_H__
+
+#include <memory>
+#include <arm_compute/core/ITensor.h>
+
+#include "backend/IObject.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+namespace operand
+{
+
+class Object : public backend::operand::IObject
+{
+public:
+ Object() = default;
+
+public:
+ Object(const std::shared_ptr<::arm_compute::ITensor> &tensor) : _tensor{tensor}
+ {
+ // DO NOTHING
+ }
+
+public:
+ ::arm_compute::ITensor *ptr(void) const override { return _tensor.get(); }
+
+private:
+ std::shared_ptr<::arm_compute::ITensor> _tensor;
+
+public:
+ void access(const std::function<void(::arm_compute::ITensor &tensor)> &fn) const override;
+};
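+
+// Access sketch (illustrative only): reads and writes go through a callback so
+// each backend can control how its tensor memory is touched, e.g.
+//
+//   object.access([](::arm_compute::ITensor &tensor) {
+//     // read or fill tensor.buffer() here
+//   });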
+
+} // namespace operand
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_CPU_OPERAND_OBJECT_H__
diff --git a/runtimes/neurun/src/backend/cpu/operand/Tensor.cc b/runtimes/neurun/src/backend/cpu/operand/Tensor.cc
new file mode 100644
index 000000000..0e4f34aac
--- /dev/null
+++ b/runtimes/neurun/src/backend/cpu/operand/Tensor.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Tensor.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+namespace operand
+{
+
+// NO IMPLEMENTATION YET
+
+} // namespace operand
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/src/backend/cpu/operand/Tensor.h b/runtimes/neurun/src/backend/cpu/operand/Tensor.h
new file mode 100644
index 000000000..83a99acf2
--- /dev/null
+++ b/runtimes/neurun/src/backend/cpu/operand/Tensor.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_CPU_OPERAND_TENSOR_H__
+#define __NEURUN_BACKEND_CPU_OPERAND_TENSOR_H__
+
+#include <arm_compute/core/ITensor.h>
+#include <arm_compute/core/TensorInfo.h>
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+namespace operand
+{
+
+class Tensor : public ::arm_compute::ITensor
+{
+public:
+ Tensor() = default;
+
+ Tensor(::arm_compute::TensorInfo info) : _info(info)
+ {
+    // TODO Do not allocate a buffer here. This tensor is just an abstract Tensor object for the cpu backend.
+ uint32_t size = _info.total_size(); // NOTE This size may not be accurate
+ _buffer = new uint8_t[size]; // NOTE The allocated buffer is never deallocated.
+ }
+
+ Tensor(uint8_t *buffer) : _buffer(buffer)
+ {
+ // DO NOTHING
+ }
+
+public:
+ void setBuffer(uint8_t *buffer) { _buffer = buffer; }
+
+public:
+  // NOTE ITensor declares info() as returning a non-const pointer even for a
+  //      const tensor, hence the const_cast below
+  ::arm_compute::TensorInfo *info() const override
+  {
+    return const_cast<::arm_compute::TensorInfo *>(&_info);
+  }
+
+ ::arm_compute::TensorInfo *info() override { return &_info; }
+
+ uint8_t *buffer() const override { return _buffer; }
+
+private:
+ ::arm_compute::TensorInfo _info;
+ uint8_t *_buffer = nullptr;
+};
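+
+// Construction sketch (illustrative only; the shape, data type, and
+// external_ptr below are assumed examples): a Tensor may own a buffer sized
+// from its TensorInfo, or wrap an externally managed one.
+//
+//   ::arm_compute::TensorInfo info{::arm_compute::TensorShape{4U}, 1,
+//                                  ::arm_compute::DataType::F32};
+//   Tensor owning{info};            // allocates info.total_size() bytes
+//   Tensor wrapping{external_ptr};  // wraps a caller-owned buffer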
+
+} // namespace operand
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_CPU_OPERAND_TENSOR_H__
diff --git a/runtimes/neurun/src/codegen/BackendResolver.cc b/runtimes/neurun/src/codegen/BackendResolver.cc
new file mode 100644
index 000000000..4037a2813
--- /dev/null
+++ b/runtimes/neurun/src/codegen/BackendResolver.cc
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "BackendResolver.h"
+
+namespace neurun
+{
+namespace codegen
+{
+
+// NO IMPLEMENTATION YET
+
+} // namespace codegen
+} // namespace neurun
diff --git a/runtimes/neurun/src/codegen/BackendResolver.h b/runtimes/neurun/src/codegen/BackendResolver.h
new file mode 100644
index 000000000..02f22b278
--- /dev/null
+++ b/runtimes/neurun/src/codegen/BackendResolver.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_CODEGEN_BACKEND_RESOLVER_H__
+#define __NEURUN_CODEGEN_BACKEND_RESOLVER_H__
+
+#include <set>
+#include <unordered_map>
+#include <typeindex>
+
+#include "logging.h"
+#include "util/EnvVar.h"
+#include "backend/BackendManager.h"
+#include "backend/IInitializerGenerator.h"
+#include "backend/IStageGenerator.h"
+
+namespace neurun
+{
+namespace codegen
+{
+
+class BackendResolver
+{
+public:
+ BackendResolver(const neurun::graph::operand::Set &operands)
+ {
+ _backend_manager = std::make_shared<backend::BackendManager>(operands);
+
+ const auto &backend_all_str =
+ ::nnfw::util::EnvVar{std::string("OP_BACKEND_ALLOPS")}.asString("none");
+ if (backend_all_str.compare("none") != 0)
+ {
+ VERBOSE(BackendResolver) << "Use backend for all ops: " << backend_all_str << std::endl;
+#define OP(InternalName, NnApiName) \
+ { \
+ auto backend = _backend_manager->get(backend_all_str); \
+ _gen_map[typeid(graph::operation::InternalName::Node)] = backend; \
+ }
+#include "graph/operation/Op.lst"
+#undef OP
+ }
+ else
+ {
+#define OP(InternalName, NnApiName) \
+ { \
+ const auto &backend_str = \
+ ::nnfw::util::EnvVar{std::string("OP_BACKEND_") + #NnApiName}.asString("acl_cl"); \
+ auto backend = _backend_manager->get(backend_str); \
+ VERBOSE(BackendResolver) << "backend for " << #NnApiName << ": " << backend_str << std::endl; \
+ _gen_map[typeid(graph::operation::InternalName::Node)] = backend; \
+ }
+
+#include "graph/operation/Op.lst"
+#undef OP
+ }
+ }
+
+public:
+ const backend::Backend &getBackend(const std::type_index &type) { return _gen_map[type]; }
+
+private:
+ std::unordered_map<std::type_index, backend::Backend> _gen_map;
+ std::shared_ptr<backend::BackendManager> _backend_manager;
+};
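+
+// Selection sketch (illustrative only; CONV_2D is an assumed example of an
+// NnApiName entry from graph/operation/Op.lst):
+//
+//   $ OP_BACKEND_ALLOPS=cpu ./app       # one backend for every operation
+//   $ OP_BACKEND_CONV_2D=acl_cl ./app   # or a per-operation override
+//
+// When neither variable is set, each operation falls back to "acl_cl".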
+
+} // namespace codegen
+} // namespace neurun
+
+#endif // __NEURUN_CODEGEN_BACKEND_RESOLVER_H__
diff --git a/runtimes/neurun/src/codegen/IPlanBuilder.h b/runtimes/neurun/src/codegen/IPlanBuilder.h
new file mode 100644
index 000000000..197681432
--- /dev/null
+++ b/runtimes/neurun/src/codegen/IPlanBuilder.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_CODEGEN_I_PLAN_BUILDER_H__
+#define __NEURUN_CODEGEN_I_PLAN_BUILDER_H__
+
+#include "arm_compute/core/TensorInfo.h"
+#include "backend/IStageGenerator.h"
+#include "backend/IInitializerGenerator.h"
+
+namespace neurun
+{
+namespace codegen
+{
+
+struct IPlanBuilder
+{
+ virtual ~IPlanBuilder() = default;
+
+ virtual void addShapeConstr(const ::neurun::graph::operand::Index &ind,
+ const ::arm_compute::TensorInfo &info) = 0;
+ virtual void addInitializer(const ::neurun::graph::operand::Index &ind,
+ const Initializer &initializer) = 0;
+ virtual void addStage(const Stage &) = 0;
+};
+
+} // namespace codegen
+} // namespace neurun
+
+#endif // __NEURUN_CODEGEN_I_PLAN_BUILDER_H__
diff --git a/runtimes/neurun/src/codegen/Plan.cc b/runtimes/neurun/src/codegen/Plan.cc
new file mode 100644
index 000000000..b7637b189
--- /dev/null
+++ b/runtimes/neurun/src/codegen/Plan.cc
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Plan.h"
+
+namespace neurun
+{
+namespace codegen
+{
+
+// NO IMPLEMENTATION YET
+
+} // namespace codegen
+} // namespace neurun
diff --git a/runtimes/neurun/src/codegen/Plan.h b/runtimes/neurun/src/codegen/Plan.h
new file mode 100644
index 000000000..47624ed35
--- /dev/null
+++ b/runtimes/neurun/src/codegen/Plan.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_CODEGEN_PLAN_H__
+#define __NEURUN_CODEGEN_PLAN_H__
+
+#include "graph/Graph.h"
+#include "codegen/operand/Context.h"
+#include "codegen/operation/Sequence.h"
+
+namespace neurun
+{
+namespace codegen
+{
+
+class Plan
+{
+public:
+ Plan(const std::shared_ptr<neurun::graph::Graph> &model) : _model(model)
+ {
+ // DO NOTHING
+ }
+
+public:
+ neurun::graph::Graph &model(void) { return *_model; }
+ const neurun::graph::Graph &model(void) const { return *_model; }
+
+public:
+ operand::Context &operands(void) { return _operands; }
+ const operand::Context &operands(void) const { return _operands; }
+
+public:
+ operation::Sequence &operations(void) { return _ops; }
+ const operation::Sequence &operations(void) const { return _ops; }
+
+private:
+ std::shared_ptr<neurun::graph::Graph> _model;
+ operand::Context _operands;
+ operation::Sequence _ops;
+};
+
+} // namespace codegen
+} // namespace neurun
+
+#endif // __NEURUN_CODEGEN_PLAN_H__
diff --git a/runtimes/neurun/src/codegen/PlanBuilder.cc b/runtimes/neurun/src/codegen/PlanBuilder.cc
new file mode 100644
index 000000000..5d4d6f9c9
--- /dev/null
+++ b/runtimes/neurun/src/codegen/PlanBuilder.cc
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PlanBuilder.h"
+
+namespace neurun
+{
+namespace codegen
+{
+
+void PlanBuilder::addShapeConstr(const ::neurun::graph::operand::Index &ind,
+ const ::arm_compute::TensorInfo &info)
+{
+ _tensor_info_ctx[ind.asInt()] = info;
+}
+
+void PlanBuilder::addInitializer(const ::neurun::graph::operand::Index &ind,
+ const Initializer &initializer)
+{
+ _initializer_ctx[ind.asInt()] = initializer;
+}
+
+void PlanBuilder::addStage(const Stage &stage) { _stages.emplace_back(stage); }
+
+void PlanBuilder::finalize(const backend::TensorBuilderSet &tensor_builders)
+{
+ // Prepare tensors
+ for (auto &tensor_builder : tensor_builders)
+ {
+ tensor_builder->prepare(_plan, _tensor_info_ctx);
+ }
+
+ // Process Stage
+ ExecutionBuilder execution_builder{_plan};
+
+ for (const auto &stage : _stages)
+ {
+ stage(execution_builder);
+ }
+
+  // TODO Add code for CPU/ACL tensor allocation
+  // Allocate tensor memory via each tensor builder
+ for (auto &tensor_builder : tensor_builders)
+ {
+ tensor_builder->allocate();
+ }
+
+ // Fill weight/bias
+ for (auto it = _initializer_ctx.begin(); it != _initializer_ctx.end(); ++it)
+ {
+ const ::neurun::graph::operand::Index operand_index{it->first};
+ auto objects = _plan.operands().at(operand_index);
+
+ for (auto object : objects)
+ {
+ object->access(it->second);
+ }
+ }
+}
+
+} // namespace codegen
+} // namespace neurun
diff --git a/runtimes/neurun/src/codegen/PlanBuilder.h b/runtimes/neurun/src/codegen/PlanBuilder.h
new file mode 100644
index 000000000..4323143b3
--- /dev/null
+++ b/runtimes/neurun/src/codegen/PlanBuilder.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_CODEGEN_PLAN_BUILDER_H__
+#define __NEURUN_CODEGEN_PLAN_BUILDER_H__
+
+#include "IPlanBuilder.h"
+#include "codegen/Plan.h"
+#include "backend/IStageGenerator.h"
+#include "backend/ITensorBuilder.h"
+
+namespace neurun
+{
+namespace codegen
+{
+
+class ExecutionBuilder final : public IExecutionBuilder
+{
+public:
+ ExecutionBuilder(codegen::Plan &plan) : _plan{plan}
+ {
+ // DO NOTHING
+ }
+
+public:
+ void append(std::unique_ptr<::arm_compute::IFunction> &&f) override
+ {
+ _plan.operations().append(std::move(f));
+ }
+
+private:
+ codegen::Plan &_plan;
+};
+
+class PlanBuilder final : public IPlanBuilder
+{
+public:
+ PlanBuilder(codegen::Plan &plan) : _plan{plan}
+ {
+ // DO NOTHING
+ }
+
+public:
+ void addShapeConstr(const ::neurun::graph::operand::Index &ind,
+ const ::arm_compute::TensorInfo &info) override;
+
+public:
+ void addInitializer(const ::neurun::graph::operand::Index &ind,
+ const Initializer &initializer) override;
+
+public:
+ void addStage(const Stage &stage) override;
+
+public:
+ // TODO Remove the argument `tensor_builders`
+ void finalize(const backend::TensorBuilderSet &tensor_builders);
+
+public:
+ const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx() { return _tensor_info_ctx; }
+
+private:
+ codegen::Plan &_plan;
+
+private:
+ std::map<int, ::arm_compute::TensorInfo> _tensor_info_ctx;
+ std::map<int, Initializer> _initializer_ctx;
+ std::vector<Stage> _stages;
+};
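+
+// Build-flow sketch (illustrative only), matching the order finalize() runs in:
+//
+//   PlanBuilder builder{plan};
+//   builder.addShapeConstr(index, info);         // record shape constraints
+//   builder.addInitializer(index, initializer);  // record weight/bias fills
+//   builder.addStage(stage);                     // record stage functions
+//   builder.finalize(tensor_builders);           // prepare, stage, allocate, fill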
+
+} // namespace codegen
+} // namespace neurun
+
+#endif // __NEURUN_CODEGEN_PLAN_BUILDER_H__
diff --git a/runtimes/neurun/src/codegen/Planner.cc b/runtimes/neurun/src/codegen/Planner.cc
new file mode 100644
index 000000000..1add3acf5
--- /dev/null
+++ b/runtimes/neurun/src/codegen/Planner.cc
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Planner.h"
+
+#include <typeinfo>
+#include <stdexcept>
+
+#include "internal/Convert.h"
+#include "graph/operand/Set.h"
+#include "codegen/IPlanBuilder.h"
+#include "graph/operation/LowerInfo.h"
+
+#include "logging.h"
+
+namespace neurun
+{
+namespace codegen
+{
+
+void Planner::visit(const graph::operation::Conv2D::Implicit::Node &node)
+{
+ const auto ofm_index = node.getOutputs().at(0);
+
+ const auto ifm_index = node.getInputs().at(0);
+ const auto ker_index = node.getInputs().at(1);
+ const auto bias_index = node.getInputs().at(2);
+
+ const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
+ const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+ const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
+ const auto bias_size = _ctx.at(bias_index).shape().asVector();
+
+ // Set Shape Constraints
+ _builder.addShapeConstr(ofm_index, ::internal::asTensorInfo(ofm_shape));
+ _builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
+ _builder.addShapeConstr(ker_index, ::internal::asTensorInfo(ker_shape));
+ _builder.addShapeConstr(bias_index, ::internal::asTensorInfo(bias_size));
+
+ // backend
+ auto backend = node.lower_info()->backend();
+
+ // Generate Initializers
+ auto init_gen = backend.initializer_gen();
+ _builder.addInitializer(ker_index, init_gen->generateWeight(node));
+ _builder.addInitializer(bias_index, init_gen->generateBias(node));
+
+ // Generate Stage
+ auto stage_gen = backend.stage_gen();
+ _builder.addStage(stage_gen->generate(node));
+}
+
+void Planner::visit(const graph::operation::MaxPool2D::Implicit::Node &node)
+{
+ const ::neurun::graph::operand::Index ofm_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index ifm_index{node.getInputs().at(0)};
+
+ const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
+ const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+
+ // Set Shape Constraints
+ _builder.addShapeConstr(ofm_index, ::internal::asTensorInfo(ofm_shape));
+ _builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
+
+ // backend
+ auto backend = node.lower_info()->backend();
+
+ // Generate Stage
+ auto stage_gen = backend.stage_gen();
+ _builder.addStage(stage_gen->generate(node));
+}
+
+void Planner::visit(const graph::operation::AvgPool2D::Implicit::Node &node)
+{
+ const ::neurun::graph::operand::Index ofm_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index ifm_index{node.getInputs().at(0)};
+
+ const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
+ const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+
+ // Set Shape Constraints
+ _builder.addShapeConstr(ofm_index, ::internal::asTensorInfo(ofm_shape));
+ _builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
+
+ // backend
+ auto backend = node.lower_info()->backend();
+
+ // Generate Stage
+ auto stage_gen = backend.stage_gen();
+ _builder.addStage(stage_gen->generate(node));
+}
+
+void Planner::visit(const graph::operation::Concat::Node &node)
+{
+ const ::neurun::graph::operand::Index ofm_index{node.getOutputs().at(0)};
+
+ // NOTE This implementation assumes that input and output are a feature
+ // TODO Remove this assumption
+ const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
+
+ // NOTE This implementation assumes concat over feature depth
+ // TODO Remove this assumption
+ assert(_ctx.at(::neurun::graph::operand::Index{node.param().axis_index}).asScalar<int32_t>() ==
+ 3);
+
+ // Set Shape Constraints (for output)
+ _builder.addShapeConstr(ofm_index, ::internal::asTensorInfo(ofm_shape));
+
+ // Set Shape Constraints (for input)
+ for (const auto &index : node.getInputs())
+ {
+ const ::neurun::graph::operand::Index ifm_index{index};
+ const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+ _builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
+ }
+
+ // backend
+ auto backend = node.lower_info()->backend();
+
+ // Generate Stage
+ auto stage_gen = backend.stage_gen();
+ _builder.addStage(stage_gen->generate(node));
+}
+
+void Planner::visit(const graph::operation::FullyConnected::Node &node)
+{
+ VERBOSE(FullyConnected) << "Configure FULLY_CONNECTED operation" << std::endl;
+
+ const ::neurun::graph::operand::Index output_index{node.getOutputs().at(0)};
+
+ const ::neurun::graph::operand::Index input_index{node.getInputs().at(0)};
+ const ::neurun::graph::operand::Index weight_index{node.getInputs().at(1)};
+ const ::neurun::graph::operand::Index bias_index{node.getInputs().at(2)};
+
+ const ::neurun::graph::operand::Index activation_index{node.param().activation_index};
+
+ assert(_ctx.at(output_index).shape().rank() == 2);
+ const auto output_size = _ctx.at(output_index).shape().dim(1);
+
+ // NOTE We assume that input is a feature map
+ // TODO Remove this restriction!
+ const auto ifm_shape = _ctx.at(input_index).shape().asFeature();
+
+ assert(_ctx.at(weight_index).shape().rank() == 2);
+ const auto num_output = _ctx.at(weight_index).shape().dim(0);
+ const auto input_size = _ctx.at(weight_index).shape().dim(1);
+ assert(ifm_shape.C * ifm_shape.H * ifm_shape.W == input_size);
+
+ const auto bias_size = _ctx.at(bias_index).shape().asVector();
+
+ // Set Shape Constraints
+ _builder.addShapeConstr(output_index, ::internal::asTensorInfo(output_size));
+ _builder.addShapeConstr(input_index, ::internal::asTensorInfo(ifm_shape));
+ _builder.addShapeConstr(weight_index,
+ ::internal::asTensorInfo(num_output /*H*/, input_size /*W*/));
+ _builder.addShapeConstr(bias_index, ::internal::asTensorInfo(bias_size));
+
+ // backend
+ auto backend = node.lower_info()->backend();
+
+ // Generate Initializers
+ auto init_gen = backend.initializer_gen();
+ _builder.addInitializer(weight_index, init_gen->generateWeight(node));
+ _builder.addInitializer(bias_index, init_gen->generateBias(node));
+
+ // Generate Stage
+ auto stage_gen = backend.stage_gen();
+ _builder.addStage(stage_gen->generate(node));
+}
+
+void Planner::visit(const graph::operation::Reshape::Node &node)
+{
+ const ::neurun::graph::operand::Index output_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index input_index{node.getInputs().at(0)};
+
+  // NOTE The content of the tensor specified by shape_index should be aligned with
+  //      the output tensor shape
+  // TODO Check consistency of the output shape
+
+ // 'Feature Map' to 'Vector' reshape
+ assert(_ctx.at(input_index).shape().rank() == 4);
+ assert(_ctx.at(output_index).shape().rank() == 2);
+ assert(_ctx.at(output_index).shape().dim(0) == 1);
+
+ const auto ifm_shape = _ctx.at(input_index).shape().asFeature();
+ const auto out_size = _ctx.at(output_index).shape().dim(1);
+
+ // NOTE Vector element ordering issue arises when H or W is not 1
+ assert(ifm_shape.H == 1);
+ assert(ifm_shape.W == 1);
+ assert((ifm_shape.C * ifm_shape.H * ifm_shape.W) == out_size);
+
+ _builder.addShapeConstr(output_index, ::internal::asTensorInfo(out_size));
+ _builder.addShapeConstr(input_index, ::internal::asTensorInfo(ifm_shape));
+
+ // backend
+ auto backend = node.lower_info()->backend();
+
+ // Generate Stage
+ auto stage_gen = backend.stage_gen();
+ _builder.addStage(stage_gen->generate(node));
+}
+
+void Planner::visit(const graph::operation::Softmax::Node &node)
+{
+ VERBOSE(Softmax) << "Configure SOFTMAX operation" << std::endl;
+
+ const ::neurun::graph::operand::Index output_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index input_index{node.getInputs().at(0)};
+
+ assert(_ctx.at(output_index).shape().rank() == _ctx.at(input_index).shape().rank());
+
+ // TODO Support 'feature map' input
+ assert(_ctx.at(input_index).shape().rank() == 2);
+ assert(_ctx.at(input_index).shape().dim(0) == 1);
+ assert(_ctx.at(input_index).shape().dim(0) == _ctx.at(output_index).shape().dim(0));
+ assert(_ctx.at(input_index).shape().dim(1) == _ctx.at(output_index).shape().dim(1));
+
+ const uint32_t len = _ctx.at(output_index).shape().dim(1);
+
+ _builder.addShapeConstr(output_index, ::internal::asTensorInfo(len));
+ _builder.addShapeConstr(input_index, ::internal::asTensorInfo(len));
+
+ // backend
+ auto backend = node.lower_info()->backend();
+
+ // Generate Stage
+ auto stage_gen = backend.stage_gen();
+ _builder.addStage(stage_gen->generate(node));
+}
+
+void Planner::visit(const graph::operation::NOP::Node & /* node */)
+{
+ // DO NOTHING
+  // TODO This is just for graph manipulation tests for now; a tensor-copy stage should be added later.
+}
+
+void Planner::visit(const graph::operation::Permute::Node & /* node */)
+{
+  throw std::runtime_error{"NYI"};
+}
+
+} // namespace codegen
+} // namespace neurun
diff --git a/runtimes/neurun/src/codegen/Planner.h b/runtimes/neurun/src/codegen/Planner.h
new file mode 100644
index 000000000..d725567b5
--- /dev/null
+++ b/runtimes/neurun/src/codegen/Planner.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_CODEGEN_PLANNER_H__
+#define __NEURUN_CODEGEN_PLANNER_H__
+
+#include "graph/operation/NodeVisitor.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+class Set;
+} // namespace operand
+} // namespace graph
+} // namespace neurun
+
+namespace neurun
+{
+namespace codegen
+{
+
+class IPlanBuilder;
+
+class Planner : public graph::operation::NodeVisitor
+{
+public:
+ Planner(const neurun::graph::operand::Set &ctx, neurun::codegen::IPlanBuilder &builder)
+ : _ctx{ctx}, _builder{builder}
+ {
+ }
+
+public:
+ virtual void visit(const graph::operation::Conv2D::Implicit::Node &) override;
+ virtual void visit(const graph::operation::MaxPool2D::Implicit::Node &) override;
+ virtual void visit(const graph::operation::AvgPool2D::Implicit::Node &) override;
+ virtual void visit(const graph::operation::Concat::Node &) override;
+ virtual void visit(const graph::operation::Reshape::Node &) override;
+ virtual void visit(const graph::operation::FullyConnected::Node &) override;
+ virtual void visit(const graph::operation::Softmax::Node &) override;
+ virtual void visit(const graph::operation::NOP::Node &) override;
+ virtual void visit(const graph::operation::Permute::Node &) override;
+
+private:
+ const neurun::graph::operand::Set &_ctx;
+ neurun::codegen::IPlanBuilder &_builder;
+};
+
+} // namespace codegen
+} // namespace neurun
+
+#endif // __NEURUN_CODEGEN_PLANNER_H__
diff --git a/runtimes/neurun/src/codegen/operand/Context.cc b/runtimes/neurun/src/codegen/operand/Context.cc
new file mode 100644
index 000000000..7e5cdeccf
--- /dev/null
+++ b/runtimes/neurun/src/codegen/operand/Context.cc
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Context.h"
+
+namespace neurun
+{
+namespace codegen
+{
+namespace operand
+{
+
+Context &Context::set(const graph::operand::Index &id,
+ const std::shared_ptr<backend::operand::IObject> &object)
+{
+ _objects[id.value()].emplace_back(object);
+ return (*this);
+}
+
+} // namespace operand
+} // namespace codegen
+} // namespace neurun
diff --git a/runtimes/neurun/src/codegen/operand/Context.h b/runtimes/neurun/src/codegen/operand/Context.h
new file mode 100644
index 000000000..386c253c6
--- /dev/null
+++ b/runtimes/neurun/src/codegen/operand/Context.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_CODEGEN_OPERAND_CONTEXT_H__
+#define __NEURUN_CODEGEN_OPERAND_CONTEXT_H__
+
+#include "backend/IObject.h"
+#include "graph/operand/Index.h"
+
+#include <map>
+
+namespace neurun
+{
+namespace codegen
+{
+namespace operand
+{
+
+class Context
+{
+public:
+ Context &set(const graph::operand::Index &ind,
+ const std::shared_ptr<backend::operand::IObject> &object);
+
+public:
+ bool exist(const ::neurun::graph::operand::Index &ind) const
+ {
+ return _objects.find(ind.asInt()) != _objects.end();
+ }
+
+public:
+ const std::vector<std::shared_ptr<backend::operand::IObject>> &
+ at(const graph::operand::Index &ind) const
+ {
+ return _objects.at(ind.asInt());
+ }
+
+ std::vector<std::shared_ptr<backend::operand::IObject>> &at(const graph::operand::Index &ind)
+ {
+ return _objects.at(ind.asInt());
+ }
+
+private:
+ std::map<int, std::vector<std::shared_ptr<backend::operand::IObject>>> _objects;
+};
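+
+// Usage sketch (illustrative only; fn stands for a
+// std::function<void(::arm_compute::ITensor &)>): one operand index may map to
+// several backend objects, which is why at() returns a vector.
+//
+//   ctx.set(index, object);
+//   if (ctx.exist(index))
+//     for (const auto &obj : ctx.at(index))
+//       obj->access(fn);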
+
+} // namespace operand
+} // namespace codegen
+} // namespace neurun
+
+#endif // __NEURUN_CODEGEN_OPERAND_CONTEXT_H__
diff --git a/runtimes/neurun/src/codegen/operation/Sequence.cc b/runtimes/neurun/src/codegen/operation/Sequence.cc
new file mode 100644
index 000000000..908e84a5c
--- /dev/null
+++ b/runtimes/neurun/src/codegen/operation/Sequence.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Sequence.h"
+
+namespace neurun
+{
+namespace codegen
+{
+namespace operation
+{
+
+// NO IMPLEMENTATION YET
+
+} // namespace operation
+} // namespace codegen
+} // namespace neurun
diff --git a/runtimes/neurun/src/codegen/operation/Sequence.h b/runtimes/neurun/src/codegen/operation/Sequence.h
new file mode 100644
index 000000000..83403feae
--- /dev/null
+++ b/runtimes/neurun/src/codegen/operation/Sequence.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_CODEGEN_OPERATION_SEQUENCE_H__
+#define __NEURUN_CODEGEN_OPERATION_SEQUENCE_H__
+
+#include <stdint.h>
+#include <arm_compute/runtime/IFunction.h>
+#include <memory>
+#include <vector>
+
+namespace neurun
+{
+namespace codegen
+{
+namespace operation
+{
+
+class Sequence
+{
+public:
+ uint32_t size(void) const { return _functions.size(); }
+
+public:
+ Sequence &append(std::unique_ptr<::arm_compute::IFunction> &&func)
+ {
+ _functions.emplace_back(std::move(func));
+ return (*this);
+ }
+
+public:
+ ::arm_compute::IFunction &at(uint32_t n) const { return *(_functions.at(n)); }
+
+private:
+ std::vector<std::unique_ptr<::arm_compute::IFunction>> _functions;
+};
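+
+// Usage sketch (illustrative only), mirroring how execution.cc runs a plan:
+//
+//   seq.append(std::move(function));
+//   for (uint32_t n = 0; n < seq.size(); ++n)
+//     seq.at(n).run();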
+
+} // namespace operation
+} // namespace codegen
+} // namespace neurun
+
+#endif // __NEURUN_CODEGEN_OPERATION_SEQUENCE_H__
diff --git a/runtimes/neurun/src/exec/Sink.h b/runtimes/neurun/src/exec/Sink.h
new file mode 100644
index 000000000..a96f08320
--- /dev/null
+++ b/runtimes/neurun/src/exec/Sink.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_EXEC_SINK_H__
+#define __NEURUN_EXEC_SINK_H__
+
+#include <cassert>
+#include <typeinfo>
+
+#include <arm_compute/core/ITensor.h>
+#include <arm_compute/runtime/CL/CLTensor.h>
+
+#include <util/feature/Shape.h>
+#include <util/feature/IndexIterator.h>
+
+#include "backend/cpu/operand/Tensor.h" // TODO Remove this dependency to backend
+#include "internal/nnapi/feature/View.h"
+#include "internal/nnapi/feature/Reader.h"
+
+namespace neurun
+{
+namespace exec
+{
+
+struct Sink
+{
+ virtual ~Sink() = default;
+
+ virtual void pull(::arm_compute::ITensor &tensor) const = 0;
+};
+
+//
+// VectorSink
+//
+class VectorSink final : public Sink
+{
+public:
+ VectorSink(const int32_t vlen, uint8_t *base, const size_t size) : _vlen{vlen}, _base{base}
+ {
+ (void)size; // Workaround for unused variable in release mode
+ assert(size >= _vlen * sizeof(float));
+ }
+
+public:
+ void pull(::arm_compute::ITensor &tensor) const override
+ {
+ float *base = reinterpret_cast<float *>(_base);
+
+ for (int32_t n = 0; n < _vlen; ++n)
+ {
+ auto from = reinterpret_cast<float *>(tensor.ptr_to_element(::arm_compute::Coordinates{n}));
+ auto into = base + n;
+
+ *into = *from;
+ }
+ }
+
+private:
+ const int32_t _vlen;
+ uint8_t *const _base;
+};
+
+//
+// FeatureSink
+//
+class FeatureSink final : public Sink
+{
+public:
+ FeatureSink(const nnfw::util::feature::Shape &shape, uint8_t *base, const size_t size)
+ : _shape{shape}, _base{base}, _size{size}
+ {
+ // DO NOTHING
+ }
+
+public:
+ void pull(::arm_compute::ITensor &tensor) const override
+ {
+    // TODO This is just workaround code; it needs to be refactored.
+    //      Tensor types other than the two handled below are silently ignored.
+ if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
+ {
+ const ::internal::nnapi::feature::Reader<float> from{_shape, tensor.buffer(), _size};
+ ::internal::nnapi::feature::View<float> into{_shape, _base, _size};
+
+ ::nnfw::util::feature::iterate(_shape)
+ << [&](uint32_t bat, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(bat, ch, row, col);
+ into.at(bat, ch, row, col) = value;
+ };
+ }
+ else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
+ {
+ const ::internal::arm_compute::feature::View<float> from{&tensor};
+ ::internal::nnapi::feature::View<float> into{_shape, _base, _size};
+
+ ::nnfw::util::feature::iterate(_shape)
+ << [&](uint32_t bat, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(bat, ch, row, col);
+ into.at(bat, ch, row, col) = value;
+ };
+ }
+ }
+
+private:
+ const nnfw::util::feature::Shape _shape;
+ uint8_t *const _base;
+ const size_t _size;
+};
+
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_SINK_H__
diff --git a/runtimes/neurun/src/exec/Source.h b/runtimes/neurun/src/exec/Source.h
new file mode 100644
index 000000000..e7c2e80c4
--- /dev/null
+++ b/runtimes/neurun/src/exec/Source.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_EXEC_SOURCE_H__
+#define __NEURUN_EXEC_SOURCE_H__
+
+#include <cassert>
+#include <typeinfo>
+
+#include <arm_compute/runtime/CL/CLTensor.h>
+
+#include <util/feature/Shape.h>
+#include <util/feature/IndexIterator.h>
+
+#include "backend/cpu/operand/Tensor.h" // TODO Remove this dependency to backend
+#include "internal/nnapi/feature/Reader.h"
+#include "internal/nnapi/feature/View.h"
+
+#include "backend/acl_cl/feature/View.h"
+
+namespace neurun
+{
+namespace exec
+{
+
+struct Source
+{
+ virtual ~Source() = default;
+
+ virtual void push(::arm_compute::ITensor &tensor) const = 0;
+};
+
+//
+// VectorSource
+//
+class VectorSource final : public Source
+{
+public:
+ VectorSource(const int32_t vlen, const uint8_t *base, const size_t size)
+ : _vlen{vlen}, _base{base}
+ {
+ (void)size; // Workaround for unused variable in release mode
+ assert(size >= _vlen * sizeof(float));
+ }
+
+public:
+ void push(::arm_compute::ITensor &tensor) const override
+ {
+ auto base = reinterpret_cast<const float *>(_base);
+
+ for (int32_t n = 0; n < _vlen; ++n)
+ {
+ auto from = base + n;
+ auto into = reinterpret_cast<float *>(tensor.ptr_to_element(::arm_compute::Coordinates{n}));
+
+ *into = *from;
+ }
+ }
+
+private:
+ const int32_t _vlen;
+ const uint8_t *const _base;
+};
+
+//
+// FeatureSource
+//
+class FeatureSource final : public Source
+{
+public:
+ FeatureSource(const nnfw::util::feature::Shape &shape, const uint8_t *base, const size_t size)
+ : _shape{shape}, _base{base}, _size{size}
+ {
+ // DO NOTHING
+ }
+
+public:
+ void push(::arm_compute::ITensor &tensor) const override
+ {
+    // TODO This is just workaround code; it needs to be refactored.
+    //      Tensor types other than the two handled below are silently ignored.
+ if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
+ {
+ const ::internal::nnapi::feature::Reader<float> from{_shape, _base, _size};
+ ::internal::nnapi::feature::View<float> into{_shape, tensor.buffer(), _size};
+
+ ::nnfw::util::feature::iterate(_shape)
+ << [&](uint32_t bat, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(bat, ch, row, col);
+ into.at(bat, ch, row, col) = value;
+ };
+ }
+ else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
+ {
+ const ::internal::nnapi::feature::Reader<float> from{_shape, _base, _size};
+ ::internal::arm_compute::feature::View<float> into{&tensor};
+
+ ::nnfw::util::feature::iterate(_shape)
+ << [&](uint32_t bat, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(bat, ch, row, col);
+ into.at(bat, ch, row, col) = value;
+ };
+ }
+ }
+
+private:
+ const nnfw::util::feature::Shape _shape;
+ const uint8_t *const _base;
+ const size_t _size;
+};
+
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_SOURCE_H__
diff --git a/runtimes/neurun/src/frontend/compilation.cc b/runtimes/neurun/src/frontend/compilation.cc
new file mode 100644
index 000000000..a135edac5
--- /dev/null
+++ b/runtimes/neurun/src/frontend/compilation.cc
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <NeuralNetworks.h>
+
+#include <new>
+
+#include "frontend/wrapper/model.h"
+#include "frontend/wrapper/compilation.h"
+
+//
+// NNAPI Implementation
+//
+int ANeuralNetworksCompilation_create(ANeuralNetworksModel *model,
+ ANeuralNetworksCompilation **compilation)
+{
+ if ((model == nullptr) || (compilation == nullptr))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+ std::shared_ptr<neurun::graph::Graph> internal;
+
+ model->release(internal);
+
+ *compilation = new (std::nothrow) ANeuralNetworksCompilation(internal);
+ if (*compilation == nullptr)
+ {
+ return ANEURALNETWORKS_OUT_OF_MEMORY;
+ }
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation *compilation)
+{
+ if (compilation == nullptr)
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+ return compilation->finish();
+}
+
+void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation *compilation)
+{
+ delete compilation;
+}
+
+int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation *compilation,
+ int32_t /* preference */)
+{
+ if (compilation == nullptr)
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+  // NYI
+ return ANEURALNETWORKS_NO_ERROR;
+}
diff --git a/runtimes/neurun/src/frontend/event.cc b/runtimes/neurun/src/frontend/event.cc
new file mode 100644
index 000000000..cd47cc691
--- /dev/null
+++ b/runtimes/neurun/src/frontend/event.cc
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <NeuralNetworks.h>
+
+#include "frontend/wrapper/event.h"
+
+int ANeuralNetworksEvent_wait(ANeuralNetworksEvent *event)
+{
+ if (event == nullptr)
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+void ANeuralNetworksEvent_free(ANeuralNetworksEvent *event) { delete event; }
diff --git a/runtimes/neurun/src/frontend/execution.cc b/runtimes/neurun/src/frontend/execution.cc
new file mode 100644
index 000000000..ff34921b7
--- /dev/null
+++ b/runtimes/neurun/src/frontend/execution.cc
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <NeuralNetworks.h>
+
+#include <new>
+
+#include "frontend/wrapper/compilation.h"
+#include "frontend/wrapper/execution.h"
+#include "frontend/wrapper/event.h"
+
+#include "graph/operand/Index.h"
+
+//
+// NNAPI Implementation
+//
+int ANeuralNetworksExecution_create(ANeuralNetworksCompilation *compilation,
+ ANeuralNetworksExecution **execution)
+{
+ if ((compilation == nullptr) || (execution == nullptr))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+ std::shared_ptr<const neurun::codegen::Plan> plan;
+
+ compilation->publish(plan);
+
+ *execution = new (std::nothrow) ANeuralNetworksExecution{plan};
+ if (*execution == nullptr)
+ {
+ return ANEURALNETWORKS_OUT_OF_MEMORY;
+ }
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution *execution, int32_t index,
+ const ANeuralNetworksOperandType * /* type */,
+ const void *buffer, size_t length)
+{
+ // Don't check type
+ // Comment about ANeuralNetworksOperandType in NeuralNetworks.h:
+ // If the input or output is optional and omitted then it need not have a fully specified tensor
+ // operand type
+ if ((execution == nullptr) || ((buffer == nullptr) && (length != 0)))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+ const auto &operands = execution->plan().model().operands();
+
+ // TODO Check type conflicts
+
+  // NOTE The current implementation assumes that every input is a feature map.
+ // TODO Remove this assumption
+ neurun::graph::operand::IO::Index input_index{index};
+
+ const auto operand_index = execution->plan().model().getInputs().at(input_index);
+
+ if (operands.at(operand_index).shape().rank() == 2)
+ {
+ assert(operands.at(operand_index).shape().dim(0) == 1);
+
+ const auto len = operands.at(operand_index).shape().dim(1);
+
+ execution->source<neurun::exec::VectorSource>(
+ index, len, reinterpret_cast<const uint8_t *>(buffer), length);
+ }
+ else if (operands.at(operand_index).shape().rank() == 4)
+ {
+ const auto &operand_shape = operands.at(operand_index).shape().asFeature();
+
+ execution->source<neurun::exec::FeatureSource>(
+ index, operand_shape, reinterpret_cast<const uint8_t *>(buffer), length);
+ }
+ else
+ {
+ throw std::runtime_error{"Not supported, yet"};
+ }
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
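+
+// Usage sketch (illustrative only; the shape is an assumed example): a rank-4
+// input takes the FeatureSource path above, a rank-2 input the VectorSource one.
+//
+//   float input[1 * 3 * 3 * 1];
+//   ANeuralNetworksExecution_setInput(execution, 0, nullptr, input, sizeof(input));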
+
+int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution *execution, int32_t index,
+ const ANeuralNetworksOperandType * /* type */, void *buffer,
+ size_t length)
+{
+ // Don't check type
+ // Comment about ANeuralNetworksOperandType in NeuralNetworks.h:
+ // If the input or output is optional and omitted then it need not have a fully specified tensor
+ // operand type
+ if ((execution == nullptr) || ((buffer == nullptr) && (length != 0)))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+ const auto &operands = execution->plan().model().operands();
+
+ // TODO Check type conflicts
+
+  // NOTE The current implementation assumes that every output is a feature map.
+ // TODO Remove this assumption
+ neurun::graph::operand::IO::Index output_index{index};
+
+ const auto operand_index = execution->plan().model().getOutputs().at(output_index);
+
+ if (operands.at(operand_index).shape().rank() == 2)
+ {
+ assert(operands.at(operand_index).shape().dim(0) == 1);
+
+ const auto len = operands.at(operand_index).shape().dim(1);
+
+ execution->sink<neurun::exec::VectorSink>(index, len, reinterpret_cast<uint8_t *>(buffer),
+ length);
+ }
+ else if (operands.at(operand_index).shape().rank() == 4)
+ {
+ const auto &operand_shape = operands.at(operand_index).shape().asFeature();
+
+ execution->sink<neurun::exec::FeatureSink>(index, operand_shape,
+ reinterpret_cast<uint8_t *>(buffer), length);
+ }
+ else
+ {
+ throw std::runtime_error{"Not supported, yet"};
+ }
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution *execution,
+ ANeuralNetworksEvent **event)
+{
+ if ((execution == nullptr) || (event == nullptr))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+  // TODO Handle event
+ *event = new (std::nothrow) ANeuralNetworksEvent{};
+ if (*event == nullptr)
+ {
+ return ANEURALNETWORKS_OUT_OF_MEMORY;
+ }
+
+ const auto &plan = execution->plan();
+ const auto &model = plan.model();
+
+ // Set input(s)
+ for (uint32_t n = 0; n < model.getInputs().size(); ++n)
+ {
+ auto setter = [&](::arm_compute::ITensor &tensor) { execution->source(n).push(tensor); };
+
+ neurun::graph::operand::IO::Index input_index{n};
+
+ ::neurun::graph::operand::Index index{model.getInputs().at(input_index)};
+ auto objects = plan.operands().at(index);
+
+ for (auto object : objects)
+ {
+ object->access(setter);
+ }
+ }
+
+ const auto &operations = execution->plan().operations();
+
+ for (uint32_t n = 0; n < operations.size(); ++n)
+ {
+ operations.at(n).run();
+ }
+
+ // Get output(s)
+ for (uint32_t n = 0; n < model.getOutputs().size(); ++n)
+ {
+ auto getter = [&](::arm_compute::ITensor &tensor) { execution->sink(n).pull(tensor); };
+
+ neurun::graph::operand::IO::Index output_index{n};
+
+ ::neurun::graph::operand::Index index{model.getOutputs().at(output_index)};
+ auto objects = plan.operands().at(index);
+
+ for (auto object : objects)
+ {
+ object->access(getter);
+ }
+ }
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+void ANeuralNetworksExecution_free(ANeuralNetworksExecution * /* execution */) {}
+
+int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution *execution,
+ int32_t /* index */,
+ const ANeuralNetworksOperandType * /* type */,
+ const ANeuralNetworksMemory *memory,
+ size_t /* offset */, size_t /* length */)
+{
+ if ((execution == nullptr) || (memory == nullptr))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+ // NYI
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution *execution,
+ int32_t /* index */,
+ const ANeuralNetworksOperandType * /* type */,
+ const ANeuralNetworksMemory *memory,
+ size_t /* offset */, size_t /* length */)
+{
+ if ((execution == nullptr) || (memory == nullptr))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+ // NYI
+ return ANEURALNETWORKS_NO_ERROR;
+}
diff --git a/runtimes/neurun/src/frontend/memory.cc b/runtimes/neurun/src/frontend/memory.cc
new file mode 100644
index 000000000..cc891feef
--- /dev/null
+++ b/runtimes/neurun/src/frontend/memory.cc
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <NeuralNetworks.h>
+#include <sys/mman.h>
+#include <new>
+#include <memory>
+
+#include "nnfw/std/memory.h"
+#include "frontend/wrapper/memory.h"
+
+int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset,
+ ANeuralNetworksMemory **memory)
+{
+ if (memory == nullptr)
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+  // Use a unique pointer to avoid a memory leak
+ std::unique_ptr<ANeuralNetworksMemory> memory_ptr =
+ nnfw::make_unique<ANeuralNetworksMemory>(size, protect, fd, offset);
+ if (memory_ptr == nullptr)
+ {
+ return ANEURALNETWORKS_OUT_OF_MEMORY;
+ }
+ *memory = memory_ptr.release();
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+void ANeuralNetworksMemory_free(ANeuralNetworksMemory *memory) { delete memory; }
diff --git a/runtimes/neurun/src/frontend/model.cc b/runtimes/neurun/src/frontend/model.cc
new file mode 100644
index 000000000..28a9b2515
--- /dev/null
+++ b/runtimes/neurun/src/frontend/model.cc
@@ -0,0 +1,434 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <NeuralNetworks.h>
+#include <NeuralNetworksEx.h>
+
+#include <cassert>
+#include <stdexcept>
+#include <new>
+
+#include "nnfw/std/memory.h"
+
+#include "graph/Graph.h"
+#include "frontend/wrapper/model.h"
+#include "frontend/wrapper/memory.h"
+#include "graph/operation/AvgPool2D.h"
+#include "graph/operation/Concat.h"
+#include "graph/operation/Conv2D.h"
+#include "graph/operation/FullyConnected.h"
+#include "graph/operation/MaxPool2D.h"
+#include "graph/operation/Reshape.h"
+#include "graph/operation/Softmax.h"
+
+int ANeuralNetworksModel_create(ANeuralNetworksModel **model)
+{
+ if (model == nullptr)
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+ *model = new (std::nothrow) ANeuralNetworksModel{};
+ if (*model == nullptr)
+ {
+ return ANEURALNETWORKS_OUT_OF_MEMORY;
+ }
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+void ANeuralNetworksModel_free(ANeuralNetworksModel *model) { delete model; }
+
+int ANeuralNetworksModel_addOperand(ANeuralNetworksModel *model,
+ const ANeuralNetworksOperandType *type)
+{
+ if ((model == nullptr) || (type == nullptr))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+ if (model->isFinished())
+ {
+ return ANEURALNETWORKS_BAD_STATE;
+ }
+
+ // scale and zeroPoint should be zero for scalars and non-fixed point tensors
+ // Quantized:
+ // scale: a 32 bit floating point value greater than zero
+ // zeroPoint: a 32 bit integer, in range [0, 255]
+ if (type->type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM)
+ {
+ if (!(type->scale > 0.0f))
+ {
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+
+ if ((type->zeroPoint < 0) || (type->zeroPoint > 255))
+ {
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ }
+ else if ((type->scale != 0.0f) || (type->zeroPoint != 0))
+ {
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+
+ // dimensionCount should be zero for scalars
+ if ((type->dimensionCount != 0) &&
+ ((type->type == ANEURALNETWORKS_FLOAT32) || (type->type == ANEURALNETWORKS_INT32) ||
+ (type->type == ANEURALNETWORKS_UINT32)))
+ {
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+
+ ::neurun::graph::operand::Shape shape(type->dimensionCount);
+ ::neurun::graph::operand::TypeInfo typeInfo((OperandCode)(type->type), type->scale,
+ type->zeroPoint);
+
+ for (uint32_t axis = 0; axis < type->dimensionCount; ++axis)
+ {
+ shape.dim(axis) = type->dimensions[axis];
+ }
+
+ model->deref().addOperand(shape, typeInfo);
+
+  // NOTE We do NOT allocate CLTensor here as we do not know how to interpret this one.
+ // TensorFlow Lite may interpret a rank-4 tensor either as a feature map (with batch) or
+ // a convolution kernel.
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
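+
+// Illustrative operand types (hypothetical values) that satisfy the checks above:
+//
+//   uint32_t dims[2] = {1, 1000};
+//   // Quantized tensor: scale > 0, zeroPoint in [0, 255]
+//   ANeuralNetworksOperandType quant8{ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, 2, dims, 0.05f, 128};
+//   // Scalar: dimensionCount, scale and zeroPoint must all be zero
+//   ANeuralNetworksOperandType scalar{ANEURALNETWORKS_INT32, 0, nullptr, 0.0f, 0};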
+
+int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel *model, int32_t index,
+ const void *buffer, size_t length)
+{
+ if ((model == nullptr) || ((buffer == nullptr) && (length != 0)))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+ if (model->isFinished())
+ {
+ return ANEURALNETWORKS_BAD_STATE;
+ }
+
+ // Negative index value is not allowed
+ if (index < 0)
+ {
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ const neurun::graph::operand::Index ind{static_cast<uint32_t>(index)};
+
+ if (!model->deref().operands().exist(ind))
+ {
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+
+ auto &obj = model->deref().operands().at(ind);
+ if (obj.operandSize() != length)
+ {
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ if (!obj.setAsConstant())
+ {
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+
+ using ::neurun::graph::operand::CachedData;
+
+ model->deref().setOperandValue(
+ ind, nnfw::make_unique<CachedData>(reinterpret_cast<const uint8_t *>(buffer), length));
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel *model, int32_t index,
+ const ANeuralNetworksMemory *memory,
+ size_t offset, size_t length)
+{
+ if ((model == nullptr) || (memory == nullptr))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+ if (model->isFinished())
+ {
+ return ANEURALNETWORKS_BAD_STATE;
+ }
+
+ // Negative index value is not allowed
+ if (index < 0)
+ {
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ const neurun::graph::operand::Index ind{static_cast<uint32_t>(index)};
+
+ if (!model->deref().operands().exist(ind))
+ {
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+
+ auto &obj = model->deref().operands().at(ind);
+ if ((obj.operandSize() != length) || (memory->size() < (offset + length)))
+ {
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ if (!obj.setAsConstant())
+ {
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+
+ using ::neurun::graph::operand::ExternalData;
+
+ model->deref().setOperandValue(
+ ind, nnfw::make_unique<ExternalData>(
+ reinterpret_cast<const uint8_t *>(memory->base() + offset), length));
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
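+
+// NOTE Ownership differs between the two setters above: setOperandValue copies
+// the user buffer into a CachedData object, while setOperandValueFromMemory
+// only records a pointer into the mmap'd ANeuralNetworksMemory (ExternalData),
+// so that memory object must stay alive for the lifetime of the model.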
+
+int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
+ ANeuralNetworksOperationType type, uint32_t inputCount,
+ const uint32_t *inputs, uint32_t outputCount,
+ const uint32_t *outputs)
+{
+ if ((model == nullptr) || (inputs == nullptr) || (outputs == nullptr))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+ if (model->isFinished())
+ {
+ return ANEURALNETWORKS_BAD_STATE;
+ }
+
+ for (uint32_t i = 0; i < outputCount; i++)
+ {
+ const ::neurun::graph::operand::Index ind{outputs[i]};
+ auto &obj = model->deref().operands().at(ind);
+
+ if (!obj.setAsOperationOutput())
+ {
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ }
+
+ auto &graph = model->deref();
+
+ auto node_param =
+ neurun::graph::operation::Node::InitParam{inputCount, inputs, outputCount, outputs};
+
+ switch (type)
+ {
+ case ANEURALNETWORKS_CONV_2D:
+ {
+      // inputCount is either 7 or 10 according to the NN API specification.
+ // - Padding is implicit when inputCount is 7
+ // - Padding is explicit when inputCount is 10
+ assert(inputCount == 7 || inputCount == 10);
+ assert(outputCount == 1);
+
+ if (inputCount == 7)
+ {
+ using GraphNode = neurun::graph::operation::Conv2D::Implicit::Node;
+
+ graph.addOperation(nnfw::make_unique<GraphNode>(node_param));
+ }
+ else
+ {
+ throw std::runtime_error{"Explicit padding in Conv2D is not supported, yet"};
+ }
+
+ break;
+ }
+ case ANEURALNETWORKS_MAX_POOL_2D:
+ {
+      // inputCount is either 7 or 10 according to the NN API specification.
+ // - Padding is implicit when inputCount is 7
+ // - Padding is explicit when inputCount is 10
+ assert(inputCount == 7 || inputCount == 10);
+ assert(outputCount == 1);
+
+ if (inputCount == 7)
+ {
+ using GraphNode = neurun::graph::operation::MaxPool2D::Implicit::Node;
+
+ graph.addOperation(nnfw::make_unique<GraphNode>(node_param));
+ }
+ else
+ {
+ throw std::runtime_error{"Explicit padding in MaxPool2D is not supported, yet"};
+ }
+
+ break;
+ }
+ case ANEURALNETWORKS_AVERAGE_POOL_2D:
+ {
+      // inputCount is either 7 or 10 according to the NN API specification.
+ // - Padding is implicit when inputCount is 7
+ // - Padding is explicit when inputCount is 10
+ assert(inputCount == 7 || inputCount == 10);
+ assert(outputCount == 1);
+
+ if (inputCount == 7)
+ {
+ using GraphNode = neurun::graph::operation::AvgPool2D::Implicit::Node;
+
+ graph.addOperation(nnfw::make_unique<GraphNode>(node_param));
+ }
+ else
+ {
+ throw std::runtime_error{"Explicit padding in AvgPool2D is not supported, yet"};
+ }
+
+ break;
+ }
+ case ANEURALNETWORKS_CONCATENATION:
+ {
+ using GraphNode = neurun::graph::operation::Concat::Node;
+
+ graph.addOperation(nnfw::make_unique<GraphNode>(node_param));
+
+ break;
+ }
+ case ANEURALNETWORKS_RESHAPE:
+ {
+ using GraphNode = neurun::graph::operation::Reshape::Node;
+
+ graph.addOperation(nnfw::make_unique<GraphNode>(node_param));
+
+ break;
+ }
+ case ANEURALNETWORKS_FULLY_CONNECTED:
+ {
+ using GraphNode = neurun::graph::operation::FullyConnected::Node;
+
+ graph.addOperation(nnfw::make_unique<GraphNode>(node_param));
+
+ break;
+ }
+ case ANEURALNETWORKS_SOFTMAX:
+ {
+ using GraphNode = neurun::graph::operation::Softmax::Node;
+
+ graph.addOperation(nnfw::make_unique<GraphNode>(node_param));
+
+ break;
+ }
+ default:
+ throw std::runtime_error{"Not supported operation"};
+ };
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
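+
+// Illustrative call (hypothetical operand indexes 0..7): CONV_2D with implicit
+// padding takes 7 inputs in NN API order -- IFM, kernel, bias, padding scheme,
+// horizontal stride, vertical stride and fused activation.
+//
+//   uint32_t inputs[7] = {0, 1, 2, 3, 4, 5, 6};
+//   uint32_t outputs[1] = {7};
+//   ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_CONV_2D, 7, inputs, 1, outputs);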
+
+int ANeuralNetworksModel_addOperationEx(ANeuralNetworksModel *model,
+ ANeuralNetworksOperationTypeEx type, uint32_t inputCount,
+ const uint32_t *inputs, uint32_t outputCount,
+ const uint32_t *outputs)
+{
+ if ((model == nullptr) || (inputs == nullptr) || (outputs == nullptr))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+ if (model->isFinished())
+ {
+ return ANEURALNETWORKS_BAD_STATE;
+ }
+
+ for (uint32_t i = 0; i < outputCount; i++)
+ {
+ const ::neurun::graph::operand::Index ind{outputs[i]};
+ auto &obj = model->deref().operands().at(ind);
+
+ if (!obj.setAsOperationOutput())
+ {
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ }
+
+  // Workaround: use inputCount here to avoid an unused-parameter compile error
+ if (inputCount == 0)
+ {
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+
+ switch (type)
+ {
+ default:
+ throw std::runtime_error{"Not supported operation"};
+ }
+}
+
+int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel *model, uint32_t inputCount,
+ const uint32_t *inputs, uint32_t outputCount,
+ const uint32_t *outputs)
+{
+ if ((model == nullptr) || (inputs == nullptr) || (outputs == nullptr))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+ if (model->isFinished())
+ {
+ return ANEURALNETWORKS_BAD_STATE;
+ }
+
+  // NOTE ::neurun::graph::operand::Index uses uint32_t as its underlying type, while some
+  // NNAPI functions such as ANeuralNetworksModel_setOperandValue use a signed int to
+  // represent an operand index.
+  //
+  // ANeuralNetworksModel_identifyInputsAndOutputs already passes operand indexes as
+  // uint32_t, so static_cast<uint32_t>(...) below only makes that conversion explicit.
+ for (uint32_t n = 0; n < inputCount; ++n)
+ {
+ const neurun::graph::operand::Index ind{static_cast<uint32_t>(inputs[n])};
+ model->deref().addInput(ind);
+
+ auto &obj = model->deref().operands().at(ind);
+ if (!obj.setAsModelInput())
+ {
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ }
+
+ for (uint32_t n = 0; n < outputCount; ++n)
+ {
+ const neurun::graph::operand::Index ind{static_cast<uint32_t>(outputs[n])};
+ model->deref().addOutput(ind);
+
+ auto &obj = model->deref().operands().at(ind);
+ // Model output cannot become model input
+ if (obj.isModelInput())
+ {
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ }
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
+
+int ANeuralNetworksModel_finish(ANeuralNetworksModel *model)
+{
+ if (model == nullptr)
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+ return model->finish();
+}
diff --git a/runtimes/neurun/src/frontend/wrapper/compilation.cc b/runtimes/neurun/src/frontend/wrapper/compilation.cc
new file mode 100644
index 000000000..4ff33faa5
--- /dev/null
+++ b/runtimes/neurun/src/frontend/wrapper/compilation.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <NeuralNetworks.h>
+
+#include <algorithm>
+
+#include <arm_compute/core/CL/ICLTensor.h>
+
+#include <arm_compute/runtime/IFunction.h>
+#include <arm_compute/runtime/CL/CLScheduler.h>
+
+#include "internal/Convert.h"
+#include "backend/acl_cl/kernel/View.h"
+#include "backend/acl_cl/TensorBuilder.h"
+#include "internal/nnapi/kernel/Reader.h"
+#include "internal/Padding.h"
+#include "backend/IInitializerGenerator.h"
+#include "backend/IStageGenerator.h"
+
+#include "compilation.h"
+#include "model.h"
+#include "logging.h"
+
+#include "graph/dumper/Dumper.h"
+#include "codegen/IPlanBuilder.h"
+#include "codegen/Planner.h"
+#include "codegen/PlanBuilder.h"
+
+#include "linear/Linear.h"
+
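+// Compilation pipeline: lower the graph (assign a backend to each operation),
+// linearize it into a flat operation sequence, then let Planner/PlanBuilder
+// emit per-backend stages and tensor allocations into the Plan.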
+int ANeuralNetworksCompilation::finish()
+{
+ auto &plan = this->plan();
+ const auto &operands = plan.model().operands();
+
+ plan.model().lower();
+ auto linear = plan.model().linearize();
+
+ // Dump ops
+ linear->accept(neurun::graph::dumper::Dumper{});
+
+ neurun::codegen::PlanBuilder plan_builder{plan};
+
+ auto tensor_builders = linear->markTensors();
+
+ linear->accept(neurun::codegen::Planner{operands, plan_builder});
+
+ // TODO Add optimization passes
+ plan_builder.finalize(tensor_builders);
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
diff --git a/runtimes/neurun/src/frontend/wrapper/compilation.h b/runtimes/neurun/src/frontend/wrapper/compilation.h
new file mode 100644
index 000000000..20d5a3d98
--- /dev/null
+++ b/runtimes/neurun/src/frontend/wrapper/compilation.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __COMPILATION_H__
+#define __COMPILATION_H__
+
+#include "codegen/Plan.h"
+#include "graph/Graph.h"
+
+struct ANeuralNetworksCompilation
+{
+public:
+ ANeuralNetworksCompilation(const std::shared_ptr<neurun::graph::Graph> &model)
+ : _plan{new neurun::codegen::Plan{model}}
+ {
+ // DO NOTHING
+ }
+
+public:
+ neurun::codegen::Plan &plan(void) { return *_plan; }
+
+public:
+ void publish(std::shared_ptr<const neurun::codegen::Plan> &plan) { plan = _plan; }
+ int finish();
+
+private:
+ std::shared_ptr<neurun::codegen::Plan> _plan;
+};
+
+#endif // __COMPILATION_H__
diff --git a/runtimes/neurun/src/frontend/wrapper/event.h b/runtimes/neurun/src/frontend/wrapper/event.h
new file mode 100644
index 000000000..d144b7c07
--- /dev/null
+++ b/runtimes/neurun/src/frontend/wrapper/event.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __EVENT_H__
+#define __EVENT_H__
+
+struct ANeuralNetworksEvent
+{
+};
+
+#endif // __EVENT_H__
diff --git a/runtimes/neurun/src/frontend/wrapper/execution.h b/runtimes/neurun/src/frontend/wrapper/execution.h
new file mode 100644
index 000000000..374201eb2
--- /dev/null
+++ b/runtimes/neurun/src/frontend/wrapper/execution.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __EXECUTION_H__
+#define __EXECUTION_H__
+
+#include "codegen/Plan.h"
+#include "exec/Source.h"
+#include "exec/Sink.h"
+
+struct ANeuralNetworksExecution
+{
+public:
+ ANeuralNetworksExecution(const std::shared_ptr<const neurun::codegen::Plan> &plan) : _plan{plan}
+ {
+ _sources.resize(_plan->model().getInputs().size());
+ _sinks.resize(_plan->model().getOutputs().size());
+ }
+
+public:
+ const neurun::codegen::Plan &plan(void) const { return *_plan; }
+
+private:
+ std::shared_ptr<const neurun::codegen::Plan> _plan;
+
+public:
+ // TODO Use InputIndex instead of int
+ void source(int n, std::unique_ptr<neurun::exec::Source> &&source)
+ {
+ _sources.at(n) = std::move(source);
+ }
+ template <typename T, typename... Args> void source(int n, Args &&... args)
+ {
+ source(n, std::unique_ptr<T>{new T{std::forward<Args>(args)...}});
+ }
+
+public:
+ const neurun::exec::Source &source(int n) const { return *(_sources.at(n)); }
+
+public:
+ // TODO Use OutputIndex instead of int
+ void sink(int n, std::unique_ptr<neurun::exec::Sink> &&sink) { _sinks.at(n) = std::move(sink); }
+ template <typename T, typename... Args> void sink(int n, Args &&... args)
+ {
+ sink(n, std::unique_ptr<T>{new T{std::forward<Args>(args)...}});
+ }
+
+public:
+ const neurun::exec::Sink &sink(int n) const { return *(_sinks.at(n)); }
+
+private:
+ std::vector<std::unique_ptr<neurun::exec::Source>> _sources;
+ std::vector<std::unique_ptr<neurun::exec::Sink>> _sinks;
+};
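+
+// Illustrative use (MySource/MySink are hypothetical concrete implementations):
+// the templated overloads forward constructor arguments to the given type.
+//
+//   execution->source<MySource>(0, buffer, length);
+//   execution->sink<MySink>(0, buffer, length);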
+
+#endif // __EXECUTION_H__
diff --git a/runtimes/neurun/src/frontend/wrapper/memory.cc b/runtimes/neurun/src/frontend/wrapper/memory.cc
new file mode 100644
index 000000000..456015123
--- /dev/null
+++ b/runtimes/neurun/src/frontend/wrapper/memory.cc
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <NeuralNetworks.h>
+#include <sys/mman.h>
+
+#include "memory.h"
+
+//
+// ANeuralNetworksMemory
+//
+ANeuralNetworksMemory::ANeuralNetworksMemory(size_t size, int protect, int fd, size_t offset)
+{
+  void *mapped = mmap(nullptr, size, protect, MAP_PRIVATE, fd, offset);
+  // NOTE mmap returns MAP_FAILED (not nullptr) on failure
+  _base = (mapped == MAP_FAILED) ? nullptr : reinterpret_cast<uint8_t *>(mapped);
+  _size = size;
+}
+
+ANeuralNetworksMemory::~ANeuralNetworksMemory()
+{
+  if (_base != nullptr)
+    munmap(reinterpret_cast<void *>(_base), _size);
+}
diff --git a/runtimes/neurun/src/frontend/wrapper/memory.h b/runtimes/neurun/src/frontend/wrapper/memory.h
new file mode 100644
index 000000000..a430bcf49
--- /dev/null
+++ b/runtimes/neurun/src/frontend/wrapper/memory.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MEMORY_H__
+#define __MEMORY_H__
+
+#include <cstdint>
+
+struct ANeuralNetworksMemory
+{
+public:
+ ANeuralNetworksMemory(size_t size, int protect, int fd, size_t offset);
+ ~ANeuralNetworksMemory();
+
+public:
+ size_t size(void) const { return _size; }
+ uint8_t *base(void) { return _base; }
+ const uint8_t *base(void) const { return _base; }
+
+private:
+ size_t _size;
+ uint8_t *_base;
+};
+
+#endif // __MEMORY_H__
diff --git a/runtimes/neurun/src/frontend/wrapper/model.cc b/runtimes/neurun/src/frontend/wrapper/model.cc
new file mode 100644
index 000000000..c7ccbc60a
--- /dev/null
+++ b/runtimes/neurun/src/frontend/wrapper/model.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model.h"
+
+#include "graph/Graph.h"
+
+//
+// ANeuralNetworksModel
+//
+ANeuralNetworksModel::ANeuralNetworksModel() : _model{new neurun::graph::Graph}
+{
+ // DO NOTHING
+}
+
+ResultCode ANeuralNetworksModel::finish()
+{
+ // This function must only be called once for a given model
+ if (isFinished())
+ {
+ return ANEURALNETWORKS_BAD_STATE;
+ }
+
+ _model->finishBuilding();
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
diff --git a/runtimes/neurun/src/frontend/wrapper/model.h b/runtimes/neurun/src/frontend/wrapper/model.h
new file mode 100644
index 000000000..3c7b027dc
--- /dev/null
+++ b/runtimes/neurun/src/frontend/wrapper/model.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MODEL_H__
+#define __MODEL_H__
+
+#include <NeuralNetworks.h>
+
+#include "graph/Graph.h"
+
+struct ANeuralNetworksModel
+{
+public:
+ ANeuralNetworksModel();
+
+public:
+ neurun::graph::Graph &deref(void) { return *_model; }
+ ResultCode finish();
+  bool isFinished() const { return !_model->isBuildingPhase(); }
+
+public:
+ void release(std::shared_ptr<neurun::graph::Graph> &model) { model = _model; }
+
+private:
+ std::shared_ptr<neurun::graph::Graph> _model;
+};
+
+#endif // __MODEL_H__
diff --git a/runtimes/neurun/src/graph/Graph.cc b/runtimes/neurun/src/graph/Graph.cc
new file mode 100644
index 000000000..07194ff7e
--- /dev/null
+++ b/runtimes/neurun/src/graph/Graph.cc
@@ -0,0 +1,315 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Graph.h"
+
+#include <algorithm>
+#include <bitset>
+
+#include "logging.h"
+#include "verifier/IVerifier.h"
+#include "nnfw/std/memory.h"
+#include "linear/Linear.h"
+#include "operation/LowerInfo.h"
+#include "operand/LowerInfo.h"
+#include "operand/Shape4DConvert.h"
+#include "codegen/BackendResolver.h"
+#include "backend/IBackendConfig.h"
+
+namespace neurun
+{
+namespace graph
+{
+
+operand::Index Graph::addOperand(const operand::Shape &shape, const operand::TypeInfo &type)
+{
+ return _operands.append(shape, type);
+}
+
+operation::Index Graph::addOperation(std::unique_ptr<operation::Node> &&node)
+{
+ assert(_phase == Phase::BUILDING);
+ return _operations.append(std::move(node));
+}
+
+// TODO : Once operand use-def information is fully introduced,
+//        the following API and its implementation should be refactored.
+/**
+ * @brief Insert an operation between an operand and the operation that consumes it.
+ *
+ * @param prev_operand_index Index of the operand that will feed the inserted operation.
+ * @param next_operation_index Index of the operation that currently consumes the operand.
+ * @param node operation::Node to insert.
+ *
+ * @return operation::Index of the inserted operation
+ */
+operation::Index Graph::insertOperation(const operand::Index &prev_operand_index,
+ const operation::Index &next_operation_index,
+ std::unique_ptr<operation::Node> &&node)
+{
+ assert(_phase != Phase::BUILDING);
+ auto &next_operation = _operations.at(next_operation_index);
+ auto next_input_indexes = next_operation.getInputs();
+
+ assert(next_input_indexes.contains(prev_operand_index));
+ assert(node->getInputs().size() == 0); // node to be inserted must not have any inputs
+
+ node->setInputs({prev_operand_index});
+
+ // For multi input operation (ex. concat)
+ operand::IndexSet index_set;
+ auto cur_output_indexes = node->getOutputs();
+  assert(cur_output_indexes.size() == 1); // Assume the inserted node always has exactly one output
+  auto cur_output_index = cur_output_indexes.at(operand::IO::Index{0});
+  // TODO : Once an API for setting inputs one by one is introduced, this loop can be simplified.
+ for (auto next_input_index : next_input_indexes)
+ {
+ if (prev_operand_index == next_input_index)
+ {
+ index_set.append(cur_output_index);
+ }
+ else
+ {
+ index_set.append(next_input_index);
+ }
+ }
+
+ next_operation.setInputs(index_set);
+
+ operation::Index node_index = _operations.append(std::move(node));
+
+ // Update Use/Def info
+ {
+ _operands.at(prev_operand_index).removeUse(next_operation_index);
+ _operands.at(cur_output_index).appendUse(next_operation_index);
+
+ _operands.at(prev_operand_index).appendUse(node_index);
+
+ auto node_output_indexes = _operations.at(node_index).getOutputs();
+ assert(node_output_indexes.size() == 1);
+ auto node_output_index = node_output_indexes.at(operand::IO::Index{0});
+ _operands.at(node_output_index).appendDef(node_index);
+ }
+
+ return node_index;
+}
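+
+// Illustrative wiring for insertOperation (P is the inserted node):
+//
+//   before : ... -> %prev -> NEXT
+//   after  : ... -> %prev -> P -> %out -> NEXT   (%out is P's single output)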
+
+void Graph::setOperandValue(const operand::Index &ind, std::unique_ptr<operand::Data> &&data)
+{
+ assert(_phase == Phase::BUILDING);
+ assert(_operands.exist(ind));
+ _operands.at(ind).data(std::move(data));
+}
+
+void Graph::addInput(const operand::Index &ind)
+{
+ assert(_phase == Phase::BUILDING);
+ _inputs.append(ind);
+}
+
+void Graph::addOutput(const operand::Index &ind)
+{
+ assert(_phase == Phase::BUILDING);
+ _outputs.append(ind);
+}
+
+void Graph::finishBuilding(void)
+{
+ assert(_phase == Phase::BUILDING);
+ _phase = Phase::MODEL;
+
+ // Initialize operand use-def
+ initializeUseDef();
+
+ // Call graph verifications for the MODEL phase
+ {
+ verifier::DAGChecker dag_checker;
+ dag_checker.verify(*this);
+ }
+}
+
+void Graph::lower(void)
+{
+ assert(_phase == Phase::MODEL);
+
+ // Lower
+ {
+ // operand::LowerInfo holder
+ std::unordered_map<operand::Index, std::unique_ptr<operand::LowerInfo>> operands_lower_info;
+
+ _operands.iterate([&](const operand::Index &index, const operand::Object &object) {
+ operands_lower_info[index] =
+ nnfw::make_unique<operand::LowerInfo>(operand::asShape4D(object.shape()));
+ });
+
+    auto backend_resolver = codegen::BackendResolver(_operands);
+
+    _operations.iterate([&](const operation::Index &, operation::Node &node) {
+      auto backend = backend_resolver.getBackend(typeid(node));
+
+ // Operation LowerInfo
+ node.lower_info(nnfw::make_unique<operation::LowerInfo>(backend));
+
+ // LowerInfo for in/output operands
+ for (auto operand : node.getInputs())
+ {
+ auto &&lower_info = operands_lower_info.at(operand);
+ lower_info->addUseLayout(backend.config()->getOperandLayout());
+ }
+ for (auto operand : node.getOutputs())
+ {
+ auto &&lower_info = operands_lower_info.at(operand);
+ lower_info->addDefLayout(backend.config()->getOperandLayout());
+ }
+ });
+
+ // Set LowerInfo for each operand from the operand::LowerInfo holder
+ _operands.iterate([&](const operand::Index &index, operand::Object &object) {
+ object.lower_info(std::move(operands_lower_info[index]));
+
+ // Dump operand LowerInfo
+ {
+ auto layouts_to_string = [](const operand::LayoutSet &layouts) {
+ std::string str;
+ for (auto layout : layouts)
+ {
+ const char *name = "";
+ if (layout == operand::Layout::NHWC)
+ name = "NHWC";
+ if (layout == operand::Layout::NCHW)
+ name = "NCHW";
+ str += name;
+ str += " ";
+ }
+ return "{ " + str + "}";
+ };
+
+ const auto &lower_info = object.lower_info();
+ const auto &shape = lower_info->shape();
+ std::string def_layouts = layouts_to_string(lower_info->def_layouts());
+ std::string use_layouts = layouts_to_string(lower_info->use_layouts());
+ VERBOSE(Lower) << "* Operand #" << index.value() << " LowerInfo" << std::endl;
+ VERBOSE(Lower) << " - 4D Shape (NHWC) : { " << shape.n() << " " << shape.h() << " "
+ << shape.w() << " " << shape.c() << " "
+ << "}" << std::endl;
+ VERBOSE(Lower) << " - Def Layout : " << def_layouts << std::endl;
+ VERBOSE(Lower) << " - Use Layout : " << use_layouts << std::endl;
+ }
+ });
+ }
+
+ // Graph verifications for the LOWERED phase
+ {
+ verifier::DAGChecker dag_checker;
+ dag_checker.verify(*this);
+ }
+
+ _phase = Phase::LOWERED;
+}
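+
+// Example of the VERBOSE(Lower) dump above (illustrative values):
+//
+//   * Operand #5 LowerInfo
+//     - 4D Shape (NHWC) : { 1 28 28 32 }
+//     - Def Layout : { NHWC }
+//     - Use Layout : { NHWC }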
+
+std::unique_ptr<linear::Linear> Graph::linearize(void)
+{
+ assert(_phase == Phase::LOWERED);
+
+ auto linear = nnfw::make_unique<linear::Linear>(*this);
+
+ // TODO Move the operations and operands to linear object
+
+ _phase = Phase::LINEARIZED;
+
+  return linear;
+}
+
+void Graph::initializeUseDef()
+{
+ operations().iterate([&](const operation::Index &index, const operation::Node &node) -> void {
+ auto outputs = node.getOutputs();
+ for (auto output : outputs)
+ {
+ operands().at(output).appendDef(index);
+ }
+
+ auto inputs = node.getInputs();
+ for (auto input : inputs)
+ {
+ operands().at(input).appendUse(index);
+ }
+ });
+}
+
+} // namespace graph
+} // namespace neurun
+
+namespace neurun
+{
+namespace graph
+{
+
+// Explicit instantiations to have implementation in the source file.
+
+template class Graph::DefaultIterator<true>;
+template class Graph::DefaultIterator<false>;
+
+template class Graph::PostDfsIterator<true>;
+template class Graph::PostDfsIterator<false>;
+
+//
+// Graph::DefaultIterator
+//
+
+template <bool is_const>
+void Graph::DefaultIterator<is_const>::iterate(GraphRef graph, const IterFn &fn) const
+{
+ graph._operations.iterate([&](const operation::Index &, NodeRef node) -> void { fn(node); });
+}
+
+//
+// Graph::PostDfsIterator
+//
+
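+// NOTE fn is invoked on a node only after every operation that consumes one of
+// its outputs has been visited, so the emitted order is a reverse topological
+// sort of the graph -- DAGChecker has already verified it is acyclic.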
+template <bool is_const>
+void Graph::PostDfsIterator<is_const>::iterate(GraphRef graph, const IterFn &fn) const
+{
+ assert(!graph.isBuildingPhase()); // Restrict iteration condition
+
+ std::vector<bool> visited(graph._operations.size(), false);
+
+ std::function<void(const operation::Index &, NodeRef)> dfs_recursive =
+ [&](const operation::Index &index, NodeRef node) -> void {
+ if (visited[index.asInt()])
+ return;
+ visited[index.asInt()] = true;
+
+ for (auto output : node.getOutputs())
+ {
+ const auto &operand = graph._operands.at(output);
+ for (const auto &use : operand.getUses().list())
+ {
+ dfs_recursive(use, graph._operations.at(use));
+ }
+ }
+
+ fn(node);
+ };
+
+ graph._operations.iterate(dfs_recursive);
+
+  // All of the operations (nodes) must have been visited.
+ assert(std::all_of(visited.begin(), visited.end(), [](bool v) { return v; }));
+}
+
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/src/graph/Graph.h b/runtimes/neurun/src/graph/Graph.h
new file mode 100644
index 000000000..dd1489a93
--- /dev/null
+++ b/runtimes/neurun/src/graph/Graph.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_GRAPH_H__
+#define __NEURUN_GRAPH_GRAPH_H__
+
+#include <functional>
+
+#include "graph/operation/Node.h"
+#include "graph/operation/Set.h"
+#include "graph/operand/IndexSet.h"
+#include "graph/operand/Set.h"
+
+namespace neurun
+{
+namespace linear
+{
+class Linear;
+} // namespace linear
+} // namespace neurun
+
+namespace neurun
+{
+namespace graph
+{
+
+class Graph
+{
+private:
+ enum class Phase
+ {
+ BUILDING,
+ MODEL,
+ LOWERED,
+ LINEARIZED // Everything is moved to Linear object so this Graph object is no longer effective
+ };
+
+public:
+ template <bool is_const> class Iterator
+ {
+ public:
+ using GraphRef = typename std::conditional<is_const, const Graph &, Graph &>::type;
+ using NodeRef =
+ typename std::conditional<is_const, const operation::Node &, operation::Node &>::type;
+ using IterFn = std::function<void(NodeRef)>;
+
+ public:
+ virtual ~Iterator() = default;
+ virtual void iterate(GraphRef graph, const IterFn &fn) const = 0;
+ };
+
+ template <bool is_const = false> class DefaultIterator final : public Iterator<is_const>
+ {
+ public:
+ using GraphRef = typename Iterator<is_const>::GraphRef;
+ using NodeRef = typename Iterator<is_const>::NodeRef;
+ using IterFn = typename Iterator<is_const>::IterFn;
+
+ public:
+ void iterate(GraphRef graph, const IterFn &fn) const;
+ };
+ using DefaultConstIterator = DefaultIterator<true>;
+
+ template <bool is_const = false> class PostDfsIterator final : public Iterator<is_const>
+ {
+ public:
+ using GraphRef = typename Iterator<is_const>::GraphRef;
+ using NodeRef = typename Iterator<is_const>::NodeRef;
+ using IterFn = typename Iterator<is_const>::IterFn;
+
+ public:
+ void iterate(GraphRef graph, const IterFn &fn) const;
+ };
+ using PostDfsConstIterator = PostDfsIterator<true>;
+
+public:
+ Graph(void) = default;
+
+ // Graph Building
+public:
+ operand::Index addOperand(const operand::Shape &shape, const operand::TypeInfo &type);
+ operation::Index addOperation(std::unique_ptr<operation::Node> &&node);
+ operation::Index insertOperation(const operand::Index &prev_operand_index,
+ const operation::Index &next_operation_index,
+ std::unique_ptr<operation::Node> &&node);
+ void setOperandValue(const operand::Index &ind, std::unique_ptr<operand::Data> &&data);
+ void addInput(const operand::Index &ind);
+ void addOutput(const operand::Index &ind);
+ void finishBuilding(void);
+ void lower(void);
+ std::unique_ptr<linear::Linear> linearize(void);
+ bool isBuildingPhase(void) const { return _phase == Phase::BUILDING; }
+
+private:
+ void initializeUseDef();
+
+ // Accessors
+public:
+ const operand::IndexSet &getInputs() const { return _inputs; }
+ const operand::IndexSet &getOutputs() const { return _outputs; }
+ const operand::Set &operands() const { return _operands; }
+ operand::Set &operands() { return _operands; } // TODO Remove this non-const accessor
+ const operation::Set &operations() const { return _operations; }
+
+private:
+ Phase _phase{Phase::BUILDING};
+ operation::Set _operations;
+ operand::Set _operands;
+ operand::IndexSet _inputs;
+ operand::IndexSet _outputs;
+};
+
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_GRAPH_H__
diff --git a/runtimes/neurun/src/graph/Index.h b/runtimes/neurun/src/graph/Index.h
new file mode 100644
index 000000000..864aaffd0
--- /dev/null
+++ b/runtimes/neurun/src/graph/Index.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_INDEX_H__
+#define __NEURUN_GRAPH_INDEX_H__
+
+#include <functional>
+#include <stdint.h>
+
+namespace neurun
+{
+namespace graph
+{
+
+template <typename T, typename DummyTag> class Index
+{
+public:
+ explicit Index(T o) : _index{o} {}
+ explicit Index(int32_t o) : _index{static_cast<T>(o)} {} // For legacy code compatibility
+ Index(const Index &o) : _index{o._index} {}
+
+ Index &operator=(T o)
+ {
+ _index = o;
+ return *this;
+ }
+
+  Index &operator=(const Index &o)
+  {
+    _index = o._index;
+    return *this;
+  }
+
+ bool operator==(T o) const { return _index == o; }
+ bool operator==(const Index &o) const { return _index == o._index; }
+ bool operator!=(T o) const { return !(*this == o); }
+ bool operator!=(const Index &o) const { return !(*this == o); }
+
+ T value() const { return _index; }
+ int32_t asInt() const { return static_cast<int32_t>(_index); } // For legacy code compatibility
+
+private:
+ T _index;
+};
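+
+// Illustrative use: the DummyTag parameter makes indexes of different kinds
+// distinct types, so mixing them is a compile-time error.
+//
+//   using OperandIndex = Index<uint32_t, struct OperandIndexTag>;     // hypothetical tags
+//   using OperationIndex = Index<uint32_t, struct OperationIndexTag>;
+//   // OperandIndex{3} == OperationIndex{3} does not compile.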
+
+} // namespace graph
+} // namespace neurun
+
+namespace std
+{
+
+template <typename T, typename Tag> struct hash<::neurun::graph::Index<T, Tag>>
+{
+ size_t operator()(const ::neurun::graph::Index<T, Tag> &index) const noexcept
+ {
+ return hash<T>()(index.value());
+ }
+};
+
+} // namespace std
+
+#endif // __NEURUN_GRAPH_INDEX_H__
diff --git a/runtimes/neurun/src/graph/dumper/Dumper.cc b/runtimes/neurun/src/graph/dumper/Dumper.cc
new file mode 100644
index 000000000..3788317ce
--- /dev/null
+++ b/runtimes/neurun/src/graph/dumper/Dumper.cc
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Dumper.h"
+
+#include <string>
+
+#include "logging.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace dumper
+{
+
+using namespace neurun::graph::operation;
+
+void Dumper::visit(const Conv2D::Implicit::Node &node)
+{
+ VERBOSE(LIR) << "* Conv2D(Implicit)" << std::endl;
+ VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(0).value() << ") Kernel("
+ << node.getInputs().at(1).value() << ") Bias(" << node.getInputs().at(2).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const MaxPool2D::Implicit::Node &node)
+{
+ VERBOSE(LIR) << "* MaxPool2D(Implicit)" << std::endl;
+ VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(0).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const AvgPool2D::Implicit::Node &node)
+{
+ VERBOSE(LIR) << "* AvgPool2D(Implicit)" << std::endl;
+ VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(0).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Concat::Node &node)
+{
+ VERBOSE(LIR) << "* Concat" << std::endl;
+ std::string inputs;
+ for (auto i : node.getInputs())
+ {
+ inputs += std::to_string(i.value()) + ",";
+ }
+ VERBOSE(LIR) << " - Inputs : IFM(" << inputs << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const FullyConnected::Node &node)
+{
+ VERBOSE(LIR) << "* FullyConnected" << std::endl;
+ VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(0).value() << ") Weight("
+ << node.getInputs().at(1).value() << ") Bias(" << node.getInputs().at(2).value()
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Reshape::Node &node)
+{
+ VERBOSE(LIR) << "* Reshape" << std::endl;
+  // TODO The shape index should be node.getInputs().at(1).value(), but that input is not valid yet
+ VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(0).value() << ") Shape("
+ << "?"
+ << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const Softmax::Node &node)
+{
+ VERBOSE(LIR) << "* Softmax" << std::endl;
+ VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(0).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+void Dumper::visit(const NOP::Node &node)
+{
+ VERBOSE(LIR) << "* NOP" << std::endl;
+ std::string inputs, outputs;
+ for (auto i : node.getInputs())
+ {
+ inputs += std::to_string(i.value()) + ",";
+ }
+ VERBOSE(LIR) << " - Inputs : IFM(" << inputs << ")" << std::endl;
+ for (auto i : node.getOutputs())
+ {
+ outputs += std::to_string(i.value()) + ",";
+ }
+ VERBOSE(LIR) << " - Outputs : OFM(" << outputs << ")" << std::endl;
+}
+
+void Dumper::visit(const Permute::Node &node)
+{
+ VERBOSE(LIR) << "* Permute" << std::endl;
+ VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(0).value() << ")" << std::endl;
+ VERBOSE(LIR) << " - Output : OFM(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
+} // namespace dumper
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/src/graph/dumper/Dumper.h b/runtimes/neurun/src/graph/dumper/Dumper.h
new file mode 100644
index 000000000..dee490cbd
--- /dev/null
+++ b/runtimes/neurun/src/graph/dumper/Dumper.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_DUMPER_H__
+#define __NEURUN_GRAPH_DUMPER_H__
+
+#include "graph/operation/NodeVisitor.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace dumper
+{
+
+class Dumper : public graph::operation::NodeVisitor
+{
+public:
+ Dumper() = default;
+
+public:
+ void visit(const graph::operation::Conv2D::Implicit::Node &node) override;
+ void visit(const graph::operation::MaxPool2D::Implicit::Node &node) override;
+ void visit(const graph::operation::AvgPool2D::Implicit::Node &node) override;
+ void visit(const graph::operation::Concat::Node &node) override;
+ void visit(const graph::operation::FullyConnected::Node &node) override;
+ void visit(const graph::operation::Reshape::Node &node) override;
+ void visit(const graph::operation::Softmax::Node &node) override;
+ void visit(const graph::operation::NOP::Node &node) override;
+ void visit(const graph::operation::Permute::Node &node) override;
+};
+
+} // namespace dumper
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_DUMPER_H__
diff --git a/runtimes/neurun/src/graph/operand/Data.h b/runtimes/neurun/src/graph/operand/Data.h
new file mode 100644
index 000000000..e36a9a2ae
--- /dev/null
+++ b/runtimes/neurun/src/graph/operand/Data.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERAND_DATA_H__
+#define __NEURUN_GRAPH_OPERAND_DATA_H__
+
+#include <algorithm>
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+
+struct Data
+{
+ virtual ~Data() = default;
+
+ virtual size_t size(void) const = 0;
+ virtual const uint8_t *base(void) const = 0;
+};
+
+class CachedData final : public Data
+{
+public:
+ CachedData(const uint8_t *base, size_t size) : _base{new uint8_t[size]}, _size{size}
+ {
+ std::copy(base, base + size, _base);
+ }
+
+public:
+ ~CachedData() { delete[] _base; }
+
+public:
+ size_t size(void) const override { return _size; }
+ const uint8_t *base(void) const override { return _base; }
+
+private:
+ uint8_t *_base;
+ size_t _size;
+};
+
+class ExternalData final : public Data
+{
+public:
+ ExternalData(const uint8_t *base, size_t size) : _base{base}, _size{size}
+ {
+ // DO NOTHING
+ }
+
+public:
+ size_t size(void) const override { return _size; }
+ const uint8_t *base(void) const override { return _base; }
+
+private:
+ const uint8_t *_base;
+ const size_t _size;
+};
+
+} // namespace operand
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERAND_DATA_H__
diff --git a/runtimes/neurun/src/graph/operand/DataType.h b/runtimes/neurun/src/graph/operand/DataType.h
new file mode 100644
index 000000000..8878901fd
--- /dev/null
+++ b/runtimes/neurun/src/graph/operand/DataType.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERAND_DATATYPE_H__
+#define __NEURUN_GRAPH_OPERAND_DATATYPE_H__
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+
+enum class DataType
+{
+ SCALAR_FLOAT32 = 0,
+ SCALAR_INT32 = 1,
+ SCALAR_UINT32 = 2,
+
+ TENSOR_FLOAT32 = 3,
+ TENSOR_INT32 = 4,
+
+ TENSOR_QUANT8_ASYMM = 5,
+};
+
+} // namespace operand
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERAND_DATATYPE_H__
diff --git a/runtimes/neurun/src/graph/operand/Index.h b/runtimes/neurun/src/graph/operand/Index.h
new file mode 100644
index 000000000..a6850d061
--- /dev/null
+++ b/runtimes/neurun/src/graph/operand/Index.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERAND_INDEX_H__
+#define __NEURUN_GRAPH_OPERAND_INDEX_H__
+
+#include "graph/Index.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+
+using Index = ::neurun::graph::Index<uint32_t, struct IndexTag>;
+
+} // namespace operand
+} // namespace graph
+} // namespace neurun
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+namespace IO
+{
+
+using Index = ::neurun::graph::Index<uint32_t, struct IndexTag>;
+
+} // namespace IO
+} // namespace operand
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERAND_INDEX_H__
diff --git a/runtimes/neurun/src/graph/operand/IndexSet.cc b/runtimes/neurun/src/graph/operand/IndexSet.cc
new file mode 100644
index 000000000..037965a6d
--- /dev/null
+++ b/runtimes/neurun/src/graph/operand/IndexSet.cc
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "IndexSet.h"
+
+#include <algorithm>
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+
+IndexSet::IndexSet(std::initializer_list<Index> list) : _set(list)
+{
+ // DO NOTHING
+}
+
+IndexSet::IndexSet(std::initializer_list<int32_t> list)
+{
+ for (auto val : list)
+ {
+ _set.emplace_back(static_cast<uint32_t>(val));
+ }
+}
+
+IndexSet::IndexSet(std::initializer_list<uint32_t> list)
+{
+ for (auto val : list)
+ {
+ _set.emplace_back(val);
+ }
+}
+
+bool IndexSet::contains(const Index &index) const
+{
+ return std::find(_set.begin(), _set.end(), index) != _set.end();
+}
+
+} // namespace operand
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/src/graph/operand/IndexSet.h b/runtimes/neurun/src/graph/operand/IndexSet.h
new file mode 100644
index 000000000..2d37de788
--- /dev/null
+++ b/runtimes/neurun/src/graph/operand/IndexSet.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERAND_INDEX_SET_H__
+#define __NEURUN_GRAPH_OPERAND_INDEX_SET_H__
+
+#include <initializer_list>
+#include <vector>
+
+#include "Index.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+
+class IndexSet
+{
+public:
+ IndexSet(void) = default;
+ IndexSet(std::initializer_list<Index> list);
+ IndexSet(std::initializer_list<int32_t> list);
+ IndexSet(std::initializer_list<uint32_t> list);
+
+public:
+ void append(const Index &index) { _set.emplace_back(index); }
+
+public:
+ uint32_t size() const { return static_cast<uint32_t>(_set.size()); }
+ const Index &at(IO::Index set_index) const { return _set.at(set_index.asInt()); }
+ const Index &at(uint32_t index) const { return _set.at(index); }
+ bool contains(const Index &index) const;
+
+public:
+ std::vector<Index>::const_iterator begin(void) const { return _set.begin(); }
+ std::vector<Index>::const_iterator end(void) const { return _set.end(); }
+
+private:
+ std::vector<Index> _set;
+};
+
+} // namespace operand
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERAND_INDEX_SET_H__
diff --git a/runtimes/neurun/src/graph/operand/Layout.h b/runtimes/neurun/src/graph/operand/Layout.h
new file mode 100644
index 000000000..023ecbdad
--- /dev/null
+++ b/runtimes/neurun/src/graph/operand/Layout.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERAND_LAYOUT_H__
+#define __NEURUN_GRAPH_OPERAND_LAYOUT_H__
+
+#include <functional>
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+
+enum class Layout
+{
+ UNKNOWN = 0,
+ NHWC,
+ NCHW
+};
+
+} // namespace operand
+} // namespace graph
+} // namespace neurun
+
+namespace std
+{
+
+template <> struct hash<::neurun::graph::operand::Layout>
+{
+ size_t operator()(const ::neurun::graph::operand::Layout &value) const noexcept
+ {
+ using type = typename std::underlying_type<::neurun::graph::operand::Layout>::type;
+ return hash<type>()(static_cast<type>(value));
+ }
+};
+
+} // namespace std
+
+#endif // __NEURUN_GRAPH_OPERAND_LAYOUT_H__
diff --git a/runtimes/neurun/src/graph/operand/LayoutSet.cc b/runtimes/neurun/src/graph/operand/LayoutSet.cc
new file mode 100644
index 000000000..47bb5900a
--- /dev/null
+++ b/runtimes/neurun/src/graph/operand/LayoutSet.cc
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LayoutSet.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+
+LayoutSet::LayoutSet(std::initializer_list<Layout> layouts)
+{
+ for (auto layout : layouts)
+ {
+ _set.insert(layout);
+ }
+}
+
+LayoutSet LayoutSet::operator|(const LayoutSet &other) const
+{
+ auto ret = *this;
+ for (auto layout : other)
+ {
+ ret.add(layout);
+ }
+ return ret;
+}
+
+LayoutSet LayoutSet::operator&(const LayoutSet &other) const
+{
+ LayoutSet ret;
+ for (auto layout : other)
+ {
+ if (contains(layout))
+ {
+ ret.add(layout);
+ }
+ }
+ return ret;
+}
+
+LayoutSet LayoutSet::operator-(const LayoutSet &other) const
+{
+ auto ret = *this;
+ for (auto layout : other)
+ {
+ ret.remove(layout);
+ }
+ return ret;
+}
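+
+// Illustrative use (assumption: layouts required by uses but never produced by
+// defs mark where a layout conversion such as Permute would be needed):
+//
+//   LayoutSet missing = lower_info->use_layouts() - lower_info->def_layouts();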
+
+} // namespace operand
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/src/graph/operand/LayoutSet.h b/runtimes/neurun/src/graph/operand/LayoutSet.h
new file mode 100644
index 000000000..928259c87
--- /dev/null
+++ b/runtimes/neurun/src/graph/operand/LayoutSet.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERAND_LAYOUT_SET_H__
+#define __NEURUN_GRAPH_OPERAND_LAYOUT_SET_H__
+
+#include <initializer_list>
+#include <unordered_set>
+
+#include "Layout.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+
+class LayoutSet
+{
+public:
+ LayoutSet() = default;
+ LayoutSet(std::initializer_list<Layout> layouts);
+
+public:
+ void add(const Layout &layout) { _set.insert(layout); }
+ void remove(const Layout &layout) { _set.erase(layout); }
+ uint32_t size() const { return static_cast<uint32_t>(_set.size()); }
+ bool contains(const Layout &layout) const { return _set.find(layout) != _set.end(); }
+
+public:
+ LayoutSet operator|(const LayoutSet &other) const; // Union
+ LayoutSet operator&(const LayoutSet &other) const; // Intersect
+ LayoutSet operator-(const LayoutSet &other) const; // Minus
+
+public:
+ std::unordered_set<Layout>::const_iterator begin() const { return _set.begin(); }
+ std::unordered_set<Layout>::const_iterator end() const { return _set.end(); }
+
+private:
+ std::unordered_set<Layout> _set;
+};
+
+} // namespace operand
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERAND_LAYOUT_SET_H__
diff --git a/runtimes/neurun/src/graph/operand/LowerInfo.cc b/runtimes/neurun/src/graph/operand/LowerInfo.cc
new file mode 100644
index 000000000..c26965911
--- /dev/null
+++ b/runtimes/neurun/src/graph/operand/LowerInfo.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LowerInfo.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+
+// NO IMPLEMENTATION YET
+
+} // namespace operand
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/src/graph/operand/LowerInfo.h b/runtimes/neurun/src/graph/operand/LowerInfo.h
new file mode 100644
index 000000000..d91c29fb7
--- /dev/null
+++ b/runtimes/neurun/src/graph/operand/LowerInfo.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERAND_LOWER_INFO_H__
+#define __NEURUN_GRAPH_OPERAND_LOWER_INFO_H__
+
+#include <stdint.h>
+
+#include "LayoutSet.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+
+class LowerInfo
+{
+public:
+ class Shape4D
+ {
+ public:
+ Shape4D(uint32_t n, uint32_t h, uint32_t w, uint32_t c) : _n{n}, _h{h}, _w{w}, _c{c}
+ {
+ // DO NOTHING
+ }
+
+ public:
+ uint32_t n(void) const { return _n; }
+ uint32_t h(void) const { return _h; }
+ uint32_t w(void) const { return _w; }
+ uint32_t c(void) const { return _c; }
+
+ private:
+ uint32_t _n;
+ uint32_t _h;
+ uint32_t _w;
+ uint32_t _c;
+ };
+
+public:
+ LowerInfo(const Shape4D &shape) : _shape{shape}
+ {
+ // DO NOTHING
+ }
+
+public:
+ const Shape4D &shape(void) const { return _shape; }
+ const LayoutSet &def_layouts(void) const { return _def_layouts; }
+ const LayoutSet &use_layouts(void) const { return _use_layouts; }
+
+public:
+ void addDefLayout(const Layout &layout) { _def_layouts.add(layout); }
+ void addUseLayout(const Layout &layout) { _use_layouts.add(layout); }
+
+private:
+ Shape4D _shape;
+ LayoutSet _def_layouts;
+ LayoutSet _use_layouts;
+};
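+
+// Usage sketch (illustrative only; Layout enumerator names are assumptions):
+// record that an operand is defined in one layout and consumed in another, so
+// a later pass can decide where a layout conversion is needed.
+//
+//   LowerInfo info{LowerInfo::Shape4D{1, 224, 224, 3}};
+//   info.addDefLayout(Layout::NHWC);
+//   info.addUseLayout(Layout::NCHW);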
+
+} // namespace operand
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERAND_LOWER_INFO_H__
diff --git a/runtimes/neurun/src/graph/operand/Object.cc b/runtimes/neurun/src/graph/operand/Object.cc
new file mode 100644
index 000000000..7b95cea97
--- /dev/null
+++ b/runtimes/neurun/src/graph/operand/Object.cc
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Object.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+
+size_t Object::operandSize(void) const
+{
+ const uint32_t ranks = _shape.rank();
+ int32_t elements = 1;
+
+ for (uint32_t rank = 0; rank < ranks; rank++)
+ {
+ elements *= _shape.dim(rank);
+ }
+
+ DataType type = _type.type();
+ size_t element_size = 0;
+
+ // The value of "type" matches the OperandCode enum in NeuralNetworks.h
+ switch (type)
+ {
+ case DataType::SCALAR_FLOAT32:
+ case DataType::TENSOR_FLOAT32:
+ element_size = sizeof(float);
+ break;
+ case DataType::SCALAR_INT32:
+ case DataType::TENSOR_INT32:
+ element_size = sizeof(int32_t);
+ break;
+ case DataType::SCALAR_UINT32:
+ element_size = sizeof(uint32_t);
+ break;
+ case DataType::TENSOR_QUANT8_ASYMM:
+ element_size = sizeof(uint8_t);
+ break;
+ default:
+ throw std::runtime_error{"Unsuppported type size"};
+ }
+
+ return element_size * elements;
+}
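+
+// Worked example (illustrative only): a TENSOR_FLOAT32 operand of shape
+// {2, 3, 4} has 2 * 3 * 4 = 24 elements of sizeof(float) = 4 bytes each, so
+// operandSize() returns 96.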
+
+bool Object::setUsage(const OperandUsage usage)
+{
+ if (usageIsDefined() && (_usage != usage))
+ {
+ // Already set as different type
+ return false;
+ }
+
+ _usage = usage;
+
+ return true;
+}
+
+void Object::appendUse(const ::neurun::graph::operation::Index &idx)
+{
+ assert(_usage != OperandUsage::NOT_DEFINED);
+ assert(!_uses.contains(idx));
+
+ _uses.append(idx);
+}
+
+void Object::removeUse(const ::neurun::graph::operation::Index &idx)
+{
+ assert(_usage != OperandUsage::NOT_DEFINED);
+ assert(_uses.contains(idx));
+
+ _uses.remove(idx);
+}
+
+void Object::appendDef(const ::neurun::graph::operation::Index &idx)
+{
+ assert(_usage != OperandUsage::NOT_DEFINED && _usage != OperandUsage::CONSTANT);
+ assert(_def.size() == 0);
+
+ _def.append(idx);
+}
+
+void Object::removeDef(const ::neurun::graph::operation::Index &idx)
+{
+ assert(_usage != OperandUsage::NOT_DEFINED);
+ assert(_def.contains(idx));
+
+ _def.remove(idx);
+}
+
+void Object::lower_info(std::unique_ptr<LowerInfo> &&lower_info)
+{
+ _lower_info = std::move(lower_info);
+}
+
+const LowerInfo *Object::lower_info() const { return _lower_info.get(); }
+
+} // namespace operand
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/src/graph/operand/Object.h b/runtimes/neurun/src/graph/operand/Object.h
new file mode 100644
index 000000000..17e46ab28
--- /dev/null
+++ b/runtimes/neurun/src/graph/operand/Object.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERAND_OBJECT_H__
+#define __NEURUN_GRAPH_OPERAND_OBJECT_H__
+
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <algorithm>
+
+#include "Shape.h"
+#include "Data.h"
+#include "TypeInfo.h"
+#include "LowerInfo.h"
+#include "graph/operation/IndexList.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+
+// Operand usage should be exactly one of these
+enum class OperandUsage
+{
+ NOT_DEFINED,
+ MODEL_INPUT,
+ CONSTANT,
+ OPERATION_OUTPUT,
+};
+
+class Object
+{
+public:
+ explicit Object(const Shape &shape, const TypeInfo &type)
+ : _shape{shape}, _type{type}, _usage{OperandUsage::NOT_DEFINED}
+ {
+ // DO NOTHING
+ }
+
+public:
+ const Shape &shape(void) const { return _shape; }
+ const TypeInfo &typeInfo(void) const { return _type; }
+ size_t operandSize(void) const;
+ bool setAsConstant() { return setUsage(OperandUsage::CONSTANT); }
+ bool setAsModelInput() { return setUsage(OperandUsage::MODEL_INPUT); }
+ bool setAsOperationOutput() { return setUsage(OperandUsage::OPERATION_OUTPUT); }
+ bool usageIsDefined(void) const { return _usage != OperandUsage::NOT_DEFINED; }
+ bool isModelInput(void) const { return _usage == OperandUsage::MODEL_INPUT; }
+
+ const operation::IndexList &getUses() const { return _uses; }
+ const operation::IndexList &getDef() const { return _def; }
+ void appendUse(const operation::Index &idx);
+ void removeUse(const operation::Index &idx);
+ void appendDef(const operation::Index &idx);
+ void removeDef(const operation::Index &idx);
+
+private:
+ bool setUsage(OperandUsage usage);
+
+public:
+ void data(std::unique_ptr<Data> &&data) { _data = std::move(data); }
+ const Data &data(void) const { return *_data; }
+
+public:
+ template <typename T, typename... Args> void data(Args &&... args)
+ {
+ data(std::unique_ptr<T>(new T{std::forward<Args>(args)...}));
+ }
+
+public:
+ template <typename T> T asScalar(void) const
+ {
+ assert((_shape.rank() == 0) || ((_shape.rank() == 1) && (_shape.dim(0) == 1)));
+ assert(_data != nullptr);
+ assert((_data->base() != nullptr) && (_data->size() == sizeof(T)));
+
+ return *(reinterpret_cast<const T *>(_data->base()));
+ }
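+
+ // Usage sketch (illustrative only): given an operand object whose payload
+ // was filled with a single 4-byte value, e.g. an activation code:
+ //
+ //   auto activation = object.asScalar<int32_t>();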
+
+public:
+ void lower_info(std::unique_ptr<LowerInfo> &&lower_info);
+ const LowerInfo *lower_info() const;
+
+private:
+ const Shape _shape;
+ const TypeInfo _type;
+ std::unique_ptr<Data> _data;
+ OperandUsage _usage;
+
+ operation::IndexList _uses;
+ operation::IndexList _def; // size is 0 (constant) or 1 (from def operation)
+
+ std::unique_ptr<LowerInfo> _lower_info;
+};
+
+} // namespace operand
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERAND_OBJECT_H__
diff --git a/runtimes/neurun/src/graph/operand/Set.cc b/runtimes/neurun/src/graph/operand/Set.cc
new file mode 100644
index 000000000..60dad2336
--- /dev/null
+++ b/runtimes/neurun/src/graph/operand/Set.cc
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Set.h"
+
+#include "nnfw/std/memory.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+
+const Index Set::generateIndex()
+{
+ assert(_index_count <= 0x7fffffff);
+
+ return Index{_index_count++};
+}
+
+Index Set::append(const Shape &shape, const TypeInfo &type)
+{
+ auto index = generateIndex();
+
+ _objects[index] = nnfw::make_unique<Object>(shape, type);
+
+ return index;
+}
+
+const Object &Set::at(const Index &index) const { return *(_objects.at(index)); }
+
+Object &Set::at(const Index &index) { return *(_objects.at(index)); }
+
+bool Set::exist(const Index &index) const { return index.value() < _objects.size(); }
+
+void Set::iterate(const std::function<void(const Index &, const Object &)> &fn) const
+{
+ for (const auto &e : _objects)
+ {
+ fn(e.first, *e.second);
+ }
+}
+
+void Set::iterate(const std::function<void(const Index &, Object &)> &fn)
+{
+ for (auto &e : _objects)
+ {
+ fn(e.first, *e.second);
+ }
+}
+
+} // namespace operand
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/src/graph/operand/Set.h b/runtimes/neurun/src/graph/operand/Set.h
new file mode 100644
index 000000000..c8266fed0
--- /dev/null
+++ b/runtimes/neurun/src/graph/operand/Set.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERAND_SET_H__
+#define __NEURUN_GRAPH_OPERAND_SET_H__
+
+#include <memory>
+#include <unordered_map>
+
+#include "Object.h"
+#include "Index.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+
+class Set
+{
+public:
+ Set() : _index_count(0) {}
+
+public:
+ Index append(const Shape &, const TypeInfo &);
+
+public:
+ const Object &at(const Index &) const;
+ Object &at(const Index &);
+ bool exist(const Index &) const;
+ void iterate(const std::function<void(const Index &, const Object &)> &fn) const;
+ void iterate(const std::function<void(const Index &, Object &)> &fn);
+
+private:
+ const Index generateIndex();
+
+private:
+ std::unordered_map<Index, std::unique_ptr<Object>> _objects;
+ uint32_t _index_count;
+};
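+
+// Usage sketch (illustrative only; the OperandCode value is an assumption
+// taken from NeuralNetworks.h):
+//
+//   Set operands;
+//   auto index = operands.append(Shape{1}, TypeInfo{ANEURALNETWORKS_FLOAT32, 0.0f, 0});
+//   Object &object = operands.at(index);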
+
+} // namespace operand
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERAND_SET_H__
diff --git a/runtimes/neurun/src/graph/operand/Shape.cc b/runtimes/neurun/src/graph/operand/Shape.cc
new file mode 100644
index 000000000..f6d7a6999
--- /dev/null
+++ b/runtimes/neurun/src/graph/operand/Shape.cc
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+
+#include "Shape.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+
+Shape::Shape(uint32_t rank) { _dims.resize(rank); }
+
+int32_t Shape::asVector(void) const
+{
+ assert(rank() == 1);
+
+ return dim(0);
+}
+
+nnfw::util::feature::Shape Shape::asFeature(void) const
+{
+ assert(rank() == 4);
+
+ // Feature Map in NNAPI
+ // - Dimension(0) -> Batch
+ // - Dimension(1) -> Height
+ // - Dimension(2) -> Width
+ // - Dimension(3) -> Depth
+ const auto batch = dim(0);
+ const auto depth = dim(3);
+ const auto height = dim(1);
+ const auto width = dim(2);
+
+ return nnfw::util::feature::Shape(batch, depth, height, width);
+}
+
+nnfw::util::kernel::Shape Shape::asKernel(void) const
+{
+ assert(rank() == 4);
+
+ // Convolution Kernel in NNAPI
+ // - Dimension(0) -> Count
+ // - Dimension(1) -> Height
+ // - Dimension(2) -> Width
+ // - Dimension(3) -> Depth
+ const auto count = dim(0);
+ const auto depth = dim(3);
+ const auto height = dim(1);
+ const auto width = dim(2);
+
+ return nnfw::util::kernel::Shape(count, depth, height, width);
+}
+
+} // namespace operand
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/src/graph/operand/Shape.h b/runtimes/neurun/src/graph/operand/Shape.h
new file mode 100644
index 000000000..3ae970e85
--- /dev/null
+++ b/runtimes/neurun/src/graph/operand/Shape.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERAND_SHAPE_H__
+#define __NEURUN_GRAPH_OPERAND_SHAPE_H__
+
+#include <vector>
+#include <cstdint>
+
+#include "util/feature/Shape.h"
+#include "util/kernel/Shape.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+
+struct Shape
+{
+public:
+ Shape(uint32_t rank);
+
+public:
+ uint32_t rank(void) const { return static_cast<uint32_t>(_dims.size()); }
+
+public:
+ int32_t dim(uint32_t n) const { return _dims.at(n); }
+ int32_t &dim(uint32_t n) { return _dims.at(n); }
+ const std::vector<int32_t> &dims() const { return _dims; }
+
+public:
+ int32_t asVector(void) const;
+ nnfw::util::feature::Shape asFeature(void) const;
+ nnfw::util::kernel::Shape asKernel(void) const;
+
+private:
+ std::vector<int32_t> _dims;
+};
+
+} // namespace operand
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERAND_SHAPE_H__
diff --git a/runtimes/neurun/src/graph/operand/Shape4DConvert.h b/runtimes/neurun/src/graph/operand/Shape4DConvert.h
new file mode 100644
index 000000000..b840d19b8
--- /dev/null
+++ b/runtimes/neurun/src/graph/operand/Shape4DConvert.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERAND_SHAPE4D_CONVERT_H__
+#define __NEURUN_GRAPH_OPERAND_SHAPE4D_CONVERT_H__
+
+#include "LowerInfo.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+
+inline LowerInfo::Shape4D asShape4D(const Shape &shape)
+{
+ switch (shape.rank())
+ {
+ case 0u:
+ return LowerInfo::Shape4D(1, 1, 1, 1);
+
+ case 1u:
+ return LowerInfo::Shape4D(1, 1, 1, shape.dim(0));
+
+ case 2u:
+ return LowerInfo::Shape4D(1, 1, shape.dim(1), shape.dim(0));
+
+ case 3u:
+ return LowerInfo::Shape4D(1, shape.dim(2), shape.dim(1), shape.dim(0));
+
+ case 4u:
+ return LowerInfo::Shape4D(shape.dim(3), shape.dim(2), shape.dim(1), shape.dim(0));
+
+ default:
+ throw std::runtime_error{"Unsupported rank > 4"};
+ }
+}
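+
+// Worked example (illustrative only): a rank-2 shape with dim(0) = 3 and
+// dim(1) = 5 maps to Shape4D{n = 1, h = 1, w = 5, c = 3}.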
+
+} // namespace operand
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERAND_SHAPE4D_CONVERT_H__
diff --git a/runtimes/neurun/src/graph/operand/TypeInfo.cc b/runtimes/neurun/src/graph/operand/TypeInfo.cc
new file mode 100644
index 000000000..5642b1e8f
--- /dev/null
+++ b/runtimes/neurun/src/graph/operand/TypeInfo.cc
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TypeInfo.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+
+DataType TypeInfo::typeFromOperandCode(OperandCode type)
+{
+ // For now, neurun::graph::operand::DataType shares the same enum values with
+ // OperandCode in NeuralNetworks.h.
+ return static_cast<DataType>(static_cast<uint32_t>(type));
+}
+
+} // namespace operand
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/src/graph/operand/TypeInfo.h b/runtimes/neurun/src/graph/operand/TypeInfo.h
new file mode 100644
index 000000000..41f4453e5
--- /dev/null
+++ b/runtimes/neurun/src/graph/operand/TypeInfo.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERAND_TYPEINFO_H__
+#define __NEURUN_GRAPH_OPERAND_TYPEINFO_H__
+
+#include <cstdint>
+
+#include <NeuralNetworks.h>
+
+#include "DataType.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operand
+{
+
+class TypeInfo
+{
+public:
+ TypeInfo(OperandCode type, float scale, int32_t offset)
+ : _type(typeFromOperandCode(type)), _scale(scale), _offset(offset)
+ {
+ // DO NOTHING
+ }
+
+public:
+ DataType type() const { return _type; }
+ float scale() const { return _scale; }
+ int32_t offset() const { return _offset; }
+
+private:
+ // For now, neurun::graph::operand::DataType shares the same enum values with
+ // OperandCode in NeuralNetworks.h.
+ // If the values ever diverge, this mapping function must be fixed.
+ DataType typeFromOperandCode(OperandCode type);
+
+private:
+ DataType _type;
+ float _scale;
+ int32_t _offset;
+};
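+
+// Usage sketch (illustrative only; the OperandCode value is an assumption
+// taken from NeuralNetworks.h): a quantized tensor type with scale 0.5 and
+// zero-point 128.
+//
+//   TypeInfo quant8{ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, 0.5f, 128};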
+} // namespace operand
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERAND_TYPEINFO_H__
diff --git a/runtimes/neurun/src/graph/operation/AvgPool2D.cc b/runtimes/neurun/src/graph/operation/AvgPool2D.cc
new file mode 100644
index 000000000..b2612c6b5
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/AvgPool2D.cc
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AvgPool2D.h"
+
+#include <cassert>
+
+#include "NodeVisitor.h"
+#include "LowerInfo.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+namespace AvgPool2D
+{
+namespace Implicit
+{
+
+void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+Node::Node(const graph::operation::Node::InitParam &init_param)
+{
+ assert(init_param.input_count == 7);
+ assert(init_param.output_count == 1);
+
+ // Each input should be interpreted as follows:
+ //
+ // 0 -> IFM Tensor Index
+ // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
+ // 2 -> Horizontal (over width) Stride Index
+ // 3 -> Vertical (over height) Stride Index
+ // 4 -> Filter Width Index
+ // 5 -> Filter Height Index
+ // 6 -> FuseCode (activation) Index
+
+ setInputs({init_param.inputs[0]});
+ setOutputs({init_param.outputs[0]});
+
+ _param.padding_index = init_param.inputs[1];
+ _param.hstride_index = init_param.inputs[2];
+ _param.vstride_index = init_param.inputs[3];
+
+ _param.kw_index = init_param.inputs[4];
+ _param.kh_index = init_param.inputs[5];
+ _param.activation_index = init_param.inputs[6];
+}
+
+void Node::setInputs(const operand::IndexSet &indexes)
+{
+ assert(indexes.size() == 1);
+
+ graph::operation::Node::setInputs(indexes);
+}
+
+void Node::setOutputs(const operand::IndexSet &indexes)
+{
+ assert(indexes.size() == 1);
+
+ graph::operation::Node::setOutputs(indexes);
+}
+
+} // namespace Implicit
+} // namespace AvgPool2D
+} // namespace operation
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/src/graph/operation/AvgPool2D.h b/runtimes/neurun/src/graph/operation/AvgPool2D.h
new file mode 100644
index 000000000..a856b9cd4
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/AvgPool2D.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERATION_AVGPOOL2D_H__
+#define __NEURUN_GRAPH_OPERATION_AVGPOOL2D_H__
+
+#include <memory>
+
+#include "graph/operation/Node.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+namespace AvgPool2D
+{
+namespace Implicit
+{
+
+struct Param
+{
+ int32_t kw_index;
+ int32_t kh_index;
+
+ int32_t hstride_index;
+ int32_t vstride_index;
+
+ int32_t padding_index;
+ int32_t activation_index;
+};
+
+class Node : public graph::operation::Node
+{
+public:
+ Node(const graph::operation::Node::InitParam &init_param);
+
+public:
+ virtual void accept(NodeVisitor &&) const override;
+
+public:
+ virtual void setInputs(const operand::IndexSet &indexes) override;
+ virtual void setOutputs(const operand::IndexSet &indexes) override;
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace Implicit
+} // namespace AvgPool2D
+} // namespace operation
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERATION_AVGPOOL2D_H__
diff --git a/runtimes/neurun/src/graph/operation/Concat.cc b/runtimes/neurun/src/graph/operation/Concat.cc
new file mode 100644
index 000000000..952cf687c
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/Concat.cc
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Concat.h"
+
+#include <cassert>
+
+#include "NodeVisitor.h"
+#include "LowerInfo.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+namespace Concat
+{
+
+void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+Node::Node(const graph::operation::Node::InitParam &init_param)
+{
+ assert(init_param.input_count >= 2); // At least one input tensor and an axis
+ assert(init_param.output_count == 1);
+
+ // When there are N + 1 inputs, each input should be interpreted as follows:
+ //
+ // [0, N) -> Input tensors
+ // N -> Axis
+ //
+
+ {
+ operand::IndexSet inds;
+ for (uint32_t n = 0; n < init_param.input_count - 1; ++n)
+ {
+ inds.append(operand::Index{init_param.inputs[n]});
+ }
+ setInputs(inds);
+ }
+ setOutputs({init_param.outputs[0]});
+
+ _param.axis_index = init_param.inputs[init_param.input_count - 1];
+}
+
+void Node::setOutputs(const operand::IndexSet &indexes)
+{
+ assert(indexes.size() == 1);
+
+ graph::operation::Node::setOutputs(indexes);
+}
+
+} // namespace Concat
+} // namespace operation
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/src/graph/operation/Concat.h b/runtimes/neurun/src/graph/operation/Concat.h
new file mode 100644
index 000000000..dab17d031
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/Concat.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERATION_CONCAT_H__
+#define __NEURUN_GRAPH_OPERATION_CONCAT_H__
+
+#include <memory>
+
+#include "graph/operation/Node.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+namespace Concat
+{
+
+struct Param
+{
+ int32_t axis_index;
+};
+
+class Node : public graph::operation::Node
+{
+public:
+ Node(const graph::operation::Node::InitParam &init_param);
+
+public:
+ virtual void accept(NodeVisitor &&) const override;
+
+public:
+ virtual void setOutputs(const operand::IndexSet &indexes) override;
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace Concat
+} // namespace operation
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERATION_CONCAT_H__
diff --git a/runtimes/neurun/src/graph/operation/Conv2D.cc b/runtimes/neurun/src/graph/operation/Conv2D.cc
new file mode 100644
index 000000000..f88955db1
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/Conv2D.cc
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Conv2D.h"
+
+#include <cassert>
+
+#include "NodeVisitor.h"
+#include "LowerInfo.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+namespace Conv2D
+{
+namespace Implicit
+{
+
+void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+Node::Node(const graph::operation::Node::InitParam &init_param)
+{
+ assert(init_param.input_count == 7 && init_param.output_count == 1);
+
+ // Each input should be interpreted as follows:
+ //
+ // 0 -> IFM Tensor Index
+ // 1 -> Kernel Tensor Index
+ // 2 -> Bias Tensor Index
+ // 3 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
+ // 4 -> Stride (width) Index
+ // 5 -> Stride (height) Index
+ // 6 -> Activation Index
+
+ setInputs({init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]});
+ setOutputs({init_param.outputs[0]});
+
+ _param.padding_index = init_param.inputs[3];
+ _param.hstride_index = init_param.inputs[4];
+ _param.vstride_index = init_param.inputs[5];
+ _param.activation_index = init_param.inputs[6];
+}
+
+void Node::setInputs(const operand::IndexSet &indexes)
+{
+ assert(indexes.size() == 3);
+
+ graph::operation::Node::setInputs(indexes);
+}
+
+void Node::setOutputs(const operand::IndexSet &indexes)
+{
+ assert(indexes.size() == 1);
+
+ graph::operation::Node::setOutputs(indexes);
+}
+
+} // namespace Implicit
+} // namespace Conv2D
+} // namespace operation
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/src/graph/operation/Conv2D.h b/runtimes/neurun/src/graph/operation/Conv2D.h
new file mode 100644
index 000000000..f75058a30
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/Conv2D.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERATION_CONV2D_H__
+#define __NEURUN_GRAPH_OPERATION_CONV2D_H__
+
+#include <memory>
+
+#include "graph/operation/Node.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+namespace Conv2D
+{
+namespace Implicit
+{
+
+struct Param
+{
+ int32_t hstride_index;
+ int32_t vstride_index;
+
+ int32_t padding_index;
+ int32_t activation_index;
+};
+
+class Node : public graph::operation::Node
+{
+public:
+ Node(const graph::operation::Node::InitParam &);
+
+public:
+ virtual void accept(NodeVisitor &&) const override;
+
+public:
+ virtual void setInputs(const operand::IndexSet &indexes) override;
+ virtual void setOutputs(const operand::IndexSet &indexes) override;
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace Implicit
+} // namespace Conv2D
+} // namespace operation
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERATION_CONV2D_H__
diff --git a/runtimes/neurun/src/graph/operation/FullyConnected.cc b/runtimes/neurun/src/graph/operation/FullyConnected.cc
new file mode 100644
index 000000000..0a6553d1e
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/FullyConnected.cc
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FullyConnected.h"
+
+#include <cassert>
+
+#include "NodeVisitor.h"
+#include "LowerInfo.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+namespace FullyConnected
+{
+
+void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+Node::Node(const graph::operation::Node::InitParam &init_param)
+{
+ assert(init_param.input_count == 4 && init_param.output_count == 1);
+
+ // Each input should be interpreted as follows:
+ //
+ // 0 -> A tensor, specifying the input.
+ // 1 -> A 2-D tensor, specifying the weights.
+ // 2 -> A 1-D tensor, specifying the bias.
+ // 3 -> An INT32 value which has to be one of the FuseCode values.
+
+ setInputs({init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]});
+ setOutputs({init_param.outputs[0]});
+
+ _param.activation_index = init_param.inputs[3];
+}
+
+void Node::setInputs(const operand::IndexSet &indexes)
+{
+ assert(indexes.size() == 3);
+
+ graph::operation::Node::setInputs(indexes);
+}
+
+void Node::setOutputs(const operand::IndexSet &indexes)
+{
+ assert(indexes.size() == 1);
+
+ graph::operation::Node::setOutputs(indexes);
+}
+
+} // namespace FullyConnected
+} // namespace operation
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/src/graph/operation/FullyConnected.h b/runtimes/neurun/src/graph/operation/FullyConnected.h
new file mode 100644
index 000000000..a1f920e4b
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/FullyConnected.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERATION_FULLYCONNECTED_H__
+#define __NEURUN_GRAPH_OPERATION_FULLYCONNECTED_H__
+
+#include <memory>
+
+#include "graph/operation/Node.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+namespace FullyConnected
+{
+
+struct Param
+{
+ int32_t activation_index;
+};
+
+class Node : public graph::operation::Node
+{
+public:
+ Node(const graph::operation::Node::InitParam &init_param);
+
+public:
+ virtual void accept(NodeVisitor &&) const override;
+
+public:
+ virtual void setInputs(const operand::IndexSet &indexes) override;
+ virtual void setOutputs(const operand::IndexSet &indexes) override;
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace FullyConnected
+} // namespace operation
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERATION_FULLYCONNECTED_H__
diff --git a/runtimes/neurun/src/graph/operation/Index.h b/runtimes/neurun/src/graph/operation/Index.h
new file mode 100644
index 000000000..3902d039b
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/Index.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERATION_INDEX_H__
+#define __NEURUN_GRAPH_OPERATION_INDEX_H__
+
+#include "graph/Index.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+
+using Index = ::neurun::graph::Index<uint32_t, struct IndexTag>;
+
+} // namespace operation
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERATION_INDEX_H__
diff --git a/runtimes/neurun/src/graph/operation/IndexList.cc b/runtimes/neurun/src/graph/operation/IndexList.cc
new file mode 100644
index 000000000..cdc5997ea
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/IndexList.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "IndexList.h"
+
+#include <algorithm>
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+
+IndexList::IndexList(std::initializer_list<Index> list) : _list(list)
+{
+ // DO NOTHING
+}
+
+bool IndexList::contains(const ::neurun::graph::operation::Index &index) const
+{
+ return std::find(_list.begin(), _list.end(), index) != _list.end();
+}
+
+} // namespace operation
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/src/graph/operation/IndexList.h b/runtimes/neurun/src/graph/operation/IndexList.h
new file mode 100644
index 000000000..cfac46abc
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/IndexList.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERATION_INDEX_LIST_H__
+#define __NEURUN_GRAPH_OPERATION_INDEX_LIST_H__
+
+#include <initializer_list>
+#include <list>
+
+#include "Index.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+
+class IndexList
+{
+public:
+ IndexList(void) = default;
+ IndexList(std::initializer_list<Index> list);
+
+public:
+ void append(const Index &index) { _list.push_back(index); }
+ void remove(const Index &index) { _list.remove(index); }
+
+public:
+ uint32_t size() const { return static_cast<uint32_t>(_list.size()); }
+ const std::list<Index> &list() const { return _list; }
+ bool contains(const Index &index) const;
+
+private:
+ std::list<Index> _list;
+};
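+
+// Usage sketch (illustrative only; constructing Index from a raw uint32_t
+// follows its use elsewhere in this change):
+//
+//   IndexList uses;
+//   uses.append(Index{0u});
+//   uses.contains(Index{0u}); // true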
+
+} // namespace operation
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERATION_INDEX_LIST_H__
diff --git a/runtimes/neurun/src/graph/operation/LowerInfo.cc b/runtimes/neurun/src/graph/operation/LowerInfo.cc
new file mode 100644
index 000000000..2998b1922
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/LowerInfo.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LowerInfo.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+
+LowerInfo::LowerInfo(const backend::Backend &backend) : _backend(backend)
+{
+ // DO NOTHING
+}
+
+} // namespace operation
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/src/graph/operation/LowerInfo.h b/runtimes/neurun/src/graph/operation/LowerInfo.h
new file mode 100644
index 000000000..f3fbbf178
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/LowerInfo.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERATION_LOWER_INFO_H__
+#define __NEURUN_GRAPH_OPERATION_LOWER_INFO_H__
+
+#include <string>
+
+#include "backend/BackendManager.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+
+class LowerInfo
+{
+public:
+ LowerInfo(const backend::Backend &backend);
+ const backend::Backend &backend() const { return _backend; }
+
+private:
+ backend::Backend _backend;
+};
+
+} // namespace operation
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERATION_LOWER_INFO_H__
diff --git a/runtimes/neurun/src/graph/operation/MaxPool2D.cc b/runtimes/neurun/src/graph/operation/MaxPool2D.cc
new file mode 100644
index 000000000..76648baf6
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/MaxPool2D.cc
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MaxPool2D.h"
+
+#include <cassert>
+
+#include "NodeVisitor.h"
+#include "LowerInfo.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+namespace MaxPool2D
+{
+namespace Implicit
+{
+
+void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+Node::Node(const graph::operation::Node::InitParam &init_param)
+{
+ assert(init_param.input_count == 7);
+ assert(init_param.output_count == 1);
+
+ // Each input should be interpreted as follows:
+ //
+ // 0 -> IFM Tensor Index
+ // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
+ // 2 -> Horizontal (over width) Stride Index
+ // 3 -> Vertical (over height) Stride Index
+ // 4 -> Filter Width Index
+ // 5 -> Filter Height Index
+ // 6 -> FuseCode (activation) Index
+
+ setInputs({init_param.inputs[0]});
+ setOutputs({init_param.outputs[0]});
+
+ _param.padding_index = init_param.inputs[1];
+ _param.hstride_index = init_param.inputs[2];
+ _param.vstride_index = init_param.inputs[3];
+
+ _param.kw_index = init_param.inputs[4];
+ _param.kh_index = init_param.inputs[5];
+ _param.activation_index = init_param.inputs[6];
+}
+
+void Node::setInputs(const operand::IndexSet &indexes)
+{
+ assert(indexes.size() == 1);
+
+ graph::operation::Node::setInputs(indexes);
+}
+
+void Node::setOutputs(const operand::IndexSet &indexes)
+{
+ assert(indexes.size() == 1);
+
+ graph::operation::Node::setOutputs(indexes);
+}
+
+} // namespace Implicit
+} // namespace MaxPool2D
+} // namespace operation
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/src/graph/operation/MaxPool2D.h b/runtimes/neurun/src/graph/operation/MaxPool2D.h
new file mode 100644
index 000000000..30f9b0b50
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/MaxPool2D.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERATION_MAXPOOL2D_H__
+#define __NEURUN_GRAPH_OPERATION_MAXPOOL2D_H__
+
+#include <memory>
+
+#include "graph/operation/Node.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+namespace MaxPool2D
+{
+namespace Implicit
+{
+
+struct Param
+{
+ int32_t kw_index;
+ int32_t kh_index;
+
+ int32_t hstride_index;
+ int32_t vstride_index;
+
+ int32_t padding_index;
+ int32_t activation_index;
+};
+
+class Node : public graph::operation::Node
+{
+public:
+ virtual void accept(NodeVisitor &&) const override;
+
+public:
+ Node(const graph::operation::Node::InitParam &init_param);
+
+public:
+ virtual void setInputs(const operand::IndexSet &indexes) override;
+ virtual void setOutputs(const operand::IndexSet &indexes) override;
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace Implicit
+} // namespace MaxPool2D
+} // namespace operation
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERATION_MAXPOOL2D_H__
diff --git a/runtimes/neurun/src/graph/operation/NOP.cc b/runtimes/neurun/src/graph/operation/NOP.cc
new file mode 100644
index 000000000..18c3246ce
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/NOP.cc
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NOP.h"
+
+#include "NodeVisitor.h"
+#include "LowerInfo.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+namespace NOP
+{
+
+void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+} // namespace NOP
+} // namespace operation
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/src/graph/operation/NOP.h b/runtimes/neurun/src/graph/operation/NOP.h
new file mode 100644
index 000000000..51b0f6f71
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/NOP.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERATION_NOP_H__
+#define __NEURUN_GRAPH_OPERATION_NOP_H__
+
+#include <memory>
+
+#include "graph/operation/Node.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+namespace NOP
+{
+
+class Node : public graph::operation::Node
+{
+public:
+ Node(const graph::operation::Node::InitParam &) {}
+
+public:
+ virtual void accept(NodeVisitor &&) const override;
+};
+
+} // namespace NOP
+} // namespace operation
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERATION_NOP_H__
diff --git a/runtimes/neurun/src/graph/operation/Node.cc b/runtimes/neurun/src/graph/operation/Node.cc
new file mode 100644
index 000000000..f472bc08c
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/Node.cc
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Node.h"
+
+#include "LowerInfo.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+
+Node::Node() = default;
+
+Node::~Node() = default;
+
+void Node::lower_info(std::unique_ptr<LowerInfo> &&lower_info)
+{
+ _lower_info = std::move(lower_info);
+}
+
+const LowerInfo *Node::lower_info() const { return _lower_info.get(); }
+
+} // namespace operation
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/src/graph/operation/Node.h b/runtimes/neurun/src/graph/operation/Node.h
new file mode 100644
index 000000000..9e98184e3
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/Node.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERATION_NODE_H__
+#define __NEURUN_GRAPH_OPERATION_NODE_H__
+
+#include <memory>
+
+#include "graph/operand/IndexSet.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+
+class LowerInfo;
+struct NodeVisitor;
+
+class Node
+{
+public:
+ struct InitParam
+ {
+ uint32_t input_count;
+ const uint32_t *inputs;
+ uint32_t output_count;
+ const uint32_t *outputs;
+ };
+
+public:
+ Node();
+ virtual ~Node();
+
+public:
+ virtual void accept(NodeVisitor &&) const = 0;
+
+public:
+ virtual const operand::IndexSet &getInputs() const { return _inputs; }
+ virtual const operand::IndexSet &getOutputs() const { return _outputs; }
+ // These setters are for input/output tensors only, not for constant data.
+ virtual void setInputs(const operand::IndexSet &indexes) { _inputs = indexes; }
+ virtual void setOutputs(const operand::IndexSet &indexes) { _outputs = indexes; }
+
+public:
+ void lower_info(std::unique_ptr<LowerInfo> &&lower_info);
+ const LowerInfo *lower_info() const;
+
+private:
+ operand::IndexSet _inputs;
+ operand::IndexSet _outputs;
+ std::unique_ptr<LowerInfo> _lower_info;
+};
+
+} // namespace operation
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERATION_NODE_H__
diff --git a/runtimes/neurun/src/graph/operation/NodeVisitor.h b/runtimes/neurun/src/graph/operation/NodeVisitor.h
new file mode 100644
index 000000000..28d19b0af
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/NodeVisitor.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERATION_NODE_VISITOR_H__
+#define __NEURUN_GRAPH_OPERATION_NODE_VISITOR_H__
+
+#include "Conv2D.h"
+#include "MaxPool2D.h"
+#include "AvgPool2D.h"
+#include "Concat.h"
+#include "Reshape.h"
+#include "FullyConnected.h"
+#include "Softmax.h"
+#include "NOP.h"
+#include "Permute.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+
+struct NodeVisitor
+{
+ virtual ~NodeVisitor() = default;
+
+ virtual void visit(const Conv2D::Implicit::Node &) = 0;
+ virtual void visit(const MaxPool2D::Implicit::Node &) = 0;
+ virtual void visit(const AvgPool2D::Implicit::Node &) = 0;
+ virtual void visit(const Concat::Node &) = 0;
+ virtual void visit(const Reshape::Node &) = 0;
+ virtual void visit(const FullyConnected::Node &) = 0;
+ virtual void visit(const Softmax::Node &) = 0;
+ virtual void visit(const NOP::Node &) = 0;
+ virtual void visit(const Permute::Node &) = 0;
+};
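+
+// Usage sketch (illustrative only): a concrete visitor overrides visit() for
+// every node type and is dispatched through Node::accept, e.g.
+//
+//   struct Dumper : public NodeVisitor
+//   {
+//     void visit(const Conv2D::Implicit::Node &) override { /* ... */ }
+//     // ... one override per remaining node type ...
+//   };
+//
+//   node.accept(Dumper{});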
+
+} // namespace operation
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERATION_NODE_VISITOR_H__
diff --git a/runtimes/neurun/src/graph/operation/Op.lst b/runtimes/neurun/src/graph/operation/Op.lst
new file mode 100644
index 000000000..23b4123cb
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/Op.lst
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OP
+#error Define OP before including this file
+#endif
+
+// NOTE The relation between "Internal Name" and "NN API Name" is "1 : N".
+
+// Internal Name | NN API Name
+OP(Conv2D::Implicit , CONV_2D)
+OP(AvgPool2D::Implicit , AVERAGE_POOL_2D)
+OP(MaxPool2D::Implicit , MAX_POOL_2D)
+OP(Concat , CONCATENATION)
+OP(FullyConnected , FULLY_CONNECTED)
+OP(Reshape , RESHAPE)
+OP(Softmax , SOFTMAX)
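+
+// Usage sketch (hypothetical consumer): define OP, include this file, then
+// undefine it, e.g. to build a dispatch over NN API operation codes:
+//
+//   #define OP(InternalName, NNAPIName) \
+//     case ANEURALNETWORKS_##NNAPIName: /* handle InternalName */ break;
+//   #include "graph/operation/Op.lst"
+//   #undef OP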
diff --git a/runtimes/neurun/src/graph/operation/Permute.cc b/runtimes/neurun/src/graph/operation/Permute.cc
new file mode 100644
index 000000000..2688e5e5f
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/Permute.cc
@@ -0,0 +1,41 @@
+#include "Permute.h"
+
+#include <cassert>
+
+#include "NodeVisitor.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+namespace Permute
+{
+
+void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+Node::Node(const operand::Index &input, const operand::Index &output)
+{
+ setInputs({input});
+ setOutputs({output});
+}
+
+void Node::setInputs(const operand::IndexSet &indexes)
+{
+ assert(indexes.size() == 1);
+
+ graph::operation::Node::setInputs(indexes);
+}
+
+void Node::setOutputs(const operand::IndexSet &indexes)
+{
+ assert(indexes.size() == 1);
+
+ graph::operation::Node::setOutputs(indexes);
+}
+
+} // namespace Permute
+} // namespace operation
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/src/graph/operation/Permute.h b/runtimes/neurun/src/graph/operation/Permute.h
new file mode 100644
index 000000000..540f869b1
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/Permute.h
@@ -0,0 +1,33 @@
+#ifndef __NEURUN_GRAPH_OPERATION_PERMUTE_PERMUTE_H__
+#define __NEURUN_GRAPH_OPERATION_PERMUTE_PERMUTE_H__
+
+#include "graph/operation/Node.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+namespace Permute
+{
+
+class Node : public graph::operation::Node
+{
+public:
+ virtual void accept(NodeVisitor &&) const override;
+
+public:
+ Node(const operand::Index &input, const operand::Index &output);
+
+public:
+ virtual void setInputs(const operand::IndexSet &indexes) override;
+ virtual void setOutputs(const operand::IndexSet &indexes) override;
+};
+
+} // namespace Permute
+} // namespace operation
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERATION_PERMUTE_PERMUTE_H__
diff --git a/runtimes/neurun/src/graph/operation/Reshape.cc b/runtimes/neurun/src/graph/operation/Reshape.cc
new file mode 100644
index 000000000..e6bc2117f
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/Reshape.cc
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Reshape.h"
+
+#include <cassert>
+
+#include "NodeVisitor.h"
+#include "LowerInfo.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+namespace Reshape
+{
+
+void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+Node::Node(const graph::operation::Node::InitParam &init_param)
+{
+ assert(init_param.input_count == 2 && init_param.output_count == 1);
+
+ // Each input should be interpreted as follows:
+ //
+ // 0 -> A tensor, specifying the tensor to be reshaped.
+ // 1 -> A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32, defining the shape of the output
+ // tensor
+
+ // TODO Second input should be shape tensor (init_param.inputs[1])
+ setInputs({init_param.inputs[0] /* , init_param.inputs[1] */});
+ setOutputs({init_param.outputs[0]});
+}
+
+void Node::setInputs(const operand::IndexSet &indexes)
+{
+ assert(indexes.size() == 1); // TODO Should be 2 (See also the constructor)
+
+ graph::operation::Node::setInputs(indexes);
+}
+
+void Node::setOutputs(const operand::IndexSet &indexes)
+{
+ assert(indexes.size() == 1);
+
+ graph::operation::Node::setOutputs(indexes);
+}
+
+} // namespace Reshape
+} // namespace operation
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/src/graph/operation/Reshape.h b/runtimes/neurun/src/graph/operation/Reshape.h
new file mode 100644
index 000000000..168719b46
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/Reshape.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERATION_RESHAPE_H__
+#define __NEURUN_GRAPH_OPERATION_RESHAPE_H__
+
+#include <memory>
+
+#include "graph/operation/Node.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+namespace Reshape
+{
+
+class Node : public graph::operation::Node
+{
+public:
+ virtual void accept(NodeVisitor &&) const override;
+
+public:
+ Node(const graph::operation::Node::InitParam &init_param);
+
+public:
+ virtual void setInputs(const operand::IndexSet &indexes) override;
+ virtual void setOutputs(const operand::IndexSet &indexes) override;
+};
+
+} // namespace Reshape
+} // namespace operation
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERATION_RESHAPE_H__
diff --git a/runtimes/neurun/src/graph/operation/Set.cc b/runtimes/neurun/src/graph/operation/Set.cc
new file mode 100644
index 000000000..a1ddfa6d4
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/Set.cc
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Set.h"
+
+#include <cassert>
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+
+const Index Set::generateIndex()
+{
+  assert(_index_count <= 0x7fffffff);
+
+ return Index{_index_count++};
+}
+
+Index Set::append(std::unique_ptr<Node> &&node)
+{
+ auto index = generateIndex();
+
+ _nodes[index] = std::move(node);
+ return index;
+}
+
+const Node &Set::at(const Index &index) const { return *(_nodes.at(index)); }
+
+Node &Set::at(const Index &index) { return *(_nodes.at(index)); }
+
+bool Set::exist(const Index &index) const { return _nodes.find(index) != _nodes.end(); }
+
+void Set::iterate(const std::function<void(const Index &, const Node &)> &fn) const
+{
+ for (auto it = _nodes.begin(); it != _nodes.end(); ++it)
+ {
+ fn(it->first, *it->second);
+ }
+}
+
+void Set::iterate(const std::function<void(const Index &, Node &)> &fn)
+{
+ for (auto it = _nodes.begin(); it != _nodes.end(); ++it)
+ {
+ fn(it->first, *it->second);
+ }
+}
+
+} // namespace operation
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/src/graph/operation/Set.h b/runtimes/neurun/src/graph/operation/Set.h
new file mode 100644
index 000000000..bc6913ff4
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/Set.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERATION_SET_H__
+#define __NEURUN_GRAPH_OPERATION_SET_H__
+
+#include <memory>
+
+#include "graph/operation/Index.h"
+#include "Node.h"
+
+#include <unordered_map>
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+
+class Set
+{
+public:
+ Set() : _index_count(0) {}
+
+public:
+ Index append(std::unique_ptr<Node> &&node);
+
+public:
+ const Node &at(const Index &) const;
+ Node &at(const Index &);
+ bool exist(const Index &) const;
+  uint32_t size() const { return static_cast<uint32_t>(_nodes.size()); }
+ void iterate(const std::function<void(const Index &, const Node &)> &fn) const;
+ void iterate(const std::function<void(const Index &, Node &)> &fn);
+
+private:
+ const Index generateIndex();
+
+private:
+ std::unordered_map<Index, std::unique_ptr<Node>> _nodes;
+ uint32_t _index_count;
+};
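+
+// Usage sketch (hypothetical node type and init_param):
+//
+//   Set operations;
+//   auto index = operations.append(std::make_unique<Concat::Node>(init_param));
+//   operations.iterate([](const Index &, const Node &node) { /* visit all */ });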
+
+} // namespace operation
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERATION_SET_H__
diff --git a/runtimes/neurun/src/graph/operation/Softmax.cc b/runtimes/neurun/src/graph/operation/Softmax.cc
new file mode 100644
index 000000000..3b3c8661f
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/Softmax.cc
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Softmax.h"
+
+#include <cassert>
+
+#include "NodeVisitor.h"
+#include "LowerInfo.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+namespace Softmax
+{
+
+void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+Node::Node(const graph::operation::Node::InitParam &init_param)
+{
+ assert(init_param.input_count == 2 && init_param.output_count == 1);
+
+ // Each input should be interpreted as follows:
+ //
+  // 0 -> A 2-D or 4-D tensor, specifying the input tensor.
+ // 1 -> FLOAT32 value, specifying the positive scaling factor for the exponent, beta.
+
+ setInputs({init_param.inputs[0]});
+ setOutputs({init_param.outputs[0]});
+
+ _param.scale_index = init_param.inputs[1];
+}
+
+void Node::setInputs(const operand::IndexSet &indexes)
+{
+ assert(indexes.size() == 1);
+
+ graph::operation::Node::setInputs(indexes);
+}
+
+void Node::setOutputs(const operand::IndexSet &indexes)
+{
+ assert(indexes.size() == 1);
+
+ graph::operation::Node::setOutputs(indexes);
+}
+
+} // namespace Softmax
+} // namespace operation
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/src/graph/operation/Softmax.h b/runtimes/neurun/src/graph/operation/Softmax.h
new file mode 100644
index 000000000..e87a27518
--- /dev/null
+++ b/runtimes/neurun/src/graph/operation/Softmax.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_OPERATION_SOFTMAX_H__
+#define __NEURUN_GRAPH_OPERATION_SOFTMAX_H__
+
+#include <memory>
+
+#include "graph/operation/Node.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace operation
+{
+namespace Softmax
+{
+
+struct Param
+{
+ int32_t scale_index;
+};
+
+class Node : public graph::operation::Node
+{
+public:
+ virtual void accept(NodeVisitor &&) const override;
+
+public:
+ Node(const graph::operation::Node::InitParam &init_param);
+
+public:
+ virtual void setInputs(const operand::IndexSet &indexes) override;
+ virtual void setOutputs(const operand::IndexSet &indexes) override;
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace Softmax
+} // namespace operation
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_OPERATION_SOFTMAX_H__
diff --git a/runtimes/neurun/src/graph/verifier/IVerifier.cc b/runtimes/neurun/src/graph/verifier/IVerifier.cc
new file mode 100644
index 000000000..f8402695a
--- /dev/null
+++ b/runtimes/neurun/src/graph/verifier/IVerifier.cc
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "IVerifier.h"
+
+#include "graph/Graph.h"
+
+namespace neurun
+{
+namespace graph
+{
+namespace verifier
+{
+
+bool DAGChecker::verify(const Graph &graph) const
+{
+ auto &operations = graph.operations();
+ bool cyclic = false;
+ std::vector<bool> visited(operations.size(), false);
+ std::vector<bool> on_stack(operations.size(), false);
+
+ std::function<void(const operation::Index &index, const operation::Node &)> dfs_recursive =
+ [&](const operation::Index &index, const operation::Node &node) -> void {
+ if (on_stack[index.value()])
+ cyclic = true;
+ if (visited[index.value()])
+ return;
+ visited[index.value()] = true;
+ on_stack[index.value()] = true;
+
+    const auto &outputs = node.getOutputs();
+ for (auto output : outputs)
+ {
+      // TODO Fix traversing algorithm
+      // Every iteration searches all operations for those that consume `output` as an
+      // input; this information could be computed once and cached instead
+ operations.iterate([&](const operation::Index &cand_index, const operation::Node &cand_node) {
+        const auto &inputs = cand_node.getInputs();
+ for (auto input : inputs)
+ {
+ if (output == input)
+ {
+ dfs_recursive(cand_index, cand_node);
+ }
+ }
+ });
+ }
+
+ on_stack[index.value()] = false;
+ };
+
+ operations.iterate(dfs_recursive);
+
+ return !cyclic;
+}
+
+} // namespace verifier
+} // namespace graph
+} // namespace neurun
diff --git a/runtimes/neurun/src/graph/verifier/IVerifier.h b/runtimes/neurun/src/graph/verifier/IVerifier.h
new file mode 100644
index 000000000..17fe03f24
--- /dev/null
+++ b/runtimes/neurun/src/graph/verifier/IVerifier.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_GRAPH_VERIFIER_I_VERIFIER_H__
+#define __NEURUN_GRAPH_VERIFIER_I_VERIFIER_H__
+
+namespace neurun
+{
+namespace graph
+{
+class Graph;
+} // namespace graph
+} // namespace neurun
+
+namespace neurun
+{
+namespace graph
+{
+namespace verifier
+{
+
+struct IVerifier
+{
+ virtual ~IVerifier() = default;
+ virtual bool verify(const Graph &graph) const = 0;
+};
+
+} // namespace verifier
+} // namespace graph
+} // namespace neurun
+
+namespace neurun
+{
+namespace graph
+{
+namespace verifier
+{
+
+class DAGChecker : public IVerifier
+{
+public:
+ virtual bool verify(const Graph &graph) const override;
+};
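+
+// Usage sketch: run after graph construction to reject cyclic models
+//
+//   DAGChecker checker;
+//   assert(checker.verify(graph)); // true iff the operation graph is acyclic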
+
+} // namespace verifier
+} // namespace graph
+} // namespace neurun
+
+#endif // __NEURUN_GRAPH_VERIFIER_I_VERIFIER_H__
diff --git a/runtimes/neurun/src/internal/Convert.cc b/runtimes/neurun/src/internal/Convert.cc
new file mode 100644
index 000000000..c0260b04e
--- /dev/null
+++ b/runtimes/neurun/src/internal/Convert.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Convert.h"
+
+namespace internal
+{
+
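+// NOTE ACL's TensorShape takes dimensions starting from the fastest-changing
+// one, so the NHWC-style shapes below are passed in reverse, e.g. (W, H, C, N).
+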
+::arm_compute::TensorShape asTensorShape(int32_t h, int32_t w)
+{
+ return ::arm_compute::TensorShape(w, h);
+}
+
+::arm_compute::TensorShape asTensorShape(const nnfw::util::feature::Shape &shape)
+{
+ return ::arm_compute::TensorShape(shape.W, shape.H, shape.C, shape.N);
+}
+
+::arm_compute::TensorShape asTensorShape(const nnfw::util::kernel::Shape &shape)
+{
+ return ::arm_compute::TensorShape(shape.W, shape.H, shape.C, shape.N);
+}
+
+::arm_compute::TensorInfo asTensorInfo(const nnfw::util::feature::Shape &shape)
+{
+ return ::arm_compute::TensorInfo(asTensorShape(shape), 1, ::arm_compute::DataType::F32);
+}
+
+::arm_compute::TensorInfo asTensorInfo(const nnfw::util::kernel::Shape &shape)
+{
+ return ::arm_compute::TensorInfo(asTensorShape(shape), 1, ::arm_compute::DataType::F32);
+}
+
+::arm_compute::TensorInfo asTensorInfo(int32_t size)
+{
+ return ::arm_compute::TensorInfo(::arm_compute::TensorShape(size), 1,
+ ::arm_compute::DataType::F32);
+}
+
+::arm_compute::TensorInfo asTensorInfo(int32_t h, int32_t w)
+{
+ return ::arm_compute::TensorInfo(::arm_compute::TensorShape(w, h), 1,
+ ::arm_compute::DataType::F32);
+}
+
+} // namespace internal
diff --git a/runtimes/neurun/src/internal/Convert.h b/runtimes/neurun/src/internal/Convert.h
new file mode 100644
index 000000000..f279133aa
--- /dev/null
+++ b/runtimes/neurun/src/internal/Convert.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INTERNAL_CONVERT_H__
+#define __INTERNAL_CONVERT_H__
+
+#include <arm_compute/core/TensorInfo.h>
+#include <arm_compute/core/TensorShape.h>
+
+#include "util/feature/Shape.h"
+#include "util/kernel/Shape.h"
+
+namespace internal
+{
+
+::arm_compute::TensorShape asTensorShape(int32_t h, int32_t w);
+::arm_compute::TensorShape asTensorShape(const nnfw::util::feature::Shape &shape);
+::arm_compute::TensorShape asTensorShape(const nnfw::util::kernel::Shape &shape);
+
+::arm_compute::TensorInfo asTensorInfo(const nnfw::util::feature::Shape &shape);
+::arm_compute::TensorInfo asTensorInfo(const nnfw::util::kernel::Shape &shape);
+::arm_compute::TensorInfo asTensorInfo(int32_t size);
+::arm_compute::TensorInfo asTensorInfo(int32_t h, int32_t w);
+
+} // namespace internal
+
+#endif // __INTERNAL_CONVERT_H__
diff --git a/runtimes/neurun/src/internal/Padding.cc b/runtimes/neurun/src/internal/Padding.cc
new file mode 100644
index 000000000..200fa1a02
--- /dev/null
+++ b/runtimes/neurun/src/internal/Padding.cc
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "internal/Padding.h"
+
+#include <algorithm>
+
+namespace internal
+{
+
+Padding valid_padding(void)
+{
+ //
+ // ANEURALNETWORKS_PADDING_VALID
+ //
+ // VALID padding. No padding.
+ //
+ // When the input size is not evenly divisible by the filter size,
+ // the input at the end that could not fill the whole filter tile
+ // will simply be ignored.
+ //
+ Padding padding;
+
+ padding.top = 0;
+ padding.bottom = 0;
+ padding.left = 0;
+ padding.right = 0;
+
+ return padding;
+}
+
+Padding same_padding(const nnfw::util::feature::Shape &ifm_shape,
+ const nnfw::util::feature::Shape &ofm_shape, const Stride &stride, uint32_t kw,
+ uint32_t kh)
+{
+ Padding padding;
+
+ // ANEURALNETWORKS_PADDING_SAME (from NNAPI spec)
+ //
+ // SAME padding. Padding on both ends are the "same":
+ //
+ // padding_to_beginning = total_padding / 2
+ // padding_to_end = (total_padding + 1)/2.
+ //
+ const int32_t vertical_needed_input = (ofm_shape.H - 1) * stride.vertical + kh;
+ const int32_t vertical_total_padding = std::max(0, vertical_needed_input - ifm_shape.H);
+
+ const int32_t horizontal_needed_input = (ofm_shape.W - 1) * stride.horizontal + kw;
+ const int32_t horizontal_total_padding = std::max(0, horizontal_needed_input - ifm_shape.W);
+
+ padding.top = vertical_total_padding / 2;
+ padding.bottom = (vertical_total_padding + 1) / 2;
+ padding.left = horizontal_total_padding / 2;
+ padding.right = (horizontal_total_padding + 1) / 2;
+
+ return padding;
+}
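+
+// Worked example (hypothetical shapes): ifm_shape.H = 224, ofm_shape.H = 112,
+// stride.vertical = 2, kh = 3:
+//   needed input  = (112 - 1) * 2 + 3 = 225
+//   total padding = max(0, 225 - 224) = 1
+//   top = 1 / 2 = 0, bottom = (1 + 1) / 2 = 1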
+
+} // namespace internal
diff --git a/runtimes/neurun/src/internal/Padding.h b/runtimes/neurun/src/internal/Padding.h
new file mode 100644
index 000000000..84e081a78
--- /dev/null
+++ b/runtimes/neurun/src/internal/Padding.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INTERNAL_PADDING_H__
+#define __INTERNAL_PADDING_H__
+
+#include <stdint.h>
+
+#include <util/feature/Shape.h>
+
+namespace internal
+{
+
+struct Padding
+{
+ uint32_t top;
+ uint32_t bottom;
+ uint32_t left;
+ uint32_t right;
+};
+
+struct Stride
+{
+ uint32_t vertical;
+ uint32_t horizontal;
+};
+
+Padding valid_padding(void);
+Padding same_padding(const nnfw::util::feature::Shape &ifm_shape,
+ const nnfw::util::feature::Shape &ofm_shape, const Stride &stride, uint32_t kw,
+ uint32_t kh);
+
+} // namespace internal
+
+#endif // __INTERNAL_PADDING_H__
diff --git a/runtimes/neurun/src/internal/nnapi/feature/Reader.h b/runtimes/neurun/src/internal/nnapi/feature/Reader.h
new file mode 100644
index 000000000..eb513512d
--- /dev/null
+++ b/runtimes/neurun/src/internal/nnapi/feature/Reader.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INTERNAL_NNAPI_FEATURE_READER_H__
+#define __INTERNAL_NNAPI_FEATURE_READER_H__
+
+#include "internal/nnapi/feature/Utils.h"
+
+#include "util/feature/Reader.h"
+
+namespace internal
+{
+namespace nnapi
+{
+namespace feature
+{
+
+template <typename T> class Reader;
+
+template <> class Reader<float> final : public nnfw::util::feature::Reader<float>
+{
+public:
+ Reader(const ::nnfw::util::feature::Shape &shape, const uint8_t *ptr, size_t len)
+ : _shape{shape}, _ptr{ptr}, _len{len}
+ {
+ // DO NOTHING
+ }
+
+public:
+ const nnfw::util::feature::Shape &shape(void) const { return _shape; }
+
+public:
+ float at(uint32_t ch, uint32_t row, uint32_t col) const override
+ {
+ uint32_t index = index_of(_shape, ch, row, col);
+
+ const auto arr = reinterpret_cast<const float *>(_ptr);
+
+ return arr[index];
+ }
+ float at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const override
+ {
+ uint32_t index = index_of(_shape, batch, ch, row, col);
+
+ const auto arr = reinterpret_cast<const float *>(_ptr);
+
+ return arr[index];
+ }
+
+private:
+ nnfw::util::feature::Shape _shape;
+
+private:
+ const uint8_t *_ptr;
+ const size_t _len;
+};
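+
+// Usage sketch (hypothetical buffer): wrap a raw NHWC feature buffer for
+// read-only access
+//
+//   nnfw::util::feature::Shape shape; // N, C, H, W filled by the caller
+//   Reader<float> reader{shape, buffer, len};
+//   float value = reader.at(0 /* ch */, 0 /* row */, 0 /* col */);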
+
+} // namespace feature
+} // namespace nnapi
+} // namespace internal
+
+#endif // __INTERNAL_NNAPI_FEATURE_READER_H__
diff --git a/runtimes/neurun/src/internal/nnapi/feature/Utils.h b/runtimes/neurun/src/internal/nnapi/feature/Utils.h
new file mode 100644
index 000000000..e6e1e71bd
--- /dev/null
+++ b/runtimes/neurun/src/internal/nnapi/feature/Utils.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INTERNAL_NNAPI_FEATURE_UTILS_H__
+#define __INTERNAL_NNAPI_FEATURE_UTILS_H__
+
+#include "util/feature/Shape.h"
+
+namespace internal
+{
+namespace nnapi
+{
+namespace feature
+{
+
+inline uint32_t index_of(const ::nnfw::util::feature::Shape &shape, uint32_t ch, uint32_t row,
+ uint32_t col)
+{
+ uint32_t res = 0;
+
+ // NNAPI uses NHWC ordering
+ res += row * shape.W * shape.C;
+ res += col * shape.C;
+ res += ch;
+
+ return res;
+}
+
+inline uint32_t index_of(const ::nnfw::util::feature::Shape &shape, uint32_t batch, uint32_t ch,
+ uint32_t row, uint32_t col)
+{
+ uint32_t res = 0;
+
+ // NNAPI uses NHWC ordering
+ res += batch * shape.H * shape.W * shape.C;
+ res += row * shape.W * shape.C;
+ res += col * shape.C;
+ res += ch;
+
+ return res;
+}
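+
+// Worked example: shape {C = 3, H = 2, W = 2}, (ch = 1, row = 1, col = 0)
+// -> 1 * 2 * 3 + 0 * 3 + 1 = 7 (NHWC flattening)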
+
+} // namespace feature
+} // namespace nnapi
+} // namespace internal
+
+#endif // __INTERNAL_NNAPI_FEATURE_UTILS_H__
diff --git a/runtimes/neurun/src/internal/nnapi/feature/View.h b/runtimes/neurun/src/internal/nnapi/feature/View.h
new file mode 100644
index 000000000..60335dbf1
--- /dev/null
+++ b/runtimes/neurun/src/internal/nnapi/feature/View.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INTERNAL_NNAPI_FEATURE_VIEW_H__
+#define __INTERNAL_NNAPI_FEATURE_VIEW_H__
+
+#include "internal/nnapi/feature/Utils.h"
+
+#include "util/feature/Reader.h"
+
+namespace internal
+{
+namespace nnapi
+{
+namespace feature
+{
+
+template <typename T> class View final : public nnfw::util::feature::Reader<T>
+{
+public:
+ View(const ::nnfw::util::feature::Shape &shape, uint8_t *ptr, size_t len)
+ : _shape{shape}, _ptr{ptr}, _len{len}
+ {
+ // DO NOTHING
+ }
+
+public:
+ const nnfw::util::feature::Shape &shape(void) const { return _shape; }
+
+public:
+ T at(uint32_t ch, uint32_t row, uint32_t col) const override
+ {
+ uint32_t index = index_of(_shape, ch, row, col);
+
+ T *arr = reinterpret_cast<T *>(_ptr);
+
+ return arr[index];
+ }
+
+ T at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const override
+ {
+ uint32_t index = index_of(_shape, batch, ch, row, col);
+
+ T *arr = reinterpret_cast<T *>(_ptr);
+
+ return arr[index];
+ }
+
+ T &at(uint32_t ch, uint32_t row, uint32_t col)
+ {
+ uint32_t index = index_of(_shape, ch, row, col);
+
+ T *arr = reinterpret_cast<T *>(_ptr);
+
+ return arr[index];
+ }
+
+ T &at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col)
+ {
+ uint32_t index = index_of(_shape, batch, ch, row, col);
+
+ T *arr = reinterpret_cast<T *>(_ptr);
+
+ return arr[index];
+ }
+
+private:
+ nnfw::util::feature::Shape _shape;
+
+private:
+ uint8_t *_ptr;
+ const size_t _len;
+};
+
+} // namespace feature
+} // namespace nnapi
+} // namespace internal
+
+#endif // __INTERNAL_NNAPI_FEATURE_VIEW_H__
diff --git a/runtimes/neurun/src/internal/nnapi/kernel/Reader.h b/runtimes/neurun/src/internal/nnapi/kernel/Reader.h
new file mode 100644
index 000000000..9d93800bf
--- /dev/null
+++ b/runtimes/neurun/src/internal/nnapi/kernel/Reader.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INTERNAL_NNAPI_KERNEL_READER_H__
+#define __INTERNAL_NNAPI_KERNEL_READER_H__
+
+#include "util/kernel/Shape.h"
+#include "util/kernel/Reader.h"
+
+namespace internal
+{
+namespace nnapi
+{
+namespace kernel
+{
+
+template <typename T> class Reader final : public nnfw::util::kernel::Reader<T>
+{
+public:
+ Reader(const ::nnfw::util::kernel::Shape &shape, const uint8_t *base, size_t size)
+ : _shape{shape}, _base{base}, _size{size}
+ {
+ // DO NOTHING
+ }
+
+public:
+ const nnfw::util::kernel::Shape &shape(void) const { return _shape; }
+
+public:
+ T at(uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) const override
+ {
+ // NNAPI uses NHWC ordering
+ uint32_t index = 0;
+
+ index += nth * _shape.H * _shape.W * _shape.C;
+ index += row * _shape.W * _shape.C;
+ index += col * _shape.C;
+ index += ch;
+
+ const T *ptr = reinterpret_cast<const T *>(_base);
+
+ return ptr[index];
+ }
+
+private:
+ nnfw::util::kernel::Shape _shape;
+
+private:
+ const uint8_t *_base;
+ const size_t _size;
+};
+
+} // namespace kernel
+} // namespace nnapi
+} // namespace internal
+
+#endif // __INTERNAL_NNAPI_KERNEL_READER_H__
diff --git a/runtimes/neurun/src/internal/nnapi/kernel/View.h b/runtimes/neurun/src/internal/nnapi/kernel/View.h
new file mode 100644
index 000000000..86d19b87f
--- /dev/null
+++ b/runtimes/neurun/src/internal/nnapi/kernel/View.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INTERNAL_NNAPI_KERNEL_VIEW_H__
+#define __INTERNAL_NNAPI_KERNEL_VIEW_H__
+
+#include "util/kernel/Shape.h"
+#include "util/kernel/Reader.h"
+
+#include <arm_compute/core/ITensor.h>
+
+#include <cassert>
+
+namespace internal
+{
+namespace nnapi
+{
+namespace kernel
+{
+
+template <typename T> class View final : public nnfw::util::kernel::Reader<T>
+{
+public:
+ View(::arm_compute::ITensor *tensor) : _tensor{tensor}
+ {
+    // TODO Support data types other than F32
+    assert(tensor->info()->data_type() == ::arm_compute::DataType::F32);
+
+ _shape.N = tensor->info()->dimension(3);
+ _shape.C = tensor->info()->dimension(2);
+ _shape.H = tensor->info()->dimension(1);
+ _shape.W = tensor->info()->dimension(0);
+ }
+
+public:
+ const nnfw::util::kernel::Shape &shape(void) const { return _shape; }
+
+public:
+  // NOTE Parameter order follows nnfw::util::kernel::Reader<T>::at
+  T at(uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) const override
+  {
+    // NNAPI uses NHWC ordering
+    uint32_t index = 0;
+
+    index += nth * _shape.H * _shape.W * _shape.C;
+    index += row * _shape.W * _shape.C;
+    index += col * _shape.C;
+    index += ch;
+
+    const T *ptr = reinterpret_cast<const T *>(_tensor->buffer());
+
+    return ptr[index];
+  }
+
+  T &at(uint32_t nth, uint32_t ch, uint32_t row, uint32_t col)
+  {
+    // NNAPI uses NHWC ordering
+    uint32_t index = 0;
+
+    index += nth * _shape.H * _shape.W * _shape.C;
+    index += row * _shape.W * _shape.C;
+    index += col * _shape.C;
+    index += ch;
+
+    T *ptr = reinterpret_cast<T *>(_tensor->buffer());
+
+    return ptr[index];
+  }
+
+private:
+ nnfw::util::kernel::Shape _shape;
+ ::arm_compute::ITensor *_tensor;
+};
+
+} // namespace kernel
+} // namespace nnapi
+} // namespace internal
+
+#endif // __INTERNAL_NNAPI_KERNEL_VIEW_H__
diff --git a/runtimes/neurun/src/kernel/CMakeLists.txt b/runtimes/neurun/src/kernel/CMakeLists.txt
new file mode 100644
index 000000000..a39823102
--- /dev/null
+++ b/runtimes/neurun/src/kernel/CMakeLists.txt
@@ -0,0 +1,2 @@
+add_subdirectory(cpu)
+add_subdirectory(acl_cl)
diff --git a/runtimes/neurun/src/kernel/acl_cl/CMakeLists.txt b/runtimes/neurun/src/kernel/acl_cl/CMakeLists.txt
new file mode 100644
index 000000000..857fe6fe6
--- /dev/null
+++ b/runtimes/neurun/src/kernel/acl_cl/CMakeLists.txt
@@ -0,0 +1,15 @@
+file(GLOB SOURCES "*.cc")
+
+add_library(${LIB_NEURUN_KERNEL_ACL_CL} STATIC ${SOURCES})
+
+target_include_directories(${LIB_NEURUN_KERNEL_ACL_CL} PUBLIC ${NNFW_INCLUDE_DIR})
+target_include_directories(${LIB_NEURUN_KERNEL_ACL_CL} PUBLIC ${NEURUN_INCLUDE_DIR})
+target_include_directories(${LIB_NEURUN_KERNEL_ACL_CL} PUBLIC ${CMAKE_SOURCE_DIR}/externals/tensorflow) # TODO We should not need this
+
+target_link_libraries(${LIB_NEURUN_KERNEL_ACL_CL} arm_compute)
+target_link_libraries(${LIB_NEURUN_KERNEL_ACL_CL} tensorflow-lite) # TODO We should not need this
+target_link_libraries(${LIB_NEURUN_KERNEL_ACL_CL} ${LIB_NEURUN_KERNEL_CPU}) # TODO We should not need this
+
+set_target_properties(${LIB_NEURUN_KERNEL_ACL_CL} PROPERTIES POSITION_INDEPENDENT_CODE ON)
+set_target_properties(${LIB_NEURUN_KERNEL_ACL_CL} PROPERTIES OUTPUT_NAME kernel_acl_cl)
+install(TARGETS ${LIB_NEURUN_KERNEL_ACL_CL} DESTINATION lib/neurun)
diff --git a/runtimes/neurun/src/kernel/acl_cl/ConcatLayer.cc b/runtimes/neurun/src/kernel/acl_cl/ConcatLayer.cc
new file mode 100644
index 000000000..b75ac90f0
--- /dev/null
+++ b/runtimes/neurun/src/kernel/acl_cl/ConcatLayer.cc
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ConcatLayer.h"
+
+#include <arm_compute/runtime/CL/CLScheduler.h>
+
+#include <cassert>
+
+#include "backend/acl_cl/kernel/View.h"
+#include "logging.h"
+
+namespace
+{
+
+bool matchSizeExceptAxis(const ::arm_compute::ICLTensor *t1, const ::arm_compute::ICLTensor *t2,
+ uint32_t axis)
+{
+ assert(t1->info()->num_dimensions() <= 4);
+ assert(t2->info()->num_dimensions() <= 4);
+
+ for (uint32_t i = 0; i < 4; i++)
+ {
+ if (axis == i)
+ continue;
+ if (t1->info()->dimension(i) != t2->info()->dimension(i))
+ return false;
+ }
+ return true;
+}
+
+} // namespace {anonymous}
+
+namespace neurun
+{
+namespace kernel
+{
+namespace acl_cl
+{
+
+ConcatLayer::ConcatLayer()
+ : _input_allocs(), _output_alloc(nullptr), _axis(0), _input_type(OperandType::SCALAR_FLOAT32)
+{
+ // DO NOTHING
+}
+
+bool ConcatLayer::concatenationFloat32()
+{
+ // Input and output size check
+ {
+ // NOTE Support only tensor with dimension 4 or less
+
+ uint32_t axis_sum = 0;
+
+ for (auto input : _input_allocs)
+ {
+ assert(matchSizeExceptAxis(_output_alloc, input, _axis));
+ axis_sum += input->info()->dimension(_axis);
+ }
+
+ assert(_output_alloc->info()->dimension(_axis) == axis_sum);
+ }
+
+ VERBOSE(Concat_RUN) << "START Concat" << std::endl;
+
+ // Perform operation
+ {
+ uint32_t axis_offset = 0;
+
+ auto &queue = ::arm_compute::CLScheduler::get().queue();
+
+ _output_alloc->map(queue);
+ ::internal::arm_compute::kernel::View<float> output_view{_output_alloc};
+
+ for (auto input : _input_allocs)
+ {
+ input->map(queue);
+ const ::internal::arm_compute::kernel::View<float> input_reader{input};
+
+ for (uint32_t n = 0; n < input_reader.shape().N; n++)
+ {
+ for (uint32_t c = 0; c < input_reader.shape().C; c++)
+ {
+ for (uint32_t h = 0; h < input_reader.shape().H; h++)
+ {
+ for (uint32_t w = 0; w < input_reader.shape().W; w++)
+ {
+ uint32_t no = (_axis == 3) ? axis_offset : 0;
+ uint32_t co = (_axis == 2) ? axis_offset : 0;
+ uint32_t ho = (_axis == 1) ? axis_offset : 0;
+ uint32_t wo = (_axis == 0) ? axis_offset : 0;
+ output_view.at(n + no, c + co, h + ho, w + wo) = input_reader.at(n, c, h, w);
+ }
+ }
+ }
+ }
+ if (_axis == 3)
+ axis_offset += input_reader.shape().N;
+ if (_axis == 2)
+ axis_offset += input_reader.shape().C;
+ if (_axis == 1)
+ axis_offset += input_reader.shape().H;
+ if (_axis == 0)
+ axis_offset += input_reader.shape().W;
+
+ input->unmap(queue);
+ }
+ _output_alloc->unmap(queue);
+ }
+
+ VERBOSE(Concat_RUN) << "End Concat" << std::endl;
+
+ return true;
+}
+
+void ConcatLayer::configure(const std::vector<::arm_compute::ICLTensor *> &input_allocs,
+ int32_t axis, ::arm_compute::ICLTensor *output_alloc)
+{
+ _input_allocs = input_allocs;
+ _output_alloc = output_alloc;
+
+ assert(axis < 4);
+
+  // This map converts an NNAPI axis (given in NHWC order) into the reversed
+  // NCHW (i.e. WHCN) axis numbering used by concatenationFloat32()
+ static const uint32_t axis_map[] = {3, 1, 0, 2};
+ _axis = axis_map[axis];
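+  // e.g. a concat along channels (NNAPI axis = 3 in NHWC) maps to _axis = 2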
+
+ // TODO Support Quant8
+ _input_type = OperandType::TENSOR_FLOAT32;
+}
+
+void ConcatLayer::run()
+{
+ if (_input_type == OperandType::TENSOR_FLOAT32)
+ {
+ concatenationFloat32();
+ }
+ else if (_input_type == OperandType::TENSOR_QUANT8_ASYMM)
+ {
+ throw std::runtime_error("NYI - concatenationQuant8()");
+ }
+}
+
+} // namespace acl_cl
+} // namespace kernel
+} // namespace neurun
diff --git a/runtimes/neurun/src/kernel/acl_cl/ConcatLayer.h b/runtimes/neurun/src/kernel/acl_cl/ConcatLayer.h
new file mode 100644
index 000000000..4767721fa
--- /dev/null
+++ b/runtimes/neurun/src/kernel/acl_cl/ConcatLayer.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INTERNAL_KERNEL_ACL_CL_CONCAT_LAYER_H__
+#define __INTERNAL_KERNEL_ACL_CL_CONCAT_LAYER_H__
+
+#include <NeuralNetworks.h>
+
+#include <arm_compute/core/CL/ICLTensor.h>
+#include <arm_compute/runtime/IFunction.h>
+
+#include "graph/operand/DataType.h"
+
+using OperandType = neurun::graph::operand::DataType;
+
+namespace neurun
+{
+namespace kernel
+{
+namespace acl_cl
+{
+
+//
+// neurun::kernel::acl_cl::ConcatLayer
+// A naive implementation of ConcatLayer for ACL
+//
+
+class ConcatLayer : public ::arm_compute::IFunction
+{
+public:
+ ConcatLayer();
+
+public:
+ void configure(const std::vector<::arm_compute::ICLTensor *> &input_allocs,
+ int32_t axis /* NNAPI tensor axis from NHWC order */,
+ ::arm_compute::ICLTensor *output_alloc);
+
+  void run() override;
+
+private:
+ bool concatenationFloat32();
+
+private:
+ std::vector<::arm_compute::ICLTensor *> _input_allocs;
+ ::arm_compute::ICLTensor *_output_alloc;
+ int32_t _axis;
+ OperandType _input_type;
+};
+
+} // namespace acl_cl
+} // namespace kernel
+} // namespace neurun
+
+#endif // __INTERNAL_KERNEL_ACL_CL_CONCAT_LAYER_H__
diff --git a/runtimes/neurun/src/kernel/acl_cl/TensorConvertFromCommonLayer.cc b/runtimes/neurun/src/kernel/acl_cl/TensorConvertFromCommonLayer.cc
new file mode 100644
index 000000000..fa1d77579
--- /dev/null
+++ b/runtimes/neurun/src/kernel/acl_cl/TensorConvertFromCommonLayer.cc
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// THIS FILE IS UNUSED BUT LEFT FOR FUTURE REFERENCE
+//
+
+#if 0
+
+#include "TensorConvertFromCommonLayer.h"
+
+#include "internal/nnapi/feature/Reader.h"
+#include "backend/acl_cl/feature/View.h"
+
+#include <util/feature/IndexIterator.h>
+#include <arm_compute/runtime/CL/CLScheduler.h>
+
+namespace neurun
+{
+namespace kernel
+{
+namespace acl_cl
+{
+
+bool TensorConvertFromCommonLayer::convert()
+{
+ auto inputBuffer = _inputTensor->buffer();
+ auto inputSize = _inputTensor->info()->total_size();
+
+ auto &queue = ::arm_compute::CLScheduler::get().queue();
+
+ _outputTensor->map(queue);
+
+ if (_tensorShape.rank() == 2)
+ {
+ const auto len = _tensorShape.dim(1);
+
+ auto base = reinterpret_cast<const float *>(inputBuffer);
+
+ for (int32_t n = 0; n < len; ++n)
+ {
+ auto from = base + n;
+ auto into =
+ reinterpret_cast<float *>(_outputTensor->ptr_to_element(::arm_compute::Coordinates{n}));
+
+ *into = *from;
+ }
+ }
+ else if (_tensorShape.rank() == 4)
+ {
+ auto featureShape = _tensorShape.asFeature();
+
+ const ::internal::nnapi::feature::Reader<float> from{featureShape, inputBuffer, inputSize};
+ ::internal::arm_compute::feature::View<float> into{_outputTensor};
+
+ ::nnfw::util::feature::iterate(featureShape)
+ << [&](uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(batch, ch, row, col);
+ into.at(batch, ch, row, col) = value;
+ };
+ }
+
+ _outputTensor->unmap(queue);
+
+  return true;
+}
+
+void TensorConvertFromCommonLayer::configure(::internal::common::Tensor *inputTensor,
+ ::arm_compute::ICLTensor *outputTensor,
+ const ::neurun::graph::operand::Shape &tensorShape)
+{
+ _inputTensor = inputTensor;
+ _outputTensor = outputTensor;
+ _tensorShape = tensorShape;
+}
+
+void TensorConvertFromCommonLayer::run() { convert(); }
+
+} // namespace acl_cl
+} // namespace kernel
+} // namespace neurun
+
+#endif
diff --git a/runtimes/neurun/src/kernel/acl_cl/TensorConvertFromCommonLayer.h b/runtimes/neurun/src/kernel/acl_cl/TensorConvertFromCommonLayer.h
new file mode 100644
index 000000000..bd031a106
--- /dev/null
+++ b/runtimes/neurun/src/kernel/acl_cl/TensorConvertFromCommonLayer.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// THIS FILE IS UNUSED BUT LEFT FOR FUTURE REFERENCE
+//
+
+#if 0
+
+#ifndef __INTERNAL_KERNELS_ACL_CL_TENSOR_CONVERT_FROM_COMMON_LAYER_H__
+#define __INTERNAL_KERNELS_ACL_CL_TENSOR_CONVERT_FROM_COMMON_LAYER_H__
+
+#include <NeuralNetworks.h>
+
+#include <arm_compute/runtime/IFunction.h>
+#include <arm_compute/core/CL/ICLTensor.h>
+
+#include "internal/Model.h"
+#include "internal/common/Tensor.h"
+
+namespace neurun
+{
+namespace kernel
+{
+namespace acl_cl
+{
+
+class TensorConvertFromCommonLayer : public ::arm_compute::IFunction
+{
+public:
+ TensorConvertFromCommonLayer() {}
+
+public:
+ bool convert();
+
+ void configure(::internal::common::Tensor *inputTensor, ::arm_compute::ICLTensor *outputTensor,
+ const ::neurun::graph::operand::Shape &tensorShape);
+
+ void run();
+
+private:
+ ::internal::common::Tensor *_inputTensor;
+ ::arm_compute::ICLTensor *_outputTensor;
+
+ ::neurun::graph::operand::Shape _tensorShape{1};
+};
+
+} // namespace acl_cl
+} // namespace kernel
+} // namespace neurun
+
+#endif // __INTERNAL_KERNELS_ACL_CL_TENSOR_CONVERT_FROM_COMMON_LAYER_H__
+
+#endif
diff --git a/runtimes/neurun/src/kernel/acl_cl/TensorConvertToCommonLayer.cc b/runtimes/neurun/src/kernel/acl_cl/TensorConvertToCommonLayer.cc
new file mode 100644
index 000000000..985524bc3
--- /dev/null
+++ b/runtimes/neurun/src/kernel/acl_cl/TensorConvertToCommonLayer.cc
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// THIS FILE IS UNUSED BUT LEFT FOR FUTURE REFERENCE
+//
+
+#if 0
+
+#include "TensorConvertToCommonLayer.h"
+
+#include "backend/acl_cl/feature/View.h"
+#include "internal/nnapi/feature/View.h"
+
+#include <util/feature/IndexIterator.h>
+#include <arm_compute/runtime/CL/CLScheduler.h>
+
+namespace neurun
+{
+namespace kernel
+{
+namespace acl_cl
+{
+
+bool TensorConvertToCommonLayer::convert()
+{
+ auto outputBuffer = _outputTensor->buffer();
+ auto outputSize = _outputTensor->info()->total_size();
+
+ auto &queue = ::arm_compute::CLScheduler::get().queue();
+
+ _inputTensor->map(queue);
+
+ if (_tensorShape.rank() == 2)
+ {
+ const auto len = _tensorShape.dim(1);
+
+ auto base = reinterpret_cast<float *>(outputBuffer);
+
+ for (int32_t n = 0; n < len; ++n)
+ {
+ auto from = reinterpret_cast<const float *>(
+ _inputTensor->ptr_to_element(::arm_compute::Coordinates{n}));
+ auto into = base + n;
+
+ *into = *from;
+ }
+ }
+ else if (_tensorShape.rank() == 4)
+ {
+ auto featureShape = _tensorShape.asFeature();
+
+ const ::internal::arm_compute::feature::View<float> from{_inputTensor};
+ ::internal::nnapi::feature::View<float> into{featureShape, outputBuffer, outputSize};
+
+ ::nnfw::util::feature::iterate(featureShape)
+ << [&](uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(batch, ch, row, col);
+ into.at(batch, ch, row, col) = value;
+ };
+ }
+
+ _inputTensor->unmap(queue);
+
+  return true;
+}
+
+void TensorConvertToCommonLayer::configure(::arm_compute::ICLTensor *inputTensor,
+ ::internal::common::Tensor *outputTensor,
+ const ::neurun::graph::operand::Shape &tensorShape)
+{
+ _inputTensor = inputTensor;
+ _outputTensor = outputTensor;
+ _tensorShape = tensorShape;
+}
+
+void TensorConvertToCommonLayer::run() { convert(); }
+
+} // namespace acl_cl
+} // namespace kernel
+} // namespace neurun
+
+#endif
diff --git a/runtimes/neurun/src/kernel/acl_cl/TensorConvertToCommonLayer.h b/runtimes/neurun/src/kernel/acl_cl/TensorConvertToCommonLayer.h
new file mode 100644
index 000000000..576f1ee71
--- /dev/null
+++ b/runtimes/neurun/src/kernel/acl_cl/TensorConvertToCommonLayer.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// THIS FILE IS UNUSED BUT LEFT FOR FUTURE REFERENCE
+//
+
+#if 0
+
+#ifndef __INTERNAL_KERNELS_ACL_CL_TENSOR_CONVERT_TO_COMMON_LAYER_H__
+#define __INTERNAL_KERNELS_ACL_CL_TENSOR_CONVERT_TO_COMMON_LAYER_H__
+
+#include <NeuralNetworks.h>
+
+#include <arm_compute/runtime/IFunction.h>
+#include <arm_compute/core/CL/ICLTensor.h>
+
+#include "internal/Model.h"
+#include "internal/common/Tensor.h"
+
+namespace neurun
+{
+namespace kernel
+{
+namespace acl_cl
+{
+
+class TensorConvertToCommonLayer : public ::arm_compute::IFunction
+{
+public:
+ TensorConvertToCommonLayer() {}
+
+public:
+ bool convert();
+
+ void configure(::arm_compute::ICLTensor *inputTensor, ::internal::common::Tensor *outputTensor,
+ const ::neurun::graph::operand::Shape &tensorShape);
+
+ void run();
+
+private:
+ ::arm_compute::ICLTensor *_inputTensor;
+ ::internal::common::Tensor *_outputTensor;
+
+ ::neurun::graph::operand::Shape _tensorShape{1};
+};
+
+} // namespace acl_cl
+} // namespace kernel
+} // namespace neurun
+
+#endif // __INTERNAL_KERNELS_ACL_CL_TENSOR_CONVERT_TO_COMMON_LAYER_H__
+
+#endif
diff --git a/runtimes/neurun/src/kernel/cpu/AvgPoolLayer.cc b/runtimes/neurun/src/kernel/cpu/AvgPoolLayer.cc
new file mode 100644
index 000000000..2a6a84e10
--- /dev/null
+++ b/runtimes/neurun/src/kernel/cpu/AvgPoolLayer.cc
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AvgPoolLayer.h"
+
+#include "tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h"
+#include "kernel/cpu/OperationUtils.h"
+
+namespace neurun
+{
+namespace kernel
+{
+namespace cpu
+{
+
+#define AVGPOOLING_PARAMETERS \
+ uint32_t height = getSizeOfDimension(_inputShape, 1); \
+ uint32_t width = getSizeOfDimension(_inputShape, 2); \
+ uint32_t outHeight = getSizeOfDimension(_outputShape, 1); \
+ uint32_t outWidth = getSizeOfDimension(_outputShape, 2); \
+ \
+ uint32_t paddingHeight = (uint32_t)_paddingTop; \
+ uint32_t paddingWidth = (uint32_t)_paddingLeft;
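+
+// NOTE The macro above binds shape/padding locals for the tflite kernel calls
+// below; only paddingWidth and paddingHeight are consumed by AveragePool.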
+
+AvgPoolLayer::AvgPoolLayer()
+ : _inputData(nullptr), _outputData(nullptr), _inputShape(), _outputShape(), _paddingLeft(0),
+ _paddingTop(0), _paddingRight(0), _paddingBottom(0), _strideWidth(0), _strideHeight(0),
+ _kernelWidth(0), _kernelHeight(0), _activation(ANEURALNETWORKS_FUSED_NONE),
+ _inputType(OperandType::SCALAR_FLOAT32)
+{
+ // DO NOTHING
+}
+
+bool AvgPoolLayer::averagePoolFloat32()
+{
+
+ AVGPOOLING_PARAMETERS
+ float output_activation_min, output_activation_max;
+ CalculateActivationRangeFloat(_activation, &output_activation_min, &output_activation_max);
+
+ ::tflite::optimized_ops::AveragePool(
+ reinterpret_cast<const float *>(_inputData), convertShapeToDims(_inputShape), _strideWidth,
+ _strideHeight, paddingWidth, paddingHeight, _kernelWidth, _kernelHeight,
+ output_activation_min, output_activation_max, reinterpret_cast<float *>(_outputData),
+ convertShapeToDims(_outputShape));
+ return true;
+}
+bool AvgPoolLayer::averagePoolQuant8()
+{
+
+ AVGPOOLING_PARAMETERS
+ int32_t output_activation_min = 0;
+ int32_t output_activation_max = 0;
+ CalculateActivationRangeUint8(_activation, _outputShape, &output_activation_min,
+ &output_activation_max);
+
+ ::tflite::optimized_ops::AveragePool(_inputData, convertShapeToDims(_inputShape), _strideWidth,
+ _strideHeight, paddingWidth, paddingHeight, _kernelWidth,
+ _kernelHeight, output_activation_min, output_activation_max,
+ _outputData, convertShapeToDims(_outputShape));
+ return true;
+}
+
+void AvgPoolLayer::configure(uint8_t *inputData, const Shape inputShape, const uint32_t paddingLeft,
+ const uint32_t paddingRight, const uint32_t paddingTop,
+ const uint32_t paddingBottom, const uint32_t strideWidth,
+ const uint32_t strideHeight, const uint32_t kernelWidth,
+ const uint32_t kernelHeight, const FuseCode activation,
+ uint8_t *outputData, const Shape outputShape)
+{
+ _inputData = inputData;
+ _inputShape = inputShape;
+ _inputType = inputShape.type;
+ _paddingLeft = paddingLeft;
+ _paddingRight = paddingRight;
+ _paddingTop = paddingTop;
+ _paddingBottom = paddingBottom;
+ _strideWidth = strideWidth;
+ _strideHeight = strideHeight;
+ _kernelWidth = kernelWidth;
+ _kernelHeight = kernelHeight;
+ _activation = activation;
+ _outputData = outputData;
+ _outputShape = outputShape;
+}
+
+void AvgPoolLayer::run()
+{
+ if (_inputType == OperandType::TENSOR_FLOAT32)
+ {
+ averagePoolFloat32();
+ }
+ else if (_inputType == OperandType::TENSOR_QUANT8_ASYMM)
+ {
+ throw std::runtime_error{"AvgPoolLayer : Not tested for TENSOR_QUANT8_ASYMM"};
+ // averagePoolQuant8();
+ }
+}
+
+#undef AVGPOOLING_PARAMETERS
+
+} // namespace cpu
+} // namespace kernel
+} // namespace neurun
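The layer above follows the configure()/run() pattern shared by every cpu kernel in this commit: configure() captures raw byte buffers plus parameters, and run() dispatches on the operand type. A minimal usage sketch (shapes, buffers, and values are illustrative, not taken from the runtime):

#include <vector>
#include "AvgPoolLayer.h"

using namespace neurun::kernel::cpu;

void avgPoolExample()
{
  // 1x4x4x1 float input, 2x2 kernel, stride 2, no padding -> 1x2x2x1 output.
  Shape inputShape{OperandType::TENSOR_FLOAT32, {1, 4, 4, 1}, 1.0f, 0};
  Shape outputShape{OperandType::TENSOR_FLOAT32, {1, 2, 2, 1}, 1.0f, 0};

  std::vector<float> input(16, 1.0f); // all ones -> every window averages to 1.0
  std::vector<float> output(4, 0.0f);

  AvgPoolLayer pool;
  pool.configure(reinterpret_cast<uint8_t *>(input.data()), inputShape,
                 0 /*padL*/, 0 /*padR*/, 0 /*padT*/, 0 /*padB*/,
                 2 /*strideW*/, 2 /*strideH*/, 2 /*kernelW*/, 2 /*kernelH*/,
                 ANEURALNETWORKS_FUSED_NONE,
                 reinterpret_cast<uint8_t *>(output.data()), outputShape);
  pool.run(); // dispatches to averagePoolFloat32() for TENSOR_FLOAT32
}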
diff --git a/runtimes/neurun/src/kernel/cpu/AvgPoolLayer.h b/runtimes/neurun/src/kernel/cpu/AvgPoolLayer.h
new file mode 100644
index 000000000..9f390a9e1
--- /dev/null
+++ b/runtimes/neurun/src/kernel/cpu/AvgPoolLayer.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_KERNEL_CPU_AVGPOOLLAYER_H__
+#define __NEURUN_KERNEL_CPU_AVGPOOLLAYER_H__
+
+#include <NeuralNetworks.h>
+
+#include <arm_compute/runtime/IFunction.h>
+
+#include "kernel/cpu/OperationUtils.h"
+
+namespace neurun
+{
+namespace kernel
+{
+namespace cpu
+{
+
+class AvgPoolLayer : public ::arm_compute::IFunction
+{
+public:
+ AvgPoolLayer();
+
+public:
+ bool averagePoolFloat32();
+
+ bool averagePoolQuant8();
+
+ void configure(uint8_t *inputData, const Shape inputShape, const uint32_t paddingLeft,
+ const uint32_t paddingRight, const uint32_t paddingTop,
+ const uint32_t paddingBottom, const uint32_t strideWidth,
+ const uint32_t strideHeight, const uint32_t kernelWidth,
+ const uint32_t kernelHeight, const FuseCode activation, uint8_t *outputData,
+ const Shape outputShape);
+
+ void run();
+
+private:
+ uint8_t *_inputData;
+ uint8_t *_outputData;
+
+ Shape _inputShape;
+ Shape _outputShape;
+
+ uint32_t _paddingLeft;
+ uint32_t _paddingTop;
+ uint32_t _paddingRight;
+ uint32_t _paddingBottom;
+
+ uint32_t _strideWidth;
+ uint32_t _strideHeight;
+ uint32_t _kernelWidth;
+ uint32_t _kernelHeight;
+
+ FuseCode _activation;
+
+ OperandType _inputType;
+};
+
+} // namespace cpu
+} // namespace kernel
+} // namespace neurun
+
+#endif // __NEURUN_KERNEL_CPU_AVGPOOLLAYER_H__
diff --git a/runtimes/neurun/src/kernel/cpu/CMakeLists.txt b/runtimes/neurun/src/kernel/cpu/CMakeLists.txt
new file mode 100644
index 000000000..dddf154c3
--- /dev/null
+++ b/runtimes/neurun/src/kernel/cpu/CMakeLists.txt
@@ -0,0 +1,14 @@
+file(GLOB SOURCES "*.cc")
+
+add_library(${LIB_NEURUN_KERNEL_CPU} STATIC ${SOURCES})
+
+target_include_directories(${LIB_NEURUN_KERNEL_CPU} PUBLIC ${NNFW_INCLUDE_DIR})
+target_include_directories(${LIB_NEURUN_KERNEL_CPU} PUBLIC ${NEURUN_INCLUDE_DIR})
+target_include_directories(${LIB_NEURUN_KERNEL_CPU} PUBLIC ${CMAKE_SOURCE_DIR}/externals/tensorflow)
+
+target_link_libraries(${LIB_NEURUN_KERNEL_CPU} arm_compute) # TODO We should not need this
+target_link_libraries(${LIB_NEURUN_KERNEL_CPU} tensorflow-lite)
+
+set_target_properties(${LIB_NEURUN_KERNEL_CPU} PROPERTIES POSITION_INDEPENDENT_CODE ON)
+set_target_properties(${LIB_NEURUN_KERNEL_CPU} PROPERTIES OUTPUT_NAME kernel_cpu)
+install(TARGETS ${LIB_NEURUN_KERNEL_CPU} DESTINATION lib/neurun)
diff --git a/runtimes/neurun/src/kernel/cpu/ConcatLayer.cc b/runtimes/neurun/src/kernel/cpu/ConcatLayer.cc
new file mode 100644
index 000000000..5fe5e3993
--- /dev/null
+++ b/runtimes/neurun/src/kernel/cpu/ConcatLayer.cc
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ConcatLayer.h"
+
+#include "tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h"
+#include "kernel/cpu/OperationUtils.h"
+
+namespace neurun
+{
+namespace kernel
+{
+namespace cpu
+{
+
+ConcatLayer::ConcatLayer()
+ : _inputDataPtrs(), _outputData(nullptr), _axis(0), _inputShapes(), _outputShape(),
+ _inputType(OperandType::SCALAR_FLOAT32)
+{
+ // DO NOTHING
+}
+
+bool ConcatLayer::concatenationFloat32()
+{
+ int num_inputs = _inputShapes.size();
+ std::vector<::tflite::Dims<4> *> inputDimsPtr(num_inputs);
+ std::vector<::tflite::Dims<4>> inputDims(num_inputs);
+ for (int i = 0; i < num_inputs; i++)
+ {
+ inputDims[i] = convertShapeToDims(_inputShapes[i]);
+ inputDimsPtr[i] = &inputDims[i];
+ }
+
+ std::vector<const float *> inputFloatPtrs;
+
+ for (auto ptr : _inputDataPtrs)
+ {
+ inputFloatPtrs.emplace_back(reinterpret_cast<const float *>(ptr));
+ }
+
+ ::tflite::optimized_ops::Concatenation<::tflite::FusedActivationFunctionType::kNone, float>(
+ getNumberOfDimensions(_outputShape) - _axis - 1, inputFloatPtrs.data(), inputDimsPtr.data(),
+ num_inputs, reinterpret_cast<float *>(_outputData), convertShapeToDims(_outputShape));
+ return true;
+}
+
+bool ConcatLayer::concatenationQuant8()
+{
+ int num_inputs = _inputShapes.size();
+ std::vector<::tflite::Dims<4> *> inputDimsPtr(num_inputs);
+ std::vector<::tflite::Dims<4>> inputDims(num_inputs);
+ for (int i = 0; i < num_inputs; i++)
+ {
+ inputDims[i] = convertShapeToDims(_inputShapes[i]);
+ inputDimsPtr[i] = &inputDims[i];
+ }
+ ::tflite::optimized_ops::Concatenation<::tflite::FusedActivationFunctionType::kNone, uint8_t>(
+ getNumberOfDimensions(_outputShape) - _axis - 1, _inputDataPtrs.data(), inputDimsPtr.data(),
+ num_inputs, _outputData, convertShapeToDims(_outputShape));
+ return true;
+}
+
+void ConcatLayer::configure(const std::vector<const uint8_t *> &inputDataPtrs,
+ const std::vector<Shape> &inputShapes, int32_t axis,
+ uint8_t *outputData, const Shape outputShape)
+{
+ _inputDataPtrs = inputDataPtrs;
+
+  for (const auto &shape : inputShapes)
+  {
+    _inputShapes.emplace_back(shape);
+    // All inputs are assumed to share one operand type; the last one wins.
+    _inputType = shape.type;
+  }
+
+ _axis = axis;
+
+ _outputData = outputData;
+ _outputShape = outputShape;
+}
+
+void ConcatLayer::run()
+{
+ if (_inputType == OperandType::TENSOR_FLOAT32)
+ {
+ concatenationFloat32();
+ }
+ else if (_inputType == OperandType::TENSOR_QUANT8_ASYMM)
+ {
+ throw std::runtime_error{"ConcatLayer : Not tested for TENSOR_QUANT8_ASYMM"};
+ // concatenationQuant8();
+ }
+}
+
+} // namespace cpu
+} // namespace kernel
+} // namespace neurun
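The only subtle step above is the axis translation: NNAPI counts axes from the outermost dimension, while tflite's Dims<4> stores dimensions in reverse order, which is why both paths pass getNumberOfDimensions(_outputShape) - _axis - 1. A small sketch of the mapping (the helper name is illustrative, not part of the runtime):

#include <cstdint>

// Mirror an NNAPI axis into tflite's reversed Dims<4> ordering.
inline int toTfliteAxis(uint32_t rank, int32_t nnapiAxis)
{
  return static_cast<int>(rank) - nnapiAxis - 1;
}

// For rank-4 NHWC tensors: concatenating along channels (NNAPI axis 3)
// becomes tflite axis 4 - 3 - 1 = 0, and along batch (NNAPI axis 0) it
// becomes tflite axis 3.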
diff --git a/runtimes/neurun/src/kernel/cpu/ConcatLayer.h b/runtimes/neurun/src/kernel/cpu/ConcatLayer.h
new file mode 100644
index 000000000..9aacab5e8
--- /dev/null
+++ b/runtimes/neurun/src/kernel/cpu/ConcatLayer.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_KERNEL_CPU_CONCATLAYER_H__
+#define __NEURUN_KERNEL_CPU_CONCATLAYER_H__
+
+#include <NeuralNetworks.h>
+
+#include <arm_compute/runtime/IFunction.h>
+
+#include "kernel/cpu/OperationUtils.h"
+
+namespace neurun
+{
+namespace kernel
+{
+namespace cpu
+{
+
+class ConcatLayer : public ::arm_compute::IFunction
+{
+public:
+ ConcatLayer();
+
+public:
+ bool concatenationFloat32();
+
+ bool concatenationQuant8();
+
+ void configure(const std::vector<const uint8_t *> &inputDataPtrs,
+ const std::vector<Shape> &inputShapes, int32_t axis, uint8_t *outputData,
+ const Shape outputShape);
+
+ void run();
+
+private:
+ std::vector<const uint8_t *> _inputDataPtrs;
+ uint8_t *_outputData;
+
+ int32_t _axis;
+
+ std::vector<Shape> _inputShapes;
+ Shape _outputShape;
+
+ OperandType _inputType;
+};
+
+} // namespace cpu
+} // namespace kernel
+} // namespace neurun
+
+#endif // __NEURUN_KERNEL_CPU_CONCATLAYER_H__
diff --git a/runtimes/neurun/src/kernel/cpu/ConvolutionLayer.cc b/runtimes/neurun/src/kernel/cpu/ConvolutionLayer.cc
new file mode 100644
index 000000000..81e88e0f0
--- /dev/null
+++ b/runtimes/neurun/src/kernel/cpu/ConvolutionLayer.cc
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ConvolutionLayer.h"
+
+#include "tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h"
+#include "kernel/cpu/OperationUtils.h"
+
+#include <mutex>
+
+namespace neurun
+{
+namespace kernel
+{
+namespace cpu
+{
+
+// If possible we will use this static buffer for the im2col scratch tensor.
+static constexpr int kStaticBufferSize = 1605632;
+static char static_scratch_buffer[kStaticBufferSize];
+static std::mutex executionMutex;
+
+#define ANDROID_NN_CONV_PARAMETERS(Type) \
+ uint32_t height = getSizeOfDimension(_inputShape, 1); \
+ uint32_t width = getSizeOfDimension(_inputShape, 2); \
+ uint32_t kernelHeight = getSizeOfDimension(_kernelShape, 1); \
+ uint32_t kernelWidth = getSizeOfDimension(_kernelShape, 2); \
+ uint32_t outHeight = getSizeOfDimension(_outputShape, 1); \
+ uint32_t outWidth = getSizeOfDimension(_outputShape, 2); \
+ uint32_t inDepth = getSizeOfDimension(_inputShape, 3); \
+ \
+ uint32_t paddingHeight = (uint32_t)_paddingTop; \
+ uint32_t paddingWidth = (uint32_t)_paddingLeft; \
+ \
+ ::tflite::Dims<4> im2colDim; \
+ im2colDim.sizes[3] = (int)getSizeOfDimension(_outputShape, 0); \
+ im2colDim.sizes[2] = (int)getSizeOfDimension(_outputShape, 1); \
+ im2colDim.sizes[1] = (int)getSizeOfDimension(_outputShape, 2); \
+ im2colDim.sizes[0] = (int)inDepth * kernelHeight * kernelWidth; \
+ \
+ im2colDim.strides[0] = 1; \
+ for (int i = 1; i < 4; i++) \
+ { \
+ im2colDim.strides[i] = im2colDim.strides[i - 1] * im2colDim.sizes[i - 1]; \
+ } \
+ Type *im2colData = nullptr; \
+ uint64_t im2colByteSize = sizeof(Type); \
+ std::unique_ptr<Type[]> im2colGuard; \
+ for (int i = 0; i < 4; i++) \
+ { \
+ im2colByteSize *= im2colDim.sizes[i]; \
+ } \
+ /* http://b/77982879, tflite::optimized_ops::Conv uses int for offsets */ \
+ if (im2colByteSize >= 0x7fffffff) \
+ { \
+ std::cout << "Conv size is too large, not enough memory" << std::endl; \
+ return false; \
+ } \
+ if (im2colByteSize <= kStaticBufferSize) \
+ { \
+ im2colData = reinterpret_cast<Type *>(static_scratch_buffer); \
+ } \
+ else \
+ { \
+ im2colData = new (std::nothrow) Type[im2colByteSize / sizeof(Type)]; \
+ if (im2colData == nullptr) \
+ { \
+ std::cout << "Conv size is too large, not enough memory" << std::endl; \
+ return false; \
+ } \
+ im2colGuard.reset(im2colData); \
+ }
+
+ConvolutionLayer::ConvolutionLayer()
+ : _inputData(nullptr), _kernelData(nullptr), _outputData(nullptr), _biasData(nullptr),
+ _inputShape(), _kernelShape(), _outputShape(), _biasShape(), _paddingLeft(0), _paddingTop(0),
+ _paddingRight(0), _paddingBottom(0), _strideWidth(0), _strideHeight(0),
+ _activation(ANEURALNETWORKS_FUSED_NONE), _inputType(OperandType::SCALAR_FLOAT32)
+{
+ // DO NOTHING
+}
+
+bool ConvolutionLayer::convFloat32()
+{
+ ANDROID_NN_CONV_PARAMETERS(float)
+
+ const ::tflite::Dims<4> &kernel_dim = convertShapeToDims(_kernelShape);
+ const int kernel_width = ArraySize(kernel_dim, 1);
+ const int kernel_height = ArraySize(kernel_dim, 2);
+ const bool need_im2col =
+ _strideWidth != 1 || _strideHeight != 1 || kernel_width != 1 || kernel_height != 1;
+
+ float *im2colDataToPass = nullptr;
+ if (need_im2col)
+ {
+ im2colDataToPass = im2colData;
+ }
+
+ float output_activation_min, output_activation_max;
+ CalculateActivationRangeFloat(_activation, &output_activation_min, &output_activation_max);
+ int32_t dilationWidthFactor = 1, dilationHeightFactor = 1;
+ ::tflite::optimized_ops::Conv(
+ reinterpret_cast<const float *>(_inputData), convertShapeToDims(_inputShape),
+ reinterpret_cast<const float *>(_kernelData), convertShapeToDims(_kernelShape),
+ reinterpret_cast<const float *>(_biasData), convertShapeToDims(_biasShape), _strideWidth,
+ _strideHeight, dilationWidthFactor, dilationHeightFactor, paddingWidth, paddingHeight,
+ output_activation_min, output_activation_max, reinterpret_cast<float *>(_outputData),
+ convertShapeToDims(_outputShape), im2colDataToPass, im2colDim);
+ return true;
+}
+
+bool ConvolutionLayer::convQuant8()
+{
+ ANDROID_NN_CONV_PARAMETERS(uint8_t)
+ int32_t inputOffset = -_inputShape.offset;
+ int32_t kernelOffset = -_kernelShape.offset;
+ int32_t outputOffset = _outputShape.offset;
+ float real_multiplier = 0.0;
+ int32_t output_multiplier = 0;
+ int32_t output_shift = 0;
+ int32_t output_activation_min = 0;
+ int32_t output_activation_max = 0;
+ if (!GetQuantizedConvolutionMultipler(_inputShape, _kernelShape, _biasShape, _outputShape,
+ &real_multiplier) ||
+ !QuantizeMultiplierSmallerThanOne(real_multiplier, &output_multiplier, &output_shift))
+ {
+ return false;
+ }
+ CalculateActivationRangeUint8(_activation, _outputShape, &output_activation_min,
+ &output_activation_max);
+ static gemmlowp::GemmContext gemm_context;
+ // Prevent concurrent executions that may access the scratch buffer and
+ // gemm_context.
+ std::unique_lock<std::mutex> lock(executionMutex);
+  // Allow gemmlowp to automatically decide how many threads to use.
+ gemm_context.set_max_num_threads(0);
+ ::tflite::optimized_ops::Conv(
+ _inputData, convertShapeToDims(_inputShape), inputOffset, _kernelData,
+ convertShapeToDims(_kernelShape), kernelOffset, reinterpret_cast<const int32_t *>(_biasData),
+ convertShapeToDims(_biasShape), _strideWidth, _strideHeight, paddingWidth, paddingHeight,
+ outputOffset, output_multiplier, output_shift, output_activation_min, output_activation_max,
+ _outputData, convertShapeToDims(_outputShape), im2colData, im2colDim, &gemm_context);
+ return true;
+}
+
+void ConvolutionLayer::configure(uint8_t *inputData, const Shape inputShape, uint8_t *kernelData,
+ const Shape kernelShape, uint8_t *biasData, const Shape biasShape,
+ const uint32_t paddingLeft, const uint32_t paddingRight,
+ const uint32_t paddingTop, const uint32_t paddingBottom,
+ const uint32_t strideWidth, const uint32_t strideHeight,
+ const FuseCode activation, uint8_t *outputData,
+ const Shape outputShape)
+{
+ _inputData = inputData;
+ _inputShape = inputShape;
+ _inputType = inputShape.type;
+ _kernelData = kernelData;
+ _kernelShape = kernelShape;
+ _biasData = biasData;
+ _biasShape = biasShape;
+ _paddingLeft = paddingLeft;
+ _paddingRight = paddingRight;
+ _paddingTop = paddingTop;
+ _paddingBottom = paddingBottom;
+ _strideWidth = strideWidth;
+ _strideHeight = strideHeight;
+ _activation = activation;
+ _outputData = outputData;
+ _outputShape = outputShape;
+}
+
+void ConvolutionLayer::run()
+{
+ if (_inputType == OperandType::TENSOR_FLOAT32)
+ {
+ convFloat32();
+ }
+ else if (_inputType == OperandType::TENSOR_QUANT8_ASYMM)
+ {
+ throw std::runtime_error{"ConvolutionLayer : Not tested for TENSOR_QUANT8_ASYMM"};
+ // convQuant8();
+ }
+}
+
+#undef ANDROID_NN_CONV_PARAMETERS
+
+} // namespace cpu
+} // namespace kernel
+} // namespace neurun
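The ANDROID_NN_CONV_PARAMETERS macro above sizes the im2col scratch buffer as one kernel-sized patch per output position, serving it from the 1605632-byte static buffer when it fits and from the heap otherwise. A sketch of that computation (shapes below are illustrative):

#include <cstdint>

// Bytes needed by the im2col buffer: elemSize per value, one inDepth*kH*kW
// patch per (batch, outH, outW) output position.
uint64_t im2colBytes(uint32_t batch, uint32_t outH, uint32_t outW, uint32_t inDepth,
                     uint32_t kernelH, uint32_t kernelW, uint64_t elemSize)
{
  return elemSize * batch * outH * outW * inDepth * kernelH * kernelW;
}

// Example: a float 1x112x112 output over 32 input channels with a 3x3 kernel
// needs 4 * 1 * 112 * 112 * 32 * 3 * 3 = 14,450,688 bytes, so it would be
// heap-allocated; anything at or below 1,605,632 bytes reuses the static
// scratch buffer instead.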
diff --git a/runtimes/neurun/src/kernel/cpu/ConvolutionLayer.h b/runtimes/neurun/src/kernel/cpu/ConvolutionLayer.h
new file mode 100644
index 000000000..b7afbcec6
--- /dev/null
+++ b/runtimes/neurun/src/kernel/cpu/ConvolutionLayer.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_KERNEL_CPU_CONVOLUTIONLAYER_H__
+#define __NEURUN_KERNEL_CPU_CONVOLUTIONLAYER_H__
+
+#include <NeuralNetworks.h>
+
+#include <arm_compute/runtime/IFunction.h>
+
+#include "kernel/cpu/OperationUtils.h"
+
+namespace neurun
+{
+namespace kernel
+{
+namespace cpu
+{
+
+class ConvolutionLayer : public ::arm_compute::IFunction
+{
+public:
+ ConvolutionLayer();
+
+public:
+ bool convFloat32();
+
+ bool convQuant8();
+
+ void configure(uint8_t *inputData, const Shape inputShape, uint8_t *kernelData,
+ const Shape kernelShape, uint8_t *biasData, const Shape biasShape,
+ const uint32_t paddingLeft, const uint32_t paddingRight, const uint32_t paddingTop,
+                 const uint32_t paddingBottom, const uint32_t strideWidth,
+                 const uint32_t strideHeight, const FuseCode activation, uint8_t *outputData,
+                 const Shape outputShape);
+
+ void run();
+
+private:
+ uint8_t *_inputData;
+ uint8_t *_kernelData;
+ uint8_t *_outputData;
+ uint8_t *_biasData;
+
+ Shape _inputShape;
+ Shape _kernelShape;
+ Shape _outputShape;
+ Shape _biasShape;
+
+ uint32_t _paddingLeft;
+ uint32_t _paddingTop;
+ uint32_t _paddingRight;
+ uint32_t _paddingBottom;
+
+ uint32_t _strideWidth;
+ uint32_t _strideHeight;
+
+ FuseCode _activation;
+
+ OperandType _inputType;
+};
+
+} // namespace cpu
+} // namespace kernel
+} // namespace neurun
+
+#endif // __NEURUN_KERNEL_CPU_CONVOLUTIONLAYER_H__
diff --git a/runtimes/neurun/src/kernel/cpu/FullyConnectedLayer.cc b/runtimes/neurun/src/kernel/cpu/FullyConnectedLayer.cc
new file mode 100644
index 000000000..41b9afc0c
--- /dev/null
+++ b/runtimes/neurun/src/kernel/cpu/FullyConnectedLayer.cc
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FullyConnectedLayer.h"
+
+#include "tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h"
+#include "tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h"
+#include "kernel/cpu/OperationUtils.h"
+
+#include <mutex>
+
+namespace neurun
+{
+namespace kernel
+{
+namespace cpu
+{
+
+FullyConnectedLayer::FullyConnectedLayer()
+ : _inputData(nullptr), _weightsData(nullptr), _biasData(nullptr), _outputData(nullptr),
+ _inputShape(), _weightsShape(), _biasShape(), _outputShape(),
+ _activation(ANEURALNETWORKS_FUSED_NONE), _inputType(OperandType::SCALAR_FLOAT32)
+{
+ // DO NOTHING
+}
+
+// executionMutex guards concurrent access to non-thread-safe resources
+// such as gemmlowp::GemmContext.
+// std::mutex is safe for pthreads on Android.
+static std::mutex executionMutex;
+bool FullyConnectedLayer::fullyConnectedFloat32()
+{
+ float output_activation_min, output_activation_max;
+ CalculateActivationRangeFloat(_activation, &output_activation_min, &output_activation_max);
+ // b/80425683, optimized implementation produces incorrect results when the
+  // number of input elements is the square of batch_size.
+ uint32_t batch_size = getSizeOfDimension(_outputShape, 0);
+ uint32_t input_n_elements = getNumberOfElements(_inputShape);
+ if (batch_size * batch_size == input_n_elements)
+ {
+ ::tflite::reference_ops::FullyConnected(
+ reinterpret_cast<const float *>(_inputData), convertShapeToDims(_inputShape),
+ reinterpret_cast<const float *>(_weightsData), convertShapeToDims(_weightsShape),
+ reinterpret_cast<const float *>(_biasData), convertShapeToDims(_biasShape),
+ output_activation_min, output_activation_max, reinterpret_cast<float *>(_outputData),
+ convertShapeToDims(_outputShape));
+ }
+ else
+ {
+ ::tflite::optimized_ops::FullyConnected(
+ reinterpret_cast<const float *>(_inputData), convertShapeToDims(_inputShape),
+ reinterpret_cast<const float *>(_weightsData), convertShapeToDims(_weightsShape),
+ reinterpret_cast<const float *>(_biasData), convertShapeToDims(_biasShape),
+ output_activation_min, output_activation_max, reinterpret_cast<float *>(_outputData),
+ convertShapeToDims(_outputShape));
+ }
+ return true;
+}
+
+bool FullyConnectedLayer::fullyConnectedQuant8()
+{
+ int32_t inputOffset = -_inputShape.offset;
+ int32_t weightsOffset = -_weightsShape.offset;
+ int32_t outputOffset = _outputShape.offset;
+ float real_multiplier = 0.0;
+ int32_t output_multiplier = 0;
+ int32_t output_shift = 0;
+ int32_t output_activation_min = 0;
+ int32_t output_activation_max = 0;
+  // Caution: the name 'Convolution' may be misleading here; it is the same
+  // multiplier math, reused for fully-connected layers.
+ if (!GetQuantizedConvolutionMultipler(_inputShape, _weightsShape, _biasShape, _outputShape,
+ &real_multiplier) ||
+ !QuantizeMultiplierSmallerThanOne(real_multiplier, &output_multiplier, &output_shift))
+ {
+ return false;
+ }
+ CalculateActivationRangeUint8(_activation, _outputShape, &output_activation_min,
+ &output_activation_max);
+ static gemmlowp::GemmContext gemm_context;
+ // Prevent concurrent executions that access gemm_context.
+ std::unique_lock<std::mutex> lock(executionMutex);
+  // Allow gemmlowp to automatically decide how many threads to use.
+ gemm_context.set_max_num_threads(0);
+ ::tflite::optimized_ops::FullyConnected(
+ _inputData, convertShapeToDims(_inputShape), inputOffset, _weightsData,
+ convertShapeToDims(_weightsShape), weightsOffset,
+ reinterpret_cast<const int32_t *>(_biasData), convertShapeToDims(_biasShape), outputOffset,
+ output_multiplier, output_shift, output_activation_min, output_activation_max, _outputData,
+ convertShapeToDims(_outputShape), &gemm_context);
+ return true;
+}
+
+void FullyConnectedLayer::configure(uint8_t *inputData, const Shape inputShape,
+ uint8_t *weightsData, const Shape weightsShape,
+ uint8_t *biasData, const Shape biasShape, FuseCode activation,
+ uint8_t *outputData, const Shape outputShape)
+{
+ _inputData = inputData;
+ _inputShape = inputShape;
+ _inputType = inputShape.type;
+ _weightsData = weightsData;
+ _weightsShape = weightsShape;
+ _biasData = biasData;
+ _biasShape = biasShape;
+ _activation = activation;
+ _outputData = outputData;
+ _outputShape = outputShape;
+}
+
+void FullyConnectedLayer::run()
+{
+ if (_inputType == OperandType::TENSOR_FLOAT32)
+ {
+ fullyConnectedFloat32();
+ }
+ else if (_inputType == OperandType::TENSOR_QUANT8_ASYMM)
+ {
+ throw std::runtime_error{"FullyConnectedLayer : Not tested for TENSOR_QUANT8_ASYMM"};
+ // fullyConnectedQuant8();
+ }
+}
+
+} // namespace cpu
+} // namespace kernel
+} // namespace neurun
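fullyConnectedFloat32() falls back to the reference kernel precisely when the number of input elements equals the square of the batch size, the shape family where b/80425683 showed the optimized kernel producing wrong results. A sketch of the dispatch condition (helper name is illustrative):

#include <cstdint>

// True when the b/80425683 workaround applies and the reference
// implementation should be used instead of the optimized one.
inline bool useReferenceFullyConnected(uint32_t batchSize, uint32_t inputElements)
{
  return batchSize * batchSize == inputElements;
}

// Example: a [4, 4] input (16 elements, batch_size 4) takes the reference
// path; a [4, 8] input (32 elements) takes the optimized path.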
diff --git a/runtimes/neurun/src/kernel/cpu/FullyConnectedLayer.h b/runtimes/neurun/src/kernel/cpu/FullyConnectedLayer.h
new file mode 100644
index 000000000..b1ba172b0
--- /dev/null
+++ b/runtimes/neurun/src/kernel/cpu/FullyConnectedLayer.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_KERNEL_CPU_FULLYCONNECTEDLAYER_H__
+#define __NEURUN_KERNEL_CPU_FULLYCONNECTEDLAYER_H__
+
+#include <NeuralNetworks.h>
+
+#include <arm_compute/runtime/IFunction.h>
+
+#include "kernel/cpu/OperationUtils.h"
+
+namespace neurun
+{
+namespace kernel
+{
+namespace cpu
+{
+
+class FullyConnectedLayer : public ::arm_compute::IFunction
+{
+public:
+ FullyConnectedLayer();
+
+public:
+ bool fullyConnectedFloat32();
+
+ bool fullyConnectedQuant8();
+
+ void configure(uint8_t *inputData, const Shape inputShape, uint8_t *weightsData,
+ const Shape weightsShape, uint8_t *biasData, const Shape biasShape,
+ FuseCode activation, uint8_t *outputData, const Shape outputShape);
+
+ void run();
+
+private:
+ uint8_t *_inputData;
+ uint8_t *_weightsData;
+ uint8_t *_biasData;
+ uint8_t *_outputData;
+
+ Shape _inputShape;
+ Shape _weightsShape;
+ Shape _biasShape;
+ Shape _outputShape;
+
+ FuseCode _activation;
+
+ OperandType _inputType;
+};
+
+} // namespace cpu
+} // namespace kernel
+} // namespace neurun
+
+#endif // __NEURUN_KERNEL_CPU_FULLYCONNECTEDLAYER_H__
diff --git a/runtimes/neurun/src/kernel/cpu/MaxPoolLayer.cc b/runtimes/neurun/src/kernel/cpu/MaxPoolLayer.cc
new file mode 100644
index 000000000..3d96bb401
--- /dev/null
+++ b/runtimes/neurun/src/kernel/cpu/MaxPoolLayer.cc
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MaxPoolLayer.h"
+
+#include "tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h"
+#include "kernel/cpu/OperationUtils.h"
+
+namespace neurun
+{
+namespace kernel
+{
+namespace cpu
+{
+
+#define MAXPOOLING_PARAMETERS \
+ uint32_t height = getSizeOfDimension(_inputShape, 1); \
+ uint32_t width = getSizeOfDimension(_inputShape, 2); \
+ uint32_t outHeight = getSizeOfDimension(_outputShape, 1); \
+ uint32_t outWidth = getSizeOfDimension(_outputShape, 2); \
+ \
+ uint32_t paddingHeight = (uint32_t)_paddingTop; \
+ uint32_t paddingWidth = (uint32_t)_paddingLeft;
+
+MaxPoolLayer::MaxPoolLayer()
+ : _inputData(nullptr), _outputData(nullptr), _inputShape(), _outputShape(), _paddingLeft(0),
+ _paddingTop(0), _paddingRight(0), _paddingBottom(0), _strideWidth(0), _strideHeight(0),
+ _kernelWidth(0), _kernelHeight(0), _activation(ANEURALNETWORKS_FUSED_NONE),
+ _inputType(OperandType::SCALAR_FLOAT32)
+{
+ // DO NOTHING
+}
+
+bool MaxPoolLayer::maxPoolFloat32()
+{
+
+ MAXPOOLING_PARAMETERS
+ float output_activation_min, output_activation_max;
+ CalculateActivationRangeFloat(_activation, &output_activation_min, &output_activation_max);
+
+ ::tflite::optimized_ops::MaxPool(
+ reinterpret_cast<const float *>(_inputData), convertShapeToDims(_inputShape), _strideWidth,
+ _strideHeight, paddingWidth, paddingHeight, _kernelWidth, _kernelHeight,
+ output_activation_min, output_activation_max, reinterpret_cast<float *>(_outputData),
+ convertShapeToDims(_outputShape));
+ return true;
+}
+
+bool MaxPoolLayer::maxPoolQuant8()
+{
+
+ MAXPOOLING_PARAMETERS
+ int32_t output_activation_min = 0;
+ int32_t output_activation_max = 0;
+ CalculateActivationRangeUint8(_activation, _outputShape, &output_activation_min,
+ &output_activation_max);
+
+ ::tflite::optimized_ops::MaxPool(_inputData, convertShapeToDims(_inputShape), _strideWidth,
+ _strideHeight, paddingWidth, paddingHeight, _kernelWidth,
+ _kernelHeight, output_activation_min, output_activation_max,
+ _outputData, convertShapeToDims(_outputShape));
+ return true;
+}
+
+void MaxPoolLayer::configure(uint8_t *inputData, const Shape inputShape, const uint32_t paddingLeft,
+ const uint32_t paddingRight, const uint32_t paddingTop,
+ const uint32_t paddingBottom, const uint32_t strideWidth,
+ const uint32_t strideHeight, const uint32_t kernelWidth,
+ const uint32_t kernelHeight, const FuseCode activation,
+ uint8_t *outputData, const Shape outputShape)
+{
+  _inputData = inputData;
+  _inputShape = inputShape;
+ _inputType = inputShape.type;
+ _paddingLeft = paddingLeft;
+ _paddingRight = paddingRight;
+ _paddingTop = paddingTop;
+ _paddingBottom = paddingBottom;
+ _strideWidth = strideWidth;
+ _strideHeight = strideHeight;
+ _kernelWidth = kernelWidth;
+ _kernelHeight = kernelHeight;
+ _activation = activation;
+ _outputData = outputData;
+ _outputShape = outputShape;
+}
+
+void MaxPoolLayer::run()
+{
+ if (_inputType == OperandType::TENSOR_FLOAT32)
+ {
+ maxPoolFloat32();
+ }
+ else if (_inputType == OperandType::TENSOR_QUANT8_ASYMM)
+ {
+ throw std::runtime_error{"MaxPoolLayer : Not tested for TENSOR_QUANT8_ASYMM"};
+ // maxPoolQuant8();
+ }
+}
+
+#undef MAXPOOLING_PARAMETERS
+
+} // namespace cpu
+} // namespace kernel
+} // namespace neurun
diff --git a/runtimes/neurun/src/kernel/cpu/MaxPoolLayer.h b/runtimes/neurun/src/kernel/cpu/MaxPoolLayer.h
new file mode 100644
index 000000000..b42efb9f6
--- /dev/null
+++ b/runtimes/neurun/src/kernel/cpu/MaxPoolLayer.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_KERNEL_CPU_MAXPOOLLAYER_H__
+#define __NEURUN_KERNEL_CPU_MAXPOOLLAYER_H__
+
+#include <NeuralNetworks.h>
+
+#include <arm_compute/runtime/IFunction.h>
+
+#include "kernel/cpu/OperationUtils.h"
+
+namespace neurun
+{
+namespace kernel
+{
+namespace cpu
+{
+
+class MaxPoolLayer : public ::arm_compute::IFunction
+{
+public:
+ MaxPoolLayer();
+
+public:
+ bool maxPoolFloat32();
+
+ bool maxPoolQuant8();
+
+ void configure(uint8_t *inputData, const Shape inputShape, const uint32_t paddingLeft,
+ const uint32_t paddingRight, const uint32_t paddingTop,
+ const uint32_t paddingBottom, const uint32_t strideWidth,
+ const uint32_t strideHeight, const uint32_t kernelWidth,
+ const uint32_t kernelHeight, const FuseCode activation, uint8_t *outputData,
+ const Shape outputShape);
+
+ void run();
+
+private:
+ uint8_t *_inputData;
+ uint8_t *_outputData;
+
+ Shape _inputShape;
+ Shape _outputShape;
+
+ uint32_t _paddingLeft;
+ uint32_t _paddingTop;
+ uint32_t _paddingRight;
+ uint32_t _paddingBottom;
+
+ uint32_t _strideWidth;
+ uint32_t _strideHeight;
+ uint32_t _kernelWidth;
+ uint32_t _kernelHeight;
+
+ FuseCode _activation;
+
+ OperandType _inputType;
+};
+
+} // namespace cpu
+} // namespace kernel
+} // namespace neurun
+
+#endif // __NEURUN_KERNEL_CPU_MAXPOOLLAYER_H__
diff --git a/runtimes/neurun/src/kernel/cpu/OperationUtils.cc b/runtimes/neurun/src/kernel/cpu/OperationUtils.cc
new file mode 100644
index 000000000..5ec2f8e62
--- /dev/null
+++ b/runtimes/neurun/src/kernel/cpu/OperationUtils.cc
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernel/cpu/OperationUtils.h"
+
+#include <cmath>
+#include <algorithm>
+#include <cassert>
+
+namespace neurun
+{
+namespace kernel
+{
+namespace cpu
+{
+
+uint32_t getNumberOfDimensions(const Shape &shape) { return shape.dimensions.size(); }
+
+uint32_t getNumberOfElements(const Shape &shape)
+{
+ uint32_t count = 1;
+ for (size_t i = 0; i < shape.dimensions.size(); i++)
+ {
+ count *= shape.dimensions[i];
+ }
+ return count;
+}
+
+uint32_t getSizeOfDimension(const Shape &shape, uint32_t dimensionIdx)
+{
+ if (dimensionIdx >= shape.dimensions.size())
+ {
+    // TODO: log the error
+ return 0;
+ }
+ return shape.dimensions[dimensionIdx];
+}
+
+bool QuantizeMultiplierSmallerThanOne(double double_multiplier, int32_t *quantized_multiplier,
+ int32_t *right_shift)
+{
+ assert(double_multiplier >= 0.);
+ assert(double_multiplier < 1.);
+ if (double_multiplier == 0.)
+ {
+ *quantized_multiplier = 0;
+ *right_shift = 0;
+ return true;
+ }
+ assert(double_multiplier > 0.);
+ const double q = std::frexp(double_multiplier, right_shift);
+ *right_shift *= -1;
+ int64_t q_fixed = static_cast<int64_t>(std::round(q * (1ll << 31)));
+ assert(q_fixed <= (1ll << 31));
+ if (q_fixed == (1ll << 31))
+ {
+ q_fixed /= 2;
+ --*right_shift;
+ }
+ assert(*right_shift >= 0);
+ assert(q_fixed <= std::numeric_limits<int32_t>::max());
+ *quantized_multiplier = static_cast<int32_t>(q_fixed);
+ return true;
+}
+
+bool GetQuantizedConvolutionMultipler(const Shape &inputShape, const Shape &filterShape,
+ const Shape &biasShape, const Shape &outputShape,
+ float *multiplier)
+{
+ const float input_product_scale = inputShape.scale * filterShape.scale;
+ const float bias_scale = biasShape.scale;
+ const float output_scale = outputShape.scale;
+ // The following conditions must be guaranteed by the training pipeline.
+ assert(std::abs(input_product_scale - bias_scale) <=
+ 1e-6 * std::min(input_product_scale, bias_scale));
+ assert(input_product_scale >= 0);
+ assert(input_product_scale < output_scale);
+ *multiplier = input_product_scale / output_scale;
+ return true;
+}
+
+bool QuantizeMultiplierGreaterThanOne(double double_multiplier, int32_t *quantized_multiplier,
+ int *left_shift)
+{
+ assert(double_multiplier > 1.);
+ const double q = std::frexp(double_multiplier, left_shift);
+ int64_t q_fixed = static_cast<int64_t>(std::round(q * (1ll << 31)));
+ assert(q_fixed <= (1ll << 31));
+ if (q_fixed == (1ll << 31))
+ {
+ q_fixed /= 2;
+ ++*left_shift;
+ }
+ assert(*left_shift >= 0);
+ assert(q_fixed <= std::numeric_limits<int32_t>::max());
+ *quantized_multiplier = static_cast<int32_t>(q_fixed);
+ return true;
+}
+
+void CalculateActivationRangeFloat(int32_t activation, float *activation_min, float *activation_max)
+{
+ if (activation == ANEURALNETWORKS_FUSED_RELU)
+ {
+ *activation_min = 0.f;
+ *activation_max = std::numeric_limits<float>::max();
+ }
+ else if (activation == ANEURALNETWORKS_FUSED_RELU6)
+ {
+ *activation_min = 0.f;
+ *activation_max = 6.f;
+ }
+ else if (activation == ANEURALNETWORKS_FUSED_RELU1)
+ {
+ *activation_min = -1.f;
+ *activation_max = 1.f;
+ }
+ else if (activation == ANEURALNETWORKS_FUSED_NONE)
+ {
+ *activation_min = std::numeric_limits<float>::lowest();
+ *activation_max = std::numeric_limits<float>::max();
+ }
+ else
+ {
+ std::cout << "Unsupported fused activation function." << std::endl;
+ }
+}
+
+void CalculateActivationRangeUint8(int32_t activation, const Shape &outputShape, int32_t *act_min,
+ int32_t *act_max)
+{
+ const int32_t qmin = std::numeric_limits<uint8_t>::min();
+ const int32_t qmax = std::numeric_limits<uint8_t>::max();
+ const auto scale = outputShape.scale;
+ const auto zero_point = outputShape.offset;
+ auto quantize = [scale, zero_point](float f) {
+ return zero_point + static_cast<int32_t>(std::round(f / scale));
+ };
+ if (activation == ANEURALNETWORKS_FUSED_RELU)
+ {
+ *act_min = std::max(qmin, quantize(0.0));
+ *act_max = qmax;
+ }
+ else if (activation == ANEURALNETWORKS_FUSED_RELU6)
+ {
+ *act_min = std::max(qmin, quantize(0.0));
+ *act_max = std::min(qmax, quantize(6.0));
+ }
+ else if (activation == ANEURALNETWORKS_FUSED_RELU1)
+ {
+ *act_min = std::max(qmin, quantize(-1.0));
+ *act_max = std::min(qmax, quantize(1.0));
+ }
+ else if (activation == ANEURALNETWORKS_FUSED_NONE)
+ {
+ *act_min = qmin;
+ *act_max = qmax;
+ }
+ else
+ {
+ std::cout << "Unsupported fused activation function." << std::endl;
+ }
+}
+
+int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift)
+{
+ const double max_input_rescaled = 1.0 * ((1 << input_integer_bits) - 1) *
+ (1ll << (31 - input_integer_bits)) / (1ll << input_left_shift);
+  // Tighten the bound using floor: if the exact value were used, the result
+  // after scaling the difference could land exactly on the maximum, so we
+  // must ensure that our value has strictly lower magnitude.
+ return static_cast<int32_t>(std::floor(max_input_rescaled));
+}
+
+Shape getShape(const ::neurun::graph::operand::Object &o)
+{
+ Shape shape;
+
+ shape.type = static_cast<OperandType>(static_cast<int32_t>(o.typeInfo().type()));
+ shape.dimensions = std::vector<uint32_t>(o.shape().dims().begin(), o.shape().dims().end());
+ shape.scale = o.typeInfo().scale();
+  // TODO: set shape.offset (the quantization zero point); it is not wired up yet.
+
+ return shape;
+}
+
+size_t sizeOfData(OperandType type, const std::vector<uint32_t> &dimensions)
+{
+ size_t size = 4;
+
+ switch (type)
+ {
+ case OperandType::SCALAR_FLOAT32:
+ case OperandType::SCALAR_INT32:
+ case OperandType::SCALAR_UINT32:
+ case OperandType::TENSOR_FLOAT32:
+ case OperandType::TENSOR_INT32:
+ size = 4;
+ break;
+ case OperandType::TENSOR_QUANT8_ASYMM:
+ size = 1;
+ break;
+ default:
+ throw std::runtime_error("Not supported operand type.");
+ break;
+ }
+
+ for (auto d : dimensions)
+ {
+ size *= d;
+ }
+
+ return size;
+}
+
+} // namespace cpu
+} // namespace kernel
+} // namespace neurun
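The quantization helpers above are easiest to follow with concrete numbers. QuantizeMultiplierSmallerThanOne() rewrites a real multiplier M in [0, 1) as (q_fixed / 2^31) * 2^-right_shift, and CalculateActivationRangeUint8() clamps in the quantized domain. A worked sketch, assuming OperationUtils.h is on the include path:

#include <cassert>
#include "kernel/cpu/OperationUtils.h"

using namespace neurun::kernel::cpu;

void quantizationExamples()
{
  // 0.25 = 0.5 * 2^-1, so frexp() yields q = 0.5 and exponent -1:
  // q_fixed = round(0.5 * 2^31) = 1 << 30, right_shift = 1.
  int32_t quantized_multiplier = 0, right_shift = 0;
  bool ok = QuantizeMultiplierSmallerThanOne(0.25, &quantized_multiplier, &right_shift);
  assert(ok && quantized_multiplier == (1 << 30) && right_shift == 1);

  // With scale 0.5 and zero point 10, RELU6 clamps to
  // [max(0, 10 + round(0.0 / 0.5)), min(255, 10 + round(6.0 / 0.5))] = [10, 22].
  Shape out{OperandType::TENSOR_QUANT8_ASYMM, {1}, 0.5f, 10};
  int32_t act_min = 0, act_max = 0;
  CalculateActivationRangeUint8(ANEURALNETWORKS_FUSED_RELU6, out, &act_min, &act_max);
  assert(act_min == 10 && act_max == 22);
}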
diff --git a/runtimes/neurun/src/kernel/cpu/OperationUtils.h b/runtimes/neurun/src/kernel/cpu/OperationUtils.h
new file mode 100644
index 000000000..5914d04e3
--- /dev/null
+++ b/runtimes/neurun/src/kernel/cpu/OperationUtils.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_KERNEL_CPU_OPERATION_UTILS_H__
+#define __NEURUN_KERNEL_CPU_OPERATION_UTILS_H__
+
+#include <NeuralNetworks.h>
+
+#include <iostream>
+#include <limits>
+#include <stdexcept>
+#include <vector>
+
+#include "tensorflow/contrib/lite/kernels/internal/types.h"
+#include "graph/operand/Object.h"
+#include "graph/operand/DataType.h"
+
+using OperandType = neurun::graph::operand::DataType;
+
+namespace neurun
+{
+namespace kernel
+{
+namespace cpu
+{
+
+struct Shape
+{
+ OperandType type;
+ std::vector<uint32_t> dimensions;
+ float scale;
+ int32_t offset;
+};
+
+uint32_t getNumberOfDimensions(const Shape &shape);
+
+uint32_t getNumberOfElements(const Shape &shape);
+
+uint32_t getSizeOfDimension(const Shape &shape, uint32_t dimensionIdx);
+
+inline ::tflite::Dims<4> convertShapeToDims(const Shape &shape)
+{
+ // nnAssert(shape.dimensions.size() <= 4);
+ ::tflite::Dims<4> dims;
+ // The dimensions are reversed in Dims<4>.
+ for (int i = 0; i < 4; ++i)
+ {
+ int src = static_cast<int>(shape.dimensions.size()) - i - 1;
+ if (src >= 0)
+ {
+ dims.sizes[i] = static_cast<int>(getSizeOfDimension(shape, src));
+ }
+ else
+ {
+ dims.sizes[i] = 1;
+ }
+ }
+ dims.strides[0] = 1;
+ for (int i = 1; i < 4; i++)
+ {
+ dims.strides[i] = dims.strides[i - 1] * dims.sizes[i - 1];
+ }
+ return dims;
+}
+
+__wur bool QuantizeMultiplierSmallerThanOne(double double_multiplier, int32_t *quantized_multiplier,
+ int32_t *right_shift);
+
+__wur bool GetQuantizedConvolutionMultipler(const Shape &inputShape, const Shape &filterShape,
+ const Shape &biasShape, const Shape &outputShape,
+ float *multiplier);
+__wur bool QuantizeMultiplierGreaterThanOne(double double_multiplier, int32_t *quantized_multiplier,
+ int *left_shift);
+
+void CalculateActivationRangeFloat(int32_t activation, float *activation_min,
+ float *activation_max);
+
+void CalculateActivationRangeUint8(int32_t activation, const Shape &outputShape, int32_t *act_min,
+ int32_t *act_max);
+
+int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift);
+
+Shape getShape(const ::neurun::graph::operand::Object &o);
+
+size_t sizeOfData(OperandType type, const std::vector<uint32_t> &dimensions);
+
+} // namespace cpu
+} // namespace kernel
+} // namespace neurun
+
+#endif // __NEURUN_KERNEL_CPU_OPERATION_UTILS_H__
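convertShapeToDims() above is the bridge every cpu kernel crosses: it reverses the dimension order (tflite's Dims<4> is innermost-first), pads missing dimensions with 1, and accumulates strides. A worked example, assuming the header is included:

#include <cassert>
#include "kernel/cpu/OperationUtils.h"

void convertShapeExample()
{
  neurun::kernel::cpu::Shape shape;
  shape.type = OperandType::TENSOR_FLOAT32;
  shape.dimensions = {3, 5}; // a rank-2 [3, 5] tensor

  ::tflite::Dims<4> dims = neurun::kernel::cpu::convertShapeToDims(shape);

  // Dimensions come out reversed and padded with 1s up to rank 4 ...
  assert(dims.sizes[0] == 5 && dims.sizes[1] == 3);
  assert(dims.sizes[2] == 1 && dims.sizes[3] == 1);
  // ... and strides accumulate innermost-first: {1, 5, 15, 15}.
  assert(dims.strides[0] == 1 && dims.strides[1] == 5);
  assert(dims.strides[2] == 15 && dims.strides[3] == 15);
}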
diff --git a/runtimes/neurun/src/kernel/cpu/ReshapeLayer.cc b/runtimes/neurun/src/kernel/cpu/ReshapeLayer.cc
new file mode 100644
index 000000000..377f783e0
--- /dev/null
+++ b/runtimes/neurun/src/kernel/cpu/ReshapeLayer.cc
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReshapeLayer.h"
+
+#include "tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h"
+#include "tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h"
+#include "kernel/cpu/OperationUtils.h"
+
+namespace neurun
+{
+namespace kernel
+{
+namespace cpu
+{
+
+ReshapeLayer::ReshapeLayer()
+ : _inputData(nullptr), _outputData(nullptr), _inputShape(), _outputShape()
+{
+ // DO NOTHING
+}
+
+bool ReshapeLayer::reshapeGeneric()
+{
+ size_t count = sizeOfData(_inputShape.type, _inputShape.dimensions);
+ memcpy(reinterpret_cast<void *>(_outputData), reinterpret_cast<const void *>(_inputData), count);
+ return true;
+}
+
+void ReshapeLayer::configure(uint8_t *inputData, const Shape &inputShape, uint8_t *outputData,
+ const Shape &outputShape)
+{
+ _inputData = inputData;
+ _inputShape = inputShape;
+ _outputData = outputData;
+ _outputShape = outputShape;
+}
+
+void ReshapeLayer::run() { reshapeGeneric(); }
+
+} // namespace cpu
+} // namespace kernel
+} // namespace neurun
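Reshape on a dense row-major buffer is metadata-only, which is why reshapeGeneric() reduces to a single memcpy of sizeOfData(...) bytes. A sketch of the invariant it relies on (an illustrative re-implementation of the float case, not the runtime's function):

#include <cstddef>
#include <cstdint>
#include <vector>

// Byte count of a dense float tensor: element size times all dimensions.
size_t floatTensorBytes(const std::vector<uint32_t> &dims)
{
  size_t size = sizeof(float);
  for (auto d : dims)
    size *= d;
  return size;
}

// floatTensorBytes({2, 3, 4}) == floatTensorBytes({6, 4}) == 96: any reshape
// of the same operand covers exactly the same bytes, so copying them is the
// whole operation.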
diff --git a/runtimes/neurun/src/kernel/cpu/ReshapeLayer.h b/runtimes/neurun/src/kernel/cpu/ReshapeLayer.h
new file mode 100644
index 000000000..395cc1d7f
--- /dev/null
+++ b/runtimes/neurun/src/kernel/cpu/ReshapeLayer.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_KERNEL_CPU_RESHAPELAYER_H__
+#define __NEURUN_KERNEL_CPU_RESHAPELAYER_H__
+
+#include <NeuralNetworks.h>
+
+#include <arm_compute/runtime/IFunction.h>
+
+#include "kernel/cpu/OperationUtils.h"
+
+namespace neurun
+{
+namespace kernel
+{
+namespace cpu
+{
+
+class ReshapeLayer : public ::arm_compute::IFunction
+{
+public:
+ ReshapeLayer();
+
+public:
+ bool reshapeGeneric();
+
+ void configure(uint8_t *inputData, const Shape &inputShape, uint8_t *outputData,
+ const Shape &outputShape);
+
+ void run();
+
+private:
+ uint8_t *_inputData;
+ uint8_t *_outputData;
+
+ Shape _inputShape;
+ Shape _outputShape;
+};
+
+} // namespace cpu
+} // namespace kernel
+} // namespace neurun
+
+#endif // __NEURUN_KERNEL_CPU_RESHAPELAYER_H__
diff --git a/runtimes/neurun/src/kernel/cpu/SoftMaxLayer.cc b/runtimes/neurun/src/kernel/cpu/SoftMaxLayer.cc
new file mode 100644
index 000000000..4f5a69f2e
--- /dev/null
+++ b/runtimes/neurun/src/kernel/cpu/SoftMaxLayer.cc
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SoftMaxLayer.h"
+
+#include "tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h"
+#include "kernel/cpu/OperationUtils.h"
+
+namespace neurun
+{
+namespace kernel
+{
+namespace cpu
+{
+
+SoftMaxLayer::SoftMaxLayer()
+ : _inputData(nullptr), _outputData(nullptr), _beta(0.0), _inputShape(), _outputShape(),
+ _inputType(OperandType::SCALAR_FLOAT32)
+{
+ // DO NOTHING
+}
+
+bool SoftMaxLayer::softmaxFloat32()
+{
+ ::tflite::Dims<4> dim;
+ if (getNumberOfDimensions(_inputShape) == 2)
+ {
+ uint32_t batch_size = getSizeOfDimension(_inputShape, 0);
+ uint32_t input_size = getNumberOfElements(_inputShape) / batch_size;
+ Shape shapeIn4D;
+ shapeIn4D.dimensions = {batch_size, 1, 1, input_size};
+ dim = convertShapeToDims(shapeIn4D);
+ }
+ else if (getNumberOfDimensions(_inputShape) == 4)
+ {
+ dim = convertShapeToDims(_inputShape);
+ }
+ else
+ {
+ std::cout << "only 2D and 4D tensors supported" << std::endl;
+ return false;
+ }
+ ::tflite::optimized_ops::Softmax(reinterpret_cast<const float *>(_inputData), dim, _beta,
+ reinterpret_cast<float *>(_outputData), dim);
+ return true;
+}
+
+bool SoftMaxLayer::softmaxQuant8()
+{
+ ::tflite::Dims<4> dim;
+ if (getNumberOfDimensions(_inputShape) == 2)
+ {
+ uint32_t batch_size = getSizeOfDimension(_inputShape, 0);
+ uint32_t input_size = getNumberOfElements(_inputShape) / batch_size;
+ Shape shapeIn4D;
+ shapeIn4D.dimensions = {batch_size, 1, 1, input_size};
+ dim = convertShapeToDims(shapeIn4D);
+ }
+ else if (getNumberOfDimensions(_inputShape) == 4)
+ {
+ dim = convertShapeToDims(_inputShape);
+ }
+ else
+ {
+ std::cout << "only 2D and 4D tensors supported" << std::endl;
+ return false;
+ }
+ if (_outputShape.offset != 0 || _outputShape.scale != 1.f / 256)
+ {
+ std::cout << "incorrect scale / offset for output" << std::endl;
+ return false;
+ }
+ static const int32_t kScaledDiffIntegerBits = 5;
+ const double input_beta_real_multiplier = std::min(
+ 1.0 * _beta * _inputShape.scale * (1 << (31 - kScaledDiffIntegerBits)), (1ll << 31) - 1.0);
+ int32_t input_multiplier = 0;
+ int32_t input_left_shift = 0;
+ if (!QuantizeMultiplierGreaterThanOne(input_beta_real_multiplier, &input_multiplier,
+ &input_left_shift))
+ {
+ return false;
+ }
+ float diff_min = -1.0f * CalculateInputRadius(kScaledDiffIntegerBits, input_left_shift);
+ ::tflite::optimized_ops::Softmax(_inputData, dim, input_multiplier, input_left_shift, diff_min,
+ _outputData, dim);
+ return true;
+}
+
+void SoftMaxLayer::configure(uint8_t *inputData, const Shape &inputShape, const float beta,
+ uint8_t *outputData, const Shape &outputShape)
+{
+ _inputData = inputData;
+ _inputShape = inputShape;
+ _inputType = inputShape.type;
+ _outputData = outputData;
+ _outputShape = outputShape;
+ _beta = beta;
+}
+
+void SoftMaxLayer::run()
+{
+ if (_inputType == OperandType::TENSOR_FLOAT32)
+ {
+ softmaxFloat32();
+ }
+ else if (_inputType == OperandType::TENSOR_QUANT8_ASYMM)
+ {
+ throw std::runtime_error{"SoftMaxLayer : Not tested for TENSOR_QUANT8_ASYMM"};
+ // softmaxQuant8();
+ }
+}
+
+} // namespace cpu
+} // namespace kernel
+} // namespace neurun
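Both softmax paths above normalize the rank first: a 2-D [batch, input_size] tensor is promoted to a 4-D [batch, 1, 1, input_size] view so one tflite Softmax call serves either rank. A sketch of that promotion (the helper name is illustrative):

#include "kernel/cpu/OperationUtils.h"

// View a rank-2 [batch, input_size] shape as rank-4 [batch, 1, 1, input_size].
neurun::kernel::cpu::Shape promoteTo4D(const neurun::kernel::cpu::Shape &in)
{
  using namespace neurun::kernel::cpu;

  const uint32_t batch = getSizeOfDimension(in, 0);
  const uint32_t inputSize = getNumberOfElements(in) / batch;

  Shape out = in; // keep type/scale/offset; only the view changes
  out.dimensions = {batch, 1, 1, inputSize};
  return out;
}

// promoteTo4D() on a [8, 100] shape yields dimensions {8, 1, 1, 100}.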
diff --git a/runtimes/neurun/src/kernel/cpu/SoftMaxLayer.h b/runtimes/neurun/src/kernel/cpu/SoftMaxLayer.h
new file mode 100644
index 000000000..8057be52f
--- /dev/null
+++ b/runtimes/neurun/src/kernel/cpu/SoftMaxLayer.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_KERNEL_CPU_SOFTMAXLAYER_H__
+#define __NEURUN_KERNEL_CPU_SOFTMAXLAYER_H__
+
+#include <NeuralNetworks.h>
+
+#include <arm_compute/runtime/IFunction.h>
+
+#include "kernel/cpu/OperationUtils.h"
+
+namespace neurun
+{
+namespace kernel
+{
+namespace cpu
+{
+
+class SoftMaxLayer : public ::arm_compute::IFunction
+{
+public:
+ SoftMaxLayer();
+
+public:
+ bool softmaxFloat32();
+
+ bool softmaxQuant8();
+
+ void configure(uint8_t *inputData, const Shape &inputShape, const float beta, uint8_t *outputData,
+ const Shape &outputShape);
+
+ void run();
+
+private:
+ uint8_t *_inputData;
+ uint8_t *_outputData;
+
+ float _beta;
+
+ Shape _inputShape;
+ Shape _outputShape;
+
+ OperandType _inputType;
+};
+
+} // namespace cpu
+} // namespace kernel
+} // namespace neurun
+
+#endif // __NEURUN_KERNEL_CPU_SOFTMAXLAYER_H__
diff --git a/runtimes/neurun/src/kernel/cpu/TensorConvertFromCommonLayer.cc b/runtimes/neurun/src/kernel/cpu/TensorConvertFromCommonLayer.cc
new file mode 100644
index 000000000..00e914732
--- /dev/null
+++ b/runtimes/neurun/src/kernel/cpu/TensorConvertFromCommonLayer.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// THIS FILE IS UNUSED BUT LEFT FOR FUTURE REFERENCE
+//
+
+#if 0
+
+#include "TensorConvertFromCommonLayer.h"
+
+#include "internal/nnapi/feature/Reader.h"
+#include "internal/nnapi/feature/View.h"
+
+#include <util/feature/IndexIterator.h>
+
+namespace neurun
+{
+namespace kernel
+{
+namespace cpu
+{
+
+bool TensorConvertFromCommonLayer::convert()
+{
+ auto inputBuffer = _inputTensor->buffer();
+ auto inputSize = _inputTensor->info()->total_size();
+
+ auto outputBuffer = _outputTensor->buffer();
+ auto outputSize = _outputTensor->info()->total_size();
+
+ if (_tensorShape.rank() == 2)
+ {
+ const auto len = _tensorShape.dim(1);
+
+ auto base = reinterpret_cast<const float *>(inputBuffer);
+
+ for (int32_t n = 0; n < len; ++n)
+ {
+ auto from = base + n;
+ auto into =
+ reinterpret_cast<float *>(_outputTensor->ptr_to_element(::arm_compute::Coordinates{n}));
+
+ *into = *from;
+ }
+ }
+ else if (_tensorShape.rank() == 4)
+ {
+ auto featureShape = _tensorShape.asFeature();
+
+ const ::internal::nnapi::feature::Reader<float> from{featureShape, inputBuffer, inputSize};
+ ::internal::nnapi::feature::View<float> into{featureShape, outputBuffer, outputSize};
+
+ ::nnfw::util::feature::iterate(featureShape)
+ << [&](uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(batch, ch, row, col);
+ into.at(batch, ch, row, col) = value;
+ };
+  }
+
+  return true;
+}
+
+void TensorConvertFromCommonLayer::configure(::internal::common::Tensor *inputTensor,
+ ::internal::cpu::Tensor *outputTensor,
+ const Shape &tensorShape)
+{
+ _inputTensor = inputTensor;
+ _outputTensor = outputTensor;
+ _tensorShape = tensorShape;
+}
+
+void TensorConvertFromCommonLayer::run() { convert(); }
+
+} // namespace cpu
+} // namespace kernel
+} // namespace neurun
+
+#endif
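Although disabled, the converter documents the intended element-wise copy pattern: rank-2 tensors are copied column by column, while rank-4 feature maps are walked coordinate by coordinate with Reader/View hiding the two buffer layouts. A generic stand-in for the iterate() pattern (a hypothetical helper, not the nnfw utility itself):

#include <cstdint>
#include <functional>

// Visit every (batch, channel, row, col) coordinate of a feature map once;
// the visitor moves one value between the two layouts per call.
void iterateFeature(uint32_t batches, uint32_t channels, uint32_t rows, uint32_t cols,
                    const std::function<void(uint32_t, uint32_t, uint32_t, uint32_t)> &visit)
{
  for (uint32_t n = 0; n < batches; ++n)
    for (uint32_t c = 0; c < channels; ++c)
      for (uint32_t h = 0; h < rows; ++h)
        for (uint32_t w = 0; w < cols; ++w)
          visit(n, c, h, w);
}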
diff --git a/runtimes/neurun/src/kernel/cpu/TensorConvertFromCommonLayer.h b/runtimes/neurun/src/kernel/cpu/TensorConvertFromCommonLayer.h
new file mode 100644
index 000000000..56f7bcf32
--- /dev/null
+++ b/runtimes/neurun/src/kernel/cpu/TensorConvertFromCommonLayer.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// THIS FILE IS UNUSED BUT LEFT FOR FUTURE REFERENCE
+//
+
+#if 0
+
+#ifndef __NEURUN_KERNEL_CPU_TENSOR_CONVERT_FROM_COMMON_LAYER_H__
+#define __NEURUN_KERNEL_CPU_TENSOR_CONVERT_FROM_COMMON_LAYER_H__
+
+#include <NeuralNetworks.h>
+
+#include <arm_compute/runtime/IFunction.h>
+
+#include "internal/Model.h"
+#include "internal/common/Tensor.h"
+#include "internal/cpu.h"
+
+namespace neurun
+{
+namespace kernel
+{
+namespace cpu
+{
+
+class TensorConvertFromCommonLayer : public ::arm_compute::IFunction
+{
+public:
+ TensorConvertFromCommonLayer() {}
+
+public:
+ bool convert();
+
+ void configure(::internal::common::Tensor *inputTensor, ::internal::cpu::Tensor *outputTensor,
+ const Shape &tensorShape);
+
+  void run() override;
+
+private:
+  ::internal::common::Tensor *_inputTensor = nullptr;
+  ::internal::cpu::Tensor *_outputTensor = nullptr;
+
+ Shape _tensorShape{1};
+};
+
+} // namespace cpu
+} // namespace kernel
+} // namespace neurun
+
+#endif // __NEURUN_KERNEL_CPU_TENSOR_CONVERT_FROM_COMMON_LAYER_H__
+
+#endif
diff --git a/runtimes/neurun/src/kernel/cpu/TensorConvertToCommonLayer.cc b/runtimes/neurun/src/kernel/cpu/TensorConvertToCommonLayer.cc
new file mode 100644
index 000000000..7d721f494
--- /dev/null
+++ b/runtimes/neurun/src/kernel/cpu/TensorConvertToCommonLayer.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// THIS FILE IS UNUSED BUT LEFT FOR FUTURE REFERENCE
+//
+
+#if 0
+
+#include "TensorConvertToCommonLayer.h"
+
+#include "internal/nnapi/feature/Reader.h"
+#include "internal/nnapi/feature/View.h"
+
+#include <util/feature/IndexIterator.h>
+
+namespace neurun
+{
+namespace kernel
+{
+namespace cpu
+{
+
+bool TensorConvertToCommonLayer::convert()
+{
+ auto inputBuffer = _inputTensor->buffer();
+ auto inputSize = _inputTensor->info()->total_size();
+
+ auto outputBuffer = _outputTensor->buffer();
+ auto outputSize = _outputTensor->info()->total_size();
+
+ if (_tensorShape.rank() == 2)
+ {
+ const auto len = _tensorShape.dim(1);
+
+ auto base = reinterpret_cast<float *>(outputBuffer);
+
+ for (int32_t n = 0; n < len; ++n)
+ {
+ auto from = reinterpret_cast<const float *>(
+ _inputTensor->ptr_to_element(::arm_compute::Coordinates{n}));
+ auto into = base + n;
+
+ *into = *from;
+ }
+ }
+ else if (_tensorShape.rank() == 4)
+ {
+ auto featureShape = _tensorShape.asFeature();
+
+ const ::internal::nnapi::feature::Reader<float> from{featureShape, inputBuffer, inputSize};
+ ::internal::nnapi::feature::View<float> into{featureShape, outputBuffer, outputSize};
+
+ ::nnfw::util::feature::iterate(featureShape)
+ << [&](uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(batch, ch, row, col);
+ into.at(batch, ch, row, col) = value;
+ };
+  }
+
+  // Only rank-2 and rank-4 tensors are handled above
+  return _tensorShape.rank() == 2 || _tensorShape.rank() == 4;
+}
+
+void TensorConvertToCommonLayer::configure(::internal::cpu::Tensor *inputTensor,
+ ::internal::common::Tensor *outputTensor,
+ const Shape &tensorShape)
+{
+ _inputTensor = inputTensor;
+ _outputTensor = outputTensor;
+ _tensorShape = tensorShape;
+}
+
+void TensorConvertToCommonLayer::run() { convert(); }
+
+} // namespace cpu
+} // namespace kernel
+} // namespace neurun
+
+#endif
diff --git a/runtimes/neurun/src/kernel/cpu/TensorConvertToCommonLayer.h b/runtimes/neurun/src/kernel/cpu/TensorConvertToCommonLayer.h
new file mode 100644
index 000000000..7e96d1aff
--- /dev/null
+++ b/runtimes/neurun/src/kernel/cpu/TensorConvertToCommonLayer.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// THIS FILE IS UNUSED BUT LEFT FOR FUTURE REFERENCE
+//
+
+#if 0
+
+#ifndef __NEURUN_KERNEL_CPU_TENSOR_CONVERT_TO_COMMON_LAYER_H__
+#define __NEURUN_KERNEL_CPU_TENSOR_CONVERT_TO_COMMON_LAYER_H__
+
+#include <NeuralNetworks.h>
+
+#include <arm_compute/runtime/IFunction.h>
+
+#include "internal/Model.h"
+#include "internal/common/Tensor.h"
+#include "internal/cpu.h"
+
+namespace neurun
+{
+namespace kernel
+{
+namespace cpu
+{
+
+class TensorConvertToCommonLayer : public ::arm_compute::IFunction
+{
+public:
+ TensorConvertToCommonLayer() {}
+
+public:
+ bool convert();
+
+ void configure(::internal::cpu::Tensor *inputTensor, ::internal::common::Tensor *outputTensor,
+ const Shape &tensorShape);
+
+  void run() override;
+
+private:
+  ::internal::cpu::Tensor *_inputTensor = nullptr;
+  ::internal::common::Tensor *_outputTensor = nullptr;
+
+ Shape _tensorShape{1};
+};
+
+} // namespace cpu
+} // namespace kernel
+} // namespace neurun
+
+#endif // __NEURUN_KERNEL_CPU_TENSOR_CONVERT_TO_COMMON_LAYER_H__
+
+#endif
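Both convert layers follow the configure-then-run protocol of ::arm_compute::IFunction. A hypothetical usage sketch, assuming the tensors already exist and the files were compiled in (both are currently behind #if 0):

// 'cpu_tensor', 'common_tensor' and 'shape' are hypothetical, pre-built objects.
neurun::kernel::cpu::TensorConvertToCommonLayer layer;
layer.configure(cpu_tensor,    // ::internal::cpu::Tensor *
                common_tensor, // ::internal::common::Tensor *
                shape);        // rank-2 or rank-4 Shape
layer.run();                   // run() simply delegates to convert()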
diff --git a/runtimes/neurun/src/library_info.cc b/runtimes/neurun/src/library_info.cc
new file mode 100644
index 000000000..4adf70465
--- /dev/null
+++ b/runtimes/neurun/src/library_info.cc
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+volatile static const char info[] = "library information : runtime=neurun";
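The volatile qualifier is intended to keep this otherwise-unreferenced array from being optimized out of the shared library, so the identification string stays discoverable in the built binary (for example with strings(1)).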
diff --git a/runtimes/neurun/src/linear/Linear.cc b/runtimes/neurun/src/linear/Linear.cc
new file mode 100644
index 000000000..2ffcbdb93
--- /dev/null
+++ b/runtimes/neurun/src/linear/Linear.cc
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Linear.h"
+
+#include "graph/Graph.h"
+
+#include "graph/operation/LowerInfo.h"
+#include "backend/IStageGenerator.h"
+
+namespace neurun
+{
+namespace linear
+{
+
+Linear::Linear(const graph::Graph &graph)
+{
+ // Linearize with topological sort
+ //
+ // Topological sort algorithm
+ // 1. Iterate with DFS
+ // 2. Append the node to vector when DFS for the node finishes(post order)
+ // 3. Reverse the order of nodes
+
+ graph::Graph::PostDfsConstIterator().iterate(
+ graph, [&](const neurun::graph::operation::Node &node) { _operations.emplace_back(&node); });
+
+ std::reverse(std::begin(_operations), std::end(_operations));
+}
+
+void Linear::accept(graph::operation::NodeVisitor &&visitor) const
+{
+ for (const auto op : _operations)
+ {
+ op->accept(std::move(visitor));
+ }
+}
+
+backend::TensorBuilderSet Linear::markTensors() const
+{
+ backend::TensorBuilderSet tensor_builders;
+ for (const auto op : _operations)
+ {
+ const auto tensor_builder = op->lower_info()->backend().stage_gen()->tensor_builder();
+ for (const auto &ind : op->getInputs())
+ {
+ tensor_builder->mark(ind);
+ tensor_builders.insert(tensor_builder);
+ }
+ for (const auto &ind : op->getOutputs())
+ {
+ tensor_builder->mark(ind);
+ tensor_builders.insert(tensor_builder);
+ }
+ }
+ return tensor_builders;
+}
+
+} // namespace linear
+} // namespace neurun
diff --git a/runtimes/neurun/src/linear/Linear.h b/runtimes/neurun/src/linear/Linear.h
new file mode 100644
index 000000000..ffbc68ecb
--- /dev/null
+++ b/runtimes/neurun/src/linear/Linear.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_LINEAR_LINEAR_H__
+#define __NEURUN_LINEAR_LINEAR_H__
+
+#include <vector>
+
+#include "graph/operation/Node.h"
+#include "backend/ITensorBuilder.h"
+
+namespace neurun
+{
+namespace graph
+{
+class Graph;
+
+namespace operation
+{
+struct NodeVisitor;
+} // namespace operation
+} // namespace graph
+} // namespace neurun
+
+namespace neurun
+{
+namespace linear
+{
+
+class Linear
+{
+public:
+ Linear(const graph::Graph &graph);
+
+public:
+ Linear(const Linear &linear) = delete;
+
+public:
+ void accept(graph::operation::NodeVisitor &&visitor) const;
+
+ // TODO Should not return TensorBuilderSet
+ virtual backend::TensorBuilderSet markTensors() const;
+
+private:
+ std::vector<const graph::operation::Node *> _operations;
+};
+
+} // namespace linear
+} // namespace neurun
+
+#endif // __NEURUN_LINEAR_LINEAR_H__
diff --git a/runtimes/neurun/src/logging.h b/runtimes/neurun/src/logging.h
new file mode 100644
index 000000000..950df7b52
--- /dev/null
+++ b/runtimes/neurun/src/logging.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_LOGGING_H__
+#define __NEURUN_LOGGING_H__
+
+#include <cstdlib> // std::getenv, std::atoi
+#include <iostream>
+
+namespace logging
+{
+
+class Context
+{
+public:
+ Context() : _enabled{false}
+ {
+ auto env = std::getenv("NEURUN_LOG_ENABLE");
+
+ if (env && std::atoi(env) > 0)
+ {
+ _enabled = true;
+ }
+ }
+
+public:
+ bool enabled(void) const { return _enabled; }
+
+private:
+ bool _enabled;
+};
+
+static Context ctx;
+
+} // namespace logging
+
+#define VERBOSE(name) \
+ if (::logging::ctx.enabled()) \
+ std::cout << "[" << #name << "] "
+
+#endif // __NEURUN_LOGGING_H__
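With NEURUN_LOG_ENABLE set to a positive integer the macro prints a tagged line; otherwise the stream expression is skipped entirely. Note that ctx is defined static in a header, so every translation unit gets its own Context instance (each reading the same environment variable), and the macro expands to a bare if, so brace it when used inside an outer if/else. Typical use:

// Prints "[Planner] visiting node 3" when NEURUN_LOG_ENABLE=1; nothing otherwise.
VERBOSE(Planner) << "visiting node " << 3 << std::endl;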