Diffstat (limited to 'runtimes/neurun/src/backend')
-rw-r--r--  runtimes/neurun/src/backend/BackendManager.cc  118
-rw-r--r--  runtimes/neurun/src/backend/BackendManager.h  94
-rw-r--r--  runtimes/neurun/src/backend/CMakeLists.txt  2
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/CMakeLists.txt  15
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/Config.cc  32
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/Config.h  47
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/Convert.cc  87
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/Convert.h  47
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/PluginClassesAllocator.cc  43
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/StageGenerator.cc  593
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/StageGenerator.h  54
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/Swizzle.h  95
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc  246
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/TensorBuilder.h  94
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/operand/CLSubTensor.cc  61
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/operand/CLSubTensor.h  63
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/operand/CLTensor.cc  81
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/operand/CLTensor.h  67
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/operand/ICLTensor.cc  48
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/operand/ICLTensor.h  73
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/operand/Object.cc  43
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/operand/Object.h  62
-rw-r--r--  runtimes/neurun/src/backend/cpu/CMakeLists.txt  18
-rw-r--r--  runtimes/neurun/src/backend/cpu/Config.cc  33
-rw-r--r--  runtimes/neurun/src/backend/cpu/Config.h  51
-rw-r--r--  runtimes/neurun/src/backend/cpu/MemoryPlanner.cc  127
-rw-r--r--  runtimes/neurun/src/backend/cpu/MemoryPlanner.h  166
-rw-r--r--  runtimes/neurun/src/backend/cpu/PluginClassesAllocator.cc  43
-rw-r--r--  runtimes/neurun/src/backend/cpu/StageGenerator.cc  547
-rw-r--r--  runtimes/neurun/src/backend/cpu/StageGenerator.h  55
-rw-r--r--  runtimes/neurun/src/backend/cpu/TensorBuilder.cc  124
-rw-r--r--  runtimes/neurun/src/backend/cpu/TensorBuilder.h  82
-rw-r--r--  runtimes/neurun/src/backend/cpu/operand/Object.cc  37
-rw-r--r--  runtimes/neurun/src/backend/cpu/operand/Object.h  61
-rw-r--r--  runtimes/neurun/src/backend/cpu/operand/Tensor.cc  39
-rw-r--r--  runtimes/neurun/src/backend/cpu/operand/Tensor.h  74
-rw-r--r--  runtimes/neurun/src/backend/interface/IConfig.h  44
-rw-r--r--  runtimes/neurun/src/backend/interface/IStageGenerator.h  72
-rw-r--r--  runtimes/neurun/src/backend/interface/ITensorBuilder.h  79
-rw-r--r--  runtimes/neurun/src/backend/interface/operand/IObject.h  43
-rw-r--r--  runtimes/neurun/src/backend/interface/operand/ITensor.h  49
41 files changed, 0 insertions, 3809 deletions
diff --git a/runtimes/neurun/src/backend/BackendManager.cc b/runtimes/neurun/src/backend/BackendManager.cc
deleted file mode 100644
index 5d19d4015..000000000
--- a/runtimes/neurun/src/backend/BackendManager.cc
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <dlfcn.h>
-#include "BackendManager.h"
-
-#include "backend/interface/IConfig.h"
-#include "backend/interface/ITensorBuilder.h"
-#include "backend/interface/IStageGenerator.h"
-#include "util/logging.h"
-#include "util/config/ConfigManager.h"
-
-namespace neurun
-{
-namespace backend
-{
-
-Backend::Backend(const std::shared_ptr<neurun::backend::IConfig> &backend_config,
- const std::shared_ptr<neurun::backend::IStageGenerator> &stage_gen)
- : _config(backend_config), _stage_gen(stage_gen)
-{
- backend_config->initialize();
-}
-
-const std::shared_ptr<neurun::backend::IConfig> Backend::config() const { return _config; }
-
-const std::shared_ptr<neurun::backend::IStageGenerator> Backend::stage_gen() const
-{
- return _stage_gen;
-}
-
-const std::shared_ptr<neurun::backend::ITensorBuilder> Backend::tensor_builder() const
-{
- return _stage_gen->tensor_builder();
-}
-
-template <typename T, class... Types>
-void BackendManager::loadObjectFromPlugin(std::shared_ptr<T> &object_of_plugin_class,
- const std::string obj_creator_func_name, void *handle,
- Types &&... args)
-{
- T *(*allocate_obj)(Types && ... Args);
- // load object creator function
- allocate_obj = (T * (*)(Types && ... Args))dlsym(handle, obj_creator_func_name.c_str());
- if (allocate_obj == nullptr)
- {
- fprintf(stderr, "BackendManager: unable to open function %s: %s\n",
- obj_creator_func_name.c_str(), dlerror());
- abort();
- }
-
- object_of_plugin_class.reset(allocate_obj(args...));
-}
-
-void BackendManager::loadBackend(const std::string &backend,
- const neurun::model::operand::Set &operands)
-{
- const std::string backend_plugin = "libbackend_" + backend + ".so";
- void *handle = dlopen(backend_plugin.c_str(), RTLD_LAZY | RTLD_LOCAL);
- if (handle == nullptr)
- {
- fprintf(stderr, "BackendManager::loadBackend failed to load plugin of %s backend: %s\n",
- backend.c_str(), dlerror());
- abort();
- }
- VERBOSE(BackendManager::loadBackend) << "loaded " << backend_plugin << " as a plugin of "
- << backend << " backend\n";
-
- // load Config
- std::shared_ptr<neurun::backend::IConfig> config;
- loadObjectFromPlugin(config, std::string("allocate_Config"), handle);
-
- // load TensorBuilder
- std::shared_ptr<neurun::backend::ITensorBuilder> tensor_builder;
- loadObjectFromPlugin(tensor_builder, std::string("allocate_TensorBuilder"), handle);
-
- // load StageGenerator
- std::shared_ptr<neurun::backend::IStageGenerator> stage_gen;
- loadObjectFromPlugin(stage_gen, std::string("allocate_StageGenerator"), handle, operands,
- tensor_builder);
- _gen_map[config->id()] = {config, stage_gen};
-}
-
-BackendManager::BackendManager(const neurun::model::operand::Set &operands)
-{
- const auto backends = config::ConfigManager::instance().get<std::string>("BACKENDS");
- size_t prev_pos = 0;
- auto pos = backends.find(";");
- while (pos != std::string::npos)
- {
- loadBackend(backends.substr(prev_pos, pos - prev_pos), operands);
- prev_pos = pos + 1;
- pos = backends.find(";", prev_pos);
- }
- // Handle the last entry in case the backends list does not end with ";"
- if (prev_pos < backends.size())
- {
- loadBackend(backends.substr(prev_pos), operands);
- }
-}
-
-Backend *BackendManager::get(const std::string &key) { return &_gen_map.at(key); }
-
-} // namespace backend
-} // namespace neurun
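The deleted BackendManager combines two small patterns: loadObjectFromPlugin() resolves an extern "C" factory symbol with dlsym(), and the constructor splits the semicolon-separated BACKENDS string. The standalone sketch below reproduces both patterns; the IConfig stub, the "acl_cl;cpu" value, and the printed plugin names are illustrative assumptions, not the real neurun code.

    #include <dlfcn.h>
    #include <iostream>
    #include <memory>
    #include <stdexcept>
    #include <string>

    // Stand-in for neurun::backend::IConfig (illustrative only)
    struct IConfig
    {
      virtual ~IConfig() = default;
      virtual std::string id() = 0;
    };

    // Resolve an extern "C" factory symbol from an already-opened plugin handle
    std::shared_ptr<IConfig> load_config(void *handle)
    {
      using Creator = IConfig *(*)();
      auto create = reinterpret_cast<Creator>(dlsym(handle, "allocate_Config"));
      if (create == nullptr)
        throw std::runtime_error("allocate_Config not found in plugin");
      return std::shared_ptr<IConfig>(create());
    }

    int main()
    {
      // BACKENDS is a semicolon-separated list of backend names
      const std::string backends = "acl_cl;cpu";
      size_t prev = 0;
      auto pos = backends.find(';');
      while (pos != std::string::npos)
      {
        std::cout << "would dlopen libbackend_" << backends.substr(prev, pos - prev) << ".so\n";
        prev = pos + 1;
        pos = backends.find(';', prev);
      }
      if (prev < backends.size()) // list does not end with ';'
        std::cout << "would dlopen libbackend_" << backends.substr(prev) << ".so\n";
      return 0;
    }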
diff --git a/runtimes/neurun/src/backend/BackendManager.h b/runtimes/neurun/src/backend/BackendManager.h
deleted file mode 100644
index 428542b1e..000000000
--- a/runtimes/neurun/src/backend/BackendManager.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_BACKEND_MANAGER_H__
-#define __NEURUN_BACKEND_BACKEND_MANAGER_H__
-
-#include <memory>
-#include <map>
-
-#include "model/operand/Set.h"
-
-namespace neurun
-{
-namespace backend
-{
-
-struct IConfig;
-struct IStageGenerator;
-struct ITensorBuilder;
-
-class Backend
-{
-public:
- Backend(const std::shared_ptr<neurun::backend::IConfig> &backend_config,
- const std::shared_ptr<neurun::backend::IStageGenerator> &stage_gen);
-
- Backend(void) : _config(nullptr), _stage_gen(nullptr)
- {
- // DO NOTHING
- }
-
-public:
- const std::shared_ptr<neurun::backend::IConfig> config() const;
- const std::shared_ptr<neurun::backend::IStageGenerator> stage_gen() const;
- const std::shared_ptr<neurun::backend::ITensorBuilder> tensor_builder() const;
-
-private:
- std::shared_ptr<neurun::backend::IConfig> _config;
- std::shared_ptr<neurun::backend::IStageGenerator> _stage_gen;
-};
-
-class BackendManager
-{
-public:
- BackendManager(const neurun::model::operand::Set &operands);
-
- Backend *get(const std::string &key);
-
-private:
- std::map<std::string, Backend> _gen_map;
- /**
- * @brief Allocate an object of a plugin class by resolving the plugin function that
- * performs the allocation and calling it
- *
- * @param object_of_plugin_class target object
- * @param obj_creator_func_name name of the plugin function that allocates the object
- * @param handle handle of the loaded plugin
- * @param args arguments forwarded to the constructor of the plugin class
- *
- * @return
- */
- template <typename T, class... Types>
- void loadObjectFromPlugin(std::shared_ptr<T> &object_of_plugin_class,
- const std::string obj_creator_func_name, void *handle,
- Types &&... args);
-
- /**
- * @brief Load a backend plugin
- *
- * @param backend name of the backend to be loaded
- * @param operands operands used to construct the StageGenerator
- *
- * @return
- */
- void loadBackend(const std::string &backend, const neurun::model::operand::Set &operands);
-};
-
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_BACKEND_MANAGER_H__
diff --git a/runtimes/neurun/src/backend/CMakeLists.txt b/runtimes/neurun/src/backend/CMakeLists.txt
deleted file mode 100644
index a39823102..000000000
--- a/runtimes/neurun/src/backend/CMakeLists.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-add_subdirectory(cpu)
-add_subdirectory(acl_cl)
diff --git a/runtimes/neurun/src/backend/acl_cl/CMakeLists.txt b/runtimes/neurun/src/backend/acl_cl/CMakeLists.txt
deleted file mode 100644
index f1ea22bc5..000000000
--- a/runtimes/neurun/src/backend/acl_cl/CMakeLists.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-file(GLOB_RECURSE SOURCES "*.cc")
-
-add_library(${LIB_NEURUN_BACKEND_ACL_CL} SHARED ${SOURCES})
-
-target_include_directories(${LIB_NEURUN_BACKEND_ACL_CL} PUBLIC ${NNFW_INCLUDE_DIR})
-target_include_directories(${LIB_NEURUN_BACKEND_ACL_CL} PUBLIC ${NEURUN_INCLUDE_DIR})
-
-target_link_libraries(${LIB_NEURUN_BACKEND_ACL_CL} arm_compute)
-target_link_libraries(${LIB_NEURUN_BACKEND_ACL_CL} ${LIB_NEURUN_KERNEL_ACL_CL})
-target_link_libraries(${LIB_NEURUN_BACKEND_ACL_CL} ${LIB_NEURUN})
-
-target_compile_options(${LIB_NEURUN_BACKEND_ACL_CL} PRIVATE -Wall -Wextra -Werror -Wno-unused-parameter)
-
-set_target_properties(${LIB_NEURUN_BACKEND_ACL_CL} PROPERTIES OUTPUT_NAME backend_acl_cl)
-install(TARGETS ${LIB_NEURUN_BACKEND_ACL_CL} DESTINATION lib/neurun)
diff --git a/runtimes/neurun/src/backend/acl_cl/Config.cc b/runtimes/neurun/src/backend/acl_cl/Config.cc
deleted file mode 100644
index cad9b8988..000000000
--- a/runtimes/neurun/src/backend/acl_cl/Config.cc
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <arm_compute/runtime/CL/CLScheduler.h>
-
-#include "backend/acl_cl/Config.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace acl_cl
-{
-
-void Config::initialize() { arm_compute::CLScheduler::get().default_init(); }
-
-} // namespace acl_cl
-} // namespace backend
-} // namespace neurun
diff --git a/runtimes/neurun/src/backend/acl_cl/Config.h b/runtimes/neurun/src/backend/acl_cl/Config.h
deleted file mode 100644
index cb43bfbe0..000000000
--- a/runtimes/neurun/src/backend/acl_cl/Config.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_ACL_CL_CONFIG_H__
-#define __NEURUN_BACKEND_ACL_CL_CONFIG_H__
-
-#include "backend/interface/IConfig.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace acl_cl
-{
-
-class Config : public IConfig
-{
-public:
- Config()
- {
- // DO NOTHING
- }
-
- virtual std::string id() override { return "acl_cl"; }
- virtual void initialize() override;
- virtual graph::operand::Layout getOperandLayout() { return graph::operand::Layout::NCHW; }
- virtual bool SupportSubTensorAlloc() override { return true; }
-};
-
-} // namespace acl_cl
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_ACL_CL_CONFIG_H__
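For context, a backend Config only has to answer a few questions for the core: its id, one-time initialization, and whether it supports subtensor allocation. The sketch below shows the shape of that contract with a simplified stand-in interface and a hypothetical "my_backend" backend; the real interface lives in backend/interface/IConfig.h.

    #include <iostream>
    #include <string>

    // Simplified stand-in for backend/interface/IConfig.h (illustrative only)
    struct IConfig
    {
      virtual ~IConfig() = default;
      virtual std::string id() = 0;
      virtual void initialize() = 0;
      virtual bool SupportSubTensorAlloc() = 0;
    };

    // Hypothetical backend configuration implementing the same contract
    class MyBackendConfig : public IConfig
    {
    public:
      std::string id() override { return "my_backend"; }
      void initialize() override { /* one-time backend setup would go here */ }
      bool SupportSubTensorAlloc() override { return false; }
    };

    int main()
    {
      MyBackendConfig config;
      config.initialize();
      std::cout << config.id() << " supports subtensor allocation: " << std::boolalpha
                << config.SupportSubTensorAlloc() << "\n";
      return 0;
    }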
diff --git a/runtimes/neurun/src/backend/acl_cl/Convert.cc b/runtimes/neurun/src/backend/acl_cl/Convert.cc
deleted file mode 100644
index ed0a089c4..000000000
--- a/runtimes/neurun/src/backend/acl_cl/Convert.cc
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Convert.h"
-
-#include "Swizzle.h"
-#include "model/operand/DataType.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace acl_cl
-{
-
-::arm_compute::TensorShape asTensorShape(const ::neurun::model::operand::Shape &shape,
- bool apply_dim_correction)
-{
- const uint32_t rank = shape.rank();
-
- ::arm_compute::TensorShape res{};
-
- res.set_num_dimensions(rank);
-
- for (uint32_t axis = 0; axis < rank; ++axis)
- {
- // NOTE In some cases, keeping a seemingly incorrect dimension is required.
- // For example, input_size may be 1 in LSTM. The input-to-input weights
- // ([num_units, input_size]) of LSTM are reused as the weights of a FullyConnected layer,
- // and a FullyConnected weight tensor must have at least 2 dimensions.
- // However, if dimension correction is applied to input_to_input_weights with input_size
- // equal to 1, the tensor collapses to 1-D and can no longer be used as the
- // FullyConnected weights, so dimension correction must be skipped in that case.
- res.set(ToARMComputeAxis(rank, axis).value(), shape.dim(axis), apply_dim_correction);
- }
-
- return res;
-}
-
-::arm_compute::DataType asDataType(const ::neurun::model::operand::DataType &type)
-{
- switch (type)
- {
- case ::neurun::model::operand::DataType::SCALAR_FLOAT32:
- case ::neurun::model::operand::DataType::TENSOR_FLOAT32:
- return ::arm_compute::DataType::F32;
- case ::neurun::model::operand::DataType::SCALAR_INT32:
- case ::neurun::model::operand::DataType::TENSOR_INT32:
- return ::arm_compute::DataType::S32;
- case ::neurun::model::operand::DataType::SCALAR_UINT32:
- return ::arm_compute::DataType::U32;
- case ::neurun::model::operand::DataType::TENSOR_QUANT8_ASYMM:
- return ::arm_compute::DataType::QASYMM8;
- default:
- throw std::runtime_error("Not supported, yet");
- break;
- }
-}
-
-::arm_compute::QuantizationInfo asQuantizationInfo(const float scale, const int32_t offset)
-{
- return ::arm_compute::QuantizationInfo(scale, offset);
-}
-
-::arm_compute::TensorInfo asTensorInfo(const ::neurun::model::operand::Shape &shape,
- const ::neurun::model::operand::TypeInfo &typeInfo)
-{
- return ::arm_compute::TensorInfo(asTensorShape(shape), 1, asDataType(typeInfo.type()),
- asQuantizationInfo(typeInfo.scale(), typeInfo.offset()));
-}
-
-} // namespace acl_cl
-} // namespace backend
-} // namespace neurun
diff --git a/runtimes/neurun/src/backend/acl_cl/Convert.h b/runtimes/neurun/src/backend/acl_cl/Convert.h
deleted file mode 100644
index 1a233fb87..000000000
--- a/runtimes/neurun/src/backend/acl_cl/Convert.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_ACL_CL_CONVERT_H__
-#define __NEURUN_BACKEND_ACL_CL_CONVERT_H__
-
-#include <arm_compute/core/TensorInfo.h>
-#include <arm_compute/core/SubTensorInfo.h>
-#include <arm_compute/core/TensorShape.h>
-
-#include "model/operand/Object.h"
-#include "model/operand/Shape.h"
-#include "model/operand/TypeInfo.h"
-#include "misc/feature/Shape.h"
-#include "misc/kernel/Shape.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace acl_cl
-{
-
-::arm_compute::TensorShape asTensorShape(const ::neurun::model::operand::Shape &shape,
- bool apply_dim_correction = true);
-::arm_compute::DataType asDataType(const ::neurun::model::operand::DataType &type);
-::arm_compute::TensorInfo asTensorInfo(const ::neurun::model::operand::Shape &shape,
- const ::neurun::model::operand::TypeInfo &typeInfo);
-
-} // namespace acl_cl
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_ACL_CL_CONVERT_H__
diff --git a/runtimes/neurun/src/backend/acl_cl/PluginClassesAllocator.cc b/runtimes/neurun/src/backend/acl_cl/PluginClassesAllocator.cc
deleted file mode 100644
index f33e71d33..000000000
--- a/runtimes/neurun/src/backend/acl_cl/PluginClassesAllocator.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <memory>
-#include "TensorBuilder.h"
-#include "StageGenerator.h"
-#include "Config.h"
-#include "util/logging.h"
-
-extern "C" {
-neurun::backend::acl_cl::TensorBuilder *allocate_TensorBuilder()
-{
- VERBOSE(allocate_TensorBuilder) << "loaded from acl_cl\n";
- return new neurun::backend::acl_cl::TensorBuilder;
-}
-
-neurun::backend::acl_cl::StageGenerator *allocate_StageGenerator(
- const neurun::model::operand::Set &operand_ctx,
- const std::shared_ptr<neurun::backend::acl_cl::TensorBuilder> &tensor_builder)
-{
- VERBOSE(allocate_StageGenerator) << "loaded from acl_cl\n";
- return new neurun::backend::acl_cl::StageGenerator(operand_ctx, tensor_builder);
-}
-
-neurun::backend::acl_cl::Config *allocate_Config()
-{
- VERBOSE(allocate_Config) << "loaded from acl_cl\n";
- return new neurun::backend::acl_cl::Config;
-}
-}
diff --git a/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc b/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
deleted file mode 100644
index 89bbd7bd2..000000000
--- a/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
+++ /dev/null
@@ -1,593 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "backend/acl_cl/StageGenerator.h"
-
-#include "kernel/acl_cl/CLFunction.h"
-
-#include <arm_compute/runtime/CL/functions/CLConvolutionLayer.h>
-#include <arm_compute/runtime/CL/functions/CLPoolingLayer.h>
-#include <arm_compute/runtime/CL/functions/CLActivationLayer.h>
-#include <arm_compute/runtime/CL/functions/CLReshapeLayer.h>
-#include <arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h>
-#include <arm_compute/runtime/CL/functions/CLSoftmaxLayer.h>
-
-#include "kernel/acl_cl/ConcatLayer.h"
-
-#include "util/Padding.h"
-
-#include "model/operand/Index.h"
-
-#include "util/logging.h"
-
-#include "NeuralNetworks.h"
-
-#include "util/Utils.h"
-
-template <typename T> std::unique_ptr<T> make_layer(void) { return std::unique_ptr<T>{new T}; }
-
-std::unique_ptr<::neurun::kernel::acl_cl::CLFunction>
-make_cl_function(std::unique_ptr<::arm_compute::IFunction> &&layer)
-{
- return std::unique_ptr<::neurun::kernel::acl_cl::CLFunction>(
- new ::neurun::kernel::acl_cl::CLFunction(std::move(layer)));
-}
-
-::arm_compute::PadStrideInfo asPadStringInfo(const neurun::util::Padding &padding,
- const neurun::util::Stride &stride)
-{
- return ::arm_compute::PadStrideInfo{stride.horizontal,
- stride.vertical,
- padding.left,
- padding.right,
- padding.top,
- padding.bottom,
- ::arm_compute::DimensionRoundingType::FLOOR};
-}
-
-namespace neurun
-{
-namespace backend
-{
-namespace acl_cl
-{
-
-//
-// ActivationBuilder
-//
-class ActivationBuilder
-{
-public:
- ActivationBuilder(IExecutionBuilder &builder) : _builder(builder)
- {
- // DO NOTHING
- }
-
-private:
- void appendReLU(::arm_compute::ICLTensor *tensor);
-
-public:
- void append(FuseCode code, ::arm_compute::ICLTensor *tensor);
-
-private:
- IExecutionBuilder &_builder;
-};
-
-void ActivationBuilder::appendReLU(::arm_compute::ICLTensor *ifm_alloc)
-{
- const ::arm_compute::ActivationLayerInfo act_info{
- ::arm_compute::ActivationLayerInfo::ActivationFunction::RELU};
-
- auto fn = make_layer<::arm_compute::CLActivationLayer>();
-
- fn->configure(ifm_alloc, nullptr, act_info);
-
- auto acl_fn = make_cl_function(std::move(fn));
-
- _builder.append(std::move(acl_fn));
-}
-
-void ActivationBuilder::append(FuseCode code, ::arm_compute::ICLTensor *ifm_alloc)
-{
- switch (code)
- {
- case ANEURALNETWORKS_FUSED_NONE:
- {
- // DO NOTHING
- break;
- }
- case ANEURALNETWORKS_FUSED_RELU:
- {
- appendReLU(ifm_alloc);
- break;
- }
- default:
- {
- throw std::runtime_error("Not supported, yet");
- }
- }
-}
-
-//
-// StageGenerator
-//
-StageGenerator::StageGenerator(const neurun::model::operand::Set &ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder)
- : _ctx(ctx), _tensor_builder(tensor_builder)
-{
- // DO NOTHING
-}
-
-void StageGenerator::visit(const model::operation::Conv2DNode &node)
-{
- using model::operation::Conv2DNode;
-
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(Conv2DNode::Input::INPUT)};
- const auto ker_index{node.getInputs().at(Conv2DNode::Input::KERNEL)};
- const auto bias_index{node.getInputs().at(Conv2DNode::Input::BIAS)};
-
- const auto vstride_index{node.param().vstride_index};
- const auto hstride_index{node.param().hstride_index};
-
- const auto padding_index{node.param().padding_index};
- const auto activation_index{node.param().activation_index};
-
- const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
- const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
- const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
-
- const PaddingCode padding_type =
- static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
-
- assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
- (ANEURALNETWORKS_PADDING_VALID == padding_type));
-
- neurun::util::Stride stride;
-
- stride.vertical = _ctx.at(vstride_index).asScalar<int32_t>();
- stride.horizontal = _ctx.at(hstride_index).asScalar<int32_t>();
-
- // Construct operation parameters
- struct Param
- {
- model::operand::Index ofm_index;
- model::operand::Index ifm_index;
- model::operand::Index ker_index;
- model::operand::Index bias_index;
-
- neurun::util::Padding padding;
- neurun::util::Stride stride;
-
- FuseCode activation;
- };
-
- Param param;
-
- param.ofm_index = ofm_index;
- param.ifm_index = ifm_index;
- param.ker_index = ker_index;
- param.bias_index = bias_index;
-
- param.stride = stride;
- param.padding =
- (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? neurun::util::same_padding(ifm_shape, ofm_shape, stride, ker_shape.W, ker_shape.H)
- : neurun::util::valid_padding();
-
- param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
-
- auto tensors = _tensor_builder;
-
- returnStage([tensors, param](IExecutionBuilder &builder) {
- auto ofm_alloc = tensors->at(param.ofm_index).get();
- auto ifm_alloc = tensors->at(param.ifm_index).get();
- auto ker_alloc = tensors->at(param.ker_index).get();
- auto bias_alloc = tensors->at(param.bias_index).get();
-
- const auto conv_info = asPadStringInfo(param.padding, param.stride);
-
- std::unique_ptr<::arm_compute::CLConvolutionLayer> fn{new ::arm_compute::CLConvolutionLayer};
-
- fn->configure(ifm_alloc->handle(), ker_alloc->handle(), bias_alloc->handle(),
- ofm_alloc->handle(), conv_info);
-
- auto acl_fn = make_cl_function(std::move(fn));
-
- builder.append(std::move(acl_fn));
-
- ActivationBuilder{builder}.append(param.activation, ofm_alloc->handle());
- });
-}
-
-void StageGenerator::visit(const model::operation::MaxPool2DNode &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(model::operation::MaxPool2DNode::Input::INPUT)};
-
- const auto kh_index{node.param().kh_index};
- const auto kw_index{node.param().kw_index};
-
- const auto vstride_index{node.param().vstride_index};
- const auto hstride_index{node.param().hstride_index};
-
- const auto padding_index{node.param().padding_index};
-
- const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
- const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
-
- const int32_t kh = _ctx.at(kh_index).asScalar<int32_t>();
- const int32_t kw = _ctx.at(kw_index).asScalar<int32_t>();
-
- const int32_t vstride = _ctx.at(vstride_index).asScalar<int32_t>();
- const int32_t hstride = _ctx.at(hstride_index).asScalar<int32_t>();
-
- const PaddingCode padding_type =
- static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
-
- // Construct operation parameters
- struct Param
- {
- model::operand::Index ofm_index;
- model::operand::Index ifm_index;
-
- uint32_t kw;
- uint32_t kh;
-
- neurun::util::Padding padding;
- neurun::util::Stride stride;
-
- // TODO Add 'activation' field
- };
-
- Param param;
-
- param.ofm_index = ofm_index;
- param.ifm_index = ifm_index;
-
- param.kh = kh;
- param.kw = kw;
-
- param.stride.vertical = vstride;
- param.stride.horizontal = hstride;
-
- param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? neurun::util::same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
- : neurun::util::valid_padding();
-
- VERBOSE(MaxPool2D) << "IFM_H: " << ifm_shape.H << std::endl;
- VERBOSE(MaxPool2D) << "IFM_W: " << ifm_shape.W << std::endl;
- VERBOSE(MaxPool2D) << "OFM_H: " << ofm_shape.H << std::endl;
- VERBOSE(MaxPool2D) << "OFM_W: " << ofm_shape.W << std::endl;
- VERBOSE(MaxPool2D) << "KER_H: " << kh << std::endl;
- VERBOSE(MaxPool2D) << "KER_W: " << kw << std::endl;
- VERBOSE(MaxPool2D) << "STRIDE_H: " << vstride << std::endl;
- VERBOSE(MaxPool2D) << "STRIDE_W: " << hstride << std::endl;
- VERBOSE(MaxPool2D) << "PAD(T): " << param.padding.top << std::endl;
- VERBOSE(MaxPool2D) << "PAD(B): " << param.padding.bottom << std::endl;
- VERBOSE(MaxPool2D) << "PAD(L): " << param.padding.left << std::endl;
- VERBOSE(MaxPool2D) << "PAD(R): " << param.padding.right << std::endl;
-
- auto tensors = _tensor_builder;
-
- returnStage([tensors, param](IExecutionBuilder &builder) {
- auto ofm_alloc = tensors->at(param.ofm_index).get();
- auto ifm_alloc = tensors->at(param.ifm_index).get();
-
- ::arm_compute::PoolingLayerInfo info{::arm_compute::PoolingType::MAX,
- ::arm_compute::Size2D{param.kw, param.kh},
- asPadStringInfo(param.padding, param.stride)};
-
- std::unique_ptr<::arm_compute::CLPoolingLayer> fn{new ::arm_compute::CLPoolingLayer};
-
- fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), info);
-
- auto acl_fn = make_cl_function(std::move(fn));
-
- builder.append((std::move(acl_fn)));
- });
-}
-
-void StageGenerator::visit(const model::operation::AvgPool2DNode &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(model::operation::AvgPool2DNode::Input::INPUT)};
-
- const auto kh_index{node.param().kh_index};
- const auto kw_index{node.param().kw_index};
-
- const auto vstride_index{node.param().vstride_index};
- const auto hstride_index{node.param().hstride_index};
-
- const auto padding_index{node.param().padding_index};
-
- const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
- const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
-
- const int32_t kh = _ctx.at(kh_index).asScalar<int32_t>();
- const int32_t kw = _ctx.at(kw_index).asScalar<int32_t>();
-
- const int32_t vstride = _ctx.at(vstride_index).asScalar<int32_t>();
- const int32_t hstride = _ctx.at(hstride_index).asScalar<int32_t>();
-
- const PaddingCode padding_type =
- static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
-
- assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
- (ANEURALNETWORKS_PADDING_VALID == padding_type));
-
- // Construct operation parameters
- struct Param
- {
- model::operand::Index ofm_index;
- model::operand::Index ifm_index;
-
- uint32_t kw;
- uint32_t kh;
-
- neurun::util::Padding padding;
- neurun::util::Stride stride;
-
- // TODO Add 'activation' field
- };
-
- Param param;
-
- param.ofm_index = ofm_index;
- param.ifm_index = ifm_index;
-
- param.kh = kh;
- param.kw = kw;
-
- param.stride.vertical = vstride;
- param.stride.horizontal = hstride;
-
- param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? neurun::util::same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
- : neurun::util::valid_padding();
-
- VERBOSE(AvgPool2D) << "IFM_H: " << ifm_shape.H << std::endl;
- VERBOSE(AvgPool2D) << "IFM_W: " << ifm_shape.W << std::endl;
- VERBOSE(AvgPool2D) << "OFM_H: " << ofm_shape.H << std::endl;
- VERBOSE(AvgPool2D) << "OFM_W: " << ofm_shape.W << std::endl;
- VERBOSE(AvgPool2D) << "KER_H: " << kh << std::endl;
- VERBOSE(AvgPool2D) << "KER_W: " << kw << std::endl;
- VERBOSE(AvgPool2D) << "STRIDE_H: " << vstride << std::endl;
- VERBOSE(AvgPool2D) << "STRIDE_W: " << hstride << std::endl;
- VERBOSE(AvgPool2D) << "PAD: " << neurun::util::to_string(padding_type) << std::endl;
- VERBOSE(AvgPool2D) << "PAD(T): " << param.padding.top << std::endl;
- VERBOSE(AvgPool2D) << "PAD(B): " << param.padding.bottom << std::endl;
- VERBOSE(AvgPool2D) << "PAD(L): " << param.padding.left << std::endl;
- VERBOSE(AvgPool2D) << "PAD(R): " << param.padding.right << std::endl;
-
- auto tensors = _tensor_builder;
-
- returnStage([tensors, param](IExecutionBuilder &builder) {
- auto ofm_alloc = tensors->at(param.ofm_index).get();
- auto ifm_alloc = tensors->at(param.ifm_index).get();
-
- ::arm_compute::PoolingLayerInfo info{
- ::arm_compute::PoolingType::AVG, ::arm_compute::Size2D{param.kw, param.kh},
- asPadStringInfo(param.padding, param.stride), true /* exclude_padding */};
-
- std::unique_ptr<::arm_compute::CLPoolingLayer> fn{new ::arm_compute::CLPoolingLayer};
-
- fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), info);
-
- auto acl_fn = make_cl_function(std::move(fn));
-
- builder.append((std::move(acl_fn)));
- });
-}
-
-void StageGenerator::visit(const model::operation::ConcatNode &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto axis_index{node.param().axis_index};
-
- struct Param
- {
- model::operand::Index output_index;
- std::vector<model::operand::Index> input_indexes;
-
- int32_t axis;
- };
-
- Param param;
-
- param.output_index = ofm_index;
- for (const auto &e : node.getInputs())
- {
- param.input_indexes.emplace_back(e);
- }
- param.axis = _ctx.at(axis_index).asScalar<int32_t>();
-
- auto tensors = _tensor_builder;
-
- returnStage([tensors, param](IExecutionBuilder &builder) {
- // Check whether every input is allocated as a subtensor of the output
- bool canEliminate = true;
- for (auto ifm_ind : param.input_indexes)
- {
- if (!tensors->isSubTensorOf(param.output_index, ifm_ind))
- {
- canEliminate = false;
- break;
- }
- }
- if (canEliminate)
- {
- // If concat eliminated, return with nothing to do
- return;
- }
-
- auto output_alloc = tensors->at(param.output_index).get();
-
- std::vector<::neurun::backend::acl_cl::operand::ICLTensor *> input_allocs;
- for (auto ifm_ind : param.input_indexes)
- {
- input_allocs.emplace_back(
- dynamic_cast<::neurun::backend::acl_cl::operand::CLTensor *>(tensors->at(ifm_ind).get()));
- }
-
- std::unique_ptr<::neurun::kernel::acl_cl::ConcatLayer> fn{
- new ::neurun::kernel::acl_cl::ConcatLayer};
-
- fn->configure(input_allocs, param.axis,
- dynamic_cast<::neurun::backend::acl_cl::operand::CLTensor *>(output_alloc));
-
- auto acl_fn = make_cl_function(std::move(fn));
-
- builder.append(std::move(acl_fn));
- });
-}
-
-void StageGenerator::visit(const model::operation::FullyConnectedNode &node)
-{
- using model::operation::FullyConnectedNode;
-
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(FullyConnectedNode::Input::INPUT)};
- const auto weight_index{node.getInputs().at(FullyConnectedNode::Input::WEIGHT)};
- const auto bias_index{node.getInputs().at(FullyConnectedNode::Input::BIAS)};
- const auto activation_index{node.param().activation_index};
-
- // Construct operation parameters
- struct Param
- {
- model::operand::Index output_index;
-
- model::operand::Index input_index;
- model::operand::Index weight_index;
- model::operand::Index bias_index;
-
- FuseCode activation;
- };
-
- Param param;
-
- param.output_index = output_index;
- param.input_index = input_index;
- param.weight_index = weight_index;
- param.bias_index = bias_index;
-
- param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
-
- auto tensors = _tensor_builder;
-
- returnStage([tensors, param](IExecutionBuilder &builder) {
- auto output_alloc = tensors->at(param.output_index).get();
- auto input_alloc = tensors->at(param.input_index).get();
- auto weight_alloc = tensors->at(param.weight_index).get();
- auto bias_alloc = tensors->at(param.bias_index).get();
-
- auto fn = make_layer<::arm_compute::CLFullyConnectedLayer>();
-
- fn->configure(input_alloc->handle(), weight_alloc->handle(), bias_alloc->handle(),
- output_alloc->handle());
-
- auto acl_fn = make_cl_function(std::move(fn));
-
- builder.append(std::move(acl_fn));
-
- ActivationBuilder{builder}.append(param.activation, output_alloc->handle());
- });
-}
-
-void StageGenerator::visit(const model::operation::ReshapeNode &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(model::operation::ReshapeNode::Input::INPUT)};
-
- struct Param
- {
- model::operand::Index output_index;
- model::operand::Index input_index;
- };
-
- Param param;
-
- param.output_index = output_index;
- param.input_index = input_index;
-
- auto tensors = _tensor_builder;
-
- returnStage([tensors, param](IExecutionBuilder &builder) {
- auto output_alloc = tensors->at(param.output_index).get();
- auto input_alloc = tensors->at(param.input_index).get();
-
- auto fn = make_layer<::arm_compute::CLReshapeLayer>();
-
- fn->configure(input_alloc->handle(), output_alloc->handle());
-
- auto acl_fn = make_cl_function(std::move(fn));
-
- builder.append(std::move(acl_fn));
- });
-}
-
-void StageGenerator::visit(const model::operation::SoftmaxNode &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(model::operation::SoftmaxNode::Input::INPUT)};
- const auto scale_index{node.param().scale_index};
-
- assert(_ctx.at(scale_index).shape().rank() == 0);
-
- struct Param
- {
- model::operand::Index output_index;
- model::operand::Index input_index;
- float scale;
- };
-
- Param param;
-
- param.output_index = output_index;
- param.input_index = input_index;
- param.scale = _ctx.at(scale_index).asScalar<float>();
-
- auto tensors = _tensor_builder;
-
- returnStage([tensors, param](IExecutionBuilder &builder) {
- auto output_alloc = tensors->at(param.output_index).get();
- auto input_alloc = tensors->at(param.input_index).get();
-
- auto fn = make_layer<::arm_compute::CLSoftmaxLayer>();
-
- fn->configure(input_alloc->handle(), output_alloc->handle(), param.scale);
-
- auto acl_fn = make_cl_function(std::move(fn));
-
- builder.append(std::move(acl_fn));
- });
-}
-
-void StageGenerator::visit(const model::operation::PermuteNode & /* node */)
-{
- throw "Unsupported";
-}
-
-void StageGenerator::visit(const model::operation::AddNode &)
-{
- VERBOSE(Add) << "generate CPU Add" << std::endl;
-
- throw std::runtime_error("NYI");
-}
-
-} // namespace acl_cl
-} // namespace backend
-} // namespace neurun
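Every visit() above follows the same deferred-execution pattern: copy the operand indexes and parameters into a plain struct, then return a stage closure that configures and appends the ACL function only when the execution plan is assembled. The standalone sketch below mirrors that pattern; Param, IExecutionBuilder, and Stage here are simplified stand-ins, not the real neurun interfaces.

    #include <functional>
    #include <iostream>
    #include <vector>

    // Simplified stand-ins for the real interfaces (illustrative only)
    struct Param
    {
      int ofm_index;
      int ifm_index;
    };

    struct IExecutionBuilder
    {
      void append(std::function<void()> fn) { _fns.push_back(std::move(fn)); }
      void run()
      {
        for (auto &f : _fns)
          f();
      }
      std::vector<std::function<void()>> _fns;
    };

    using Stage = std::function<void(IExecutionBuilder &)>;

    Stage make_stage(int ofm, int ifm)
    {
      Param param{ofm, ifm}; // captured by value, so it outlives the visit() call
      return [param](IExecutionBuilder &builder) {
        // In the real generator this is where the ACL layer would be configured
        builder.append([param] {
          std::cout << "run kernel: ifm=" << param.ifm_index << " ofm=" << param.ofm_index << "\n";
        });
      };
    }

    int main()
    {
      IExecutionBuilder builder;
      make_stage(/*ofm=*/1, /*ifm=*/0)(builder);
      builder.run();
      return 0;
    }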
diff --git a/runtimes/neurun/src/backend/acl_cl/StageGenerator.h b/runtimes/neurun/src/backend/acl_cl/StageGenerator.h
deleted file mode 100644
index 1dac2592b..000000000
--- a/runtimes/neurun/src/backend/acl_cl/StageGenerator.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_ACL_CL_STAGE_GENERATOR_H__
-#define __NEURUN_BACKEND_ACL_CL_STAGE_GENERATOR_H__
-
-#include "backend/interface/IStageGenerator.h"
-
-#include "model/operand/Set.h"
-#include "backend/acl_cl/TensorBuilder.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace acl_cl
-{
-
-class StageGenerator : public IStageGenerator
-{
-public:
- StageGenerator(const neurun::model::operand::Set &ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder);
-
- virtual std::shared_ptr<ITensorBuilder> tensor_builder() override { return _tensor_builder; }
-
-#define OP(InternalName, IsNnApi, NnApiName) \
- virtual void visit(const model::operation::InternalName &) override;
-#include "model/operation/Op.lst"
-#undef OP
-
-private:
- const neurun::model::operand::Set &_ctx;
- std::shared_ptr<TensorBuilder> _tensor_builder;
-};
-
-} // namespace acl_cl
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_ACL_CL_STAGE_GENERATOR_H__
diff --git a/runtimes/neurun/src/backend/acl_cl/Swizzle.h b/runtimes/neurun/src/backend/acl_cl/Swizzle.h
deleted file mode 100644
index 838e57162..000000000
--- a/runtimes/neurun/src/backend/acl_cl/Swizzle.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_ACL_CL_SWIZZLE_H__
-#define __NEURUN_BACKEND_ACL_CL_SWIZZLE_H__
-
-#include <cassert>
-
-namespace neurun
-{
-namespace backend
-{
-namespace acl_cl
-{
-
-class ARMComputeAxis
-{
-public:
- ARMComputeAxis() = default;
-
-public:
- explicit ARMComputeAxis(uint32_t value) : _value{value}
- {
- // DO NOTHING
- }
-
-public:
- uint32_t value(void) const { return _value; }
-
-private:
- uint32_t _value;
-};
-
-// Convert a TensorFlow Lite / NNAPI axis (layout ...NHWC) to an ARM Compute axis (layout WHCN...)
-inline ARMComputeAxis ToARMComputeAxis(uint32_t rank, uint32_t axis)
-{
- assert(rank > axis);
- const ARMComputeAxis reversed{(rank - axis) - 1};
-
- if (rank < 4)
- {
- return reversed;
- }
-
- // DEPTH
- if (0 == reversed.value())
- {
- return ARMComputeAxis{2};
- }
- // WIDTH
- if (1 == reversed.value())
- {
- return ARMComputeAxis{0};
- }
- // HEIGHT
- if (2 == reversed.value())
- {
- return ARMComputeAxis{1};
- }
-
- // ELSE
- return reversed;
-}
-
-template <typename T> inline T ReorderBits(T in, size_t numOfBits)
-{
- assert(numOfBits > 0);
- T out = 0;
- for (int32_t i = numOfBits - 1; i >= 0; --i)
- {
- const uint32_t toShift = numOfBits - ToARMComputeAxis(numOfBits, i).value() - 1;
- out += ((in & 1) << toShift);
- in >>= 1;
- }
- return out;
-}
-
-} // namespace acl_cl
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_ACL_CL_SWIZZLE_H__
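A quick worked example of the mapping above: for a rank-4 NHWC operand, axes N=0, H=1, W=2, C=3 land on ARM Compute axes 3, 1, 0, 2, while lower ranks are simply reversed. The sketch below re-implements the mapping locally so the expected values can be checked in isolation; it is not part of the runtime.

    #include <cassert>
    #include <cstdint>

    // Local copy of the ToARMComputeAxis logic above, for illustration only
    static uint32_t to_acl_axis(uint32_t rank, uint32_t axis)
    {
      assert(rank > axis);
      const uint32_t reversed = (rank - axis) - 1;
      if (rank < 4)
        return reversed;
      if (reversed == 0)
        return 2; // depth (C)
      if (reversed == 1)
        return 0; // width (W)
      if (reversed == 2)
        return 1; // height (H)
      return reversed; // batch (N) and any higher dimensions stay reversed
    }

    int main()
    {
      // Rank-4 NHWC operand: N=0, H=1, W=2, C=3 map to ACL axes 3, 1, 0, 2
      assert(to_acl_axis(4, 0) == 3); // N
      assert(to_acl_axis(4, 1) == 1); // H
      assert(to_acl_axis(4, 2) == 0); // W
      assert(to_acl_axis(4, 3) == 2); // C
      // Rank-2 operands are simply reversed
      assert(to_acl_axis(2, 0) == 1);
      assert(to_acl_axis(2, 1) == 0);
      return 0;
    }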
diff --git a/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc b/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc
deleted file mode 100644
index b5c038200..000000000
--- a/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "backend/acl_cl/TensorBuilder.h"
-
-#include <cassert>
-#include <stack>
-
-#include "operand/Object.h"
-#include "Convert.h"
-
-#include "util/logging.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace acl_cl
-{
-
-TensorBuilder::TensorBuilder()
-{
- // DO NOTHING
-}
-
-void TensorBuilder::registerTensorInfo(const model::operand::Index &ind,
- const compiler::TensorInfo &info)
-{
- assert(_tensors.size() == 0);
-
- _tensor_info_map.insert({ind, info});
-}
-
-void TensorBuilder::registerSubTensorInfo(const model::operand::Index &ind,
- const compiler::SubTensorInfo &info)
-{
- assert(_tensors.size() == 0);
-
- _subtensor_info_map.insert({ind, info});
-}
-
-void TensorBuilder::notifyFirstUse(const model::operand::Index &)
-{
- // DO NOTHING
-}
-
-void TensorBuilder::notifyLastUse(const model::operand::Index &)
-{
- // DO NOTHING
-}
-
-void TensorBuilder::prepare(void)
-{
- assert(_tensors.size() == 0);
-
- // TODO Handle SubTensor(subsumption)
- // Currently this TensorBuilder does not have subsumption info yet
- // Allocated subtensor will be mapped to _subtensors instead of _tensors
- assert(_subtensors.size() == 0);
-
- for (auto &entry : _tensor_info_map)
- {
- auto ind = entry.first;
- const auto &info = entry.second;
- auto tensor = std::make_shared<::neurun::backend::acl_cl::operand::CLTensor>(info);
- _tensors[ind] = tensor;
- }
-
- // To create a subtensor, its parent tensor must be created first.
- // A stack guarantees this ordering:
- // 1) Push one subtensor index onto the stack (iterating over all subtensors)
- // 2) If the tensor at the stack top is already created, pop it and go to 4)
- // 3) Otherwise, check its parent tensor
- // 3-1) If the parent tensor is already created, the child can be created:
- // create it, pop, and go to 4)
- // 3-2) If the parent tensor is not created yet, the child cannot be created:
- // push the parent tensor index onto the stack and go to 4)
- // 4) If the stack is empty, return to 1); otherwise return to 2)
- for (auto &entry : _subtensor_info_map)
- {
- model::operand::Index ind = entry.first;
-
- std::stack<model::operand::Index> stack;
- stack.push(ind);
-
- while (!stack.empty())
- {
- const auto current = stack.top();
- const auto &info = _subtensor_info_map.at(current);
-
- // Already generated CLSubTensor
- if (_subtensors.find(current) != _subtensors.end())
- {
- stack.pop();
- continue;
- }
-
- auto parent = info.parent();
- std::shared_ptr<::neurun::backend::acl_cl::operand::ICLTensor> parent_tensor;
-
- if (_tensors.find(parent) != _tensors.end())
- {
- // Parent is allocated as tensor
- parent_tensor = _tensors[parent];
- }
- else if (_subtensors.find(parent) != _subtensors.end())
- {
- // Parent is allocated as subtensor
- parent_tensor = _subtensors[parent];
- }
- else
- {
- // Cannot find allocated parent tensor: allocate parent first
- assert(_subtensor_info_map.find(parent) != _subtensor_info_map.end());
- stack.push(parent);
- continue;
- }
- assert(parent_tensor != nullptr);
-
- // Child's type should be same with parent
- assert(info.type().offset() == parent_tensor->info()->quantization_info().offset);
- assert(info.type().scale() == parent_tensor->info()->quantization_info().scale);
- assert(asDataType(info.type().type()) == parent_tensor->info()->data_type());
- auto shape = asTensorShape(info.shape());
-
- // Only axis 3 (channel) is supported
- ::arm_compute::Coordinates coordinates;
- coordinates.set_num_dimensions(4);
- assert(info.offset().h() == 0);
- assert(info.offset().n() == 0);
- assert(info.offset().w() == 0);
- coordinates[2] = info.offset().c();
- auto tensor = std::make_shared<::neurun::backend::acl_cl::operand::CLSubTensor>(
- parent_tensor.get(), shape, coordinates, true);
- _subtensors[current] = tensor;
- stack.pop();
- }
- }
-}
-
-void TensorBuilder::allocate(void)
-{
- assert(_tensor_info_map.size() == _tensors.size());
-
- for (const auto &tensor_entry : _tensors)
- {
- auto tensor = tensor_entry.second;
- tensor->allocator()->allocate();
- }
-}
-
-std::shared_ptr<::neurun::backend::operand::ITensor>
-TensorBuilder::tensorAt(const model::operand::Index &ind)
-{
- if (_tensors.find(ind) != _tensors.end())
- {
- return _tensors.at(ind);
- }
- else
- {
- return _subtensors.at(ind);
- }
-}
-
-std::shared_ptr<backend::operand::IObject>
-TensorBuilder::wrapTensor(const model::operand::Index &ind)
-{
- if (_objects.find(ind) != _objects.end())
- {
- return _objects.at(ind);
- }
- else
- {
- if (_tensors.find(ind) != _tensors.end())
- {
- return _objects[ind] = std::make_shared<operand::Object>(_tensors.at(ind));
- }
- else
- {
- return _objects[ind] = std::make_shared<operand::Object>(_subtensors.at(ind));
- }
- }
-}
-
-void TensorBuilder::iterate(const IterateFunction &fn)
-{
- for (auto it : _tensors)
- {
- fn(it.first);
- }
- for (auto it : _subtensors)
- {
- fn(it.first);
- }
-}
-
-std::shared_ptr<::neurun::backend::acl_cl::operand::ICLTensor>
-TensorBuilder::at(const ::neurun::model::operand::Index &ind)
-{
- if (_tensors.find(ind) != _tensors.end())
- {
- return _tensors.at(ind);
- }
- else
- {
- return _subtensors.at(ind);
- }
-}
-
-bool TensorBuilder::isSubTensorOf(const model::operand::Index &parent,
- const model::operand::Index &child)
-{
- if (_subtensor_info_map.find(child) == _subtensor_info_map.end())
- {
- return false;
- }
-
- if (_subtensors.find(child) == _subtensors.end())
- {
- return false;
- }
-
- if (_subtensor_info_map.at(child).parent() != parent)
- {
- return false;
- }
-
- return true;
-}
-
-} // namespace acl_cl
-} // namespace backend
-} // namespace neurun
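The stack-driven ordering in prepare() can be reduced to plain indexes to see why parents are always created before children. The sketch below uses a made-up child-to-parent map (subtensor indexes 2, 3, 5, with index 1 standing for an ordinary tensor); it only mirrors the ordering logic, not the actual CLSubTensor creation.

    #include <iostream>
    #include <map>
    #include <set>
    #include <stack>

    int main()
    {
      // child -> parent; index 1 is an ordinary tensor, so it has no entry here
      const std::map<int, int> parent_of{{2, 3}, {3, 1}, {5, 1}};
      std::set<int> created; // subtensors that have already been created

      for (const auto &entry : parent_of)
      {
        std::stack<int> stack;
        stack.push(entry.first);
        while (!stack.empty())
        {
          const int current = stack.top();
          if (created.count(current))
          {
            stack.pop();
            continue;
          }
          const int parent = parent_of.at(current);
          // A parent that is itself an uncreated subtensor must be created first
          if (parent_of.count(parent) && !created.count(parent))
          {
            stack.push(parent);
            continue;
          }
          std::cout << "create subtensor " << current << " under parent " << parent << "\n";
          created.insert(current);
          stack.pop();
        }
      }
      return 0;
    }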
diff --git a/runtimes/neurun/src/backend/acl_cl/TensorBuilder.h b/runtimes/neurun/src/backend/acl_cl/TensorBuilder.h
deleted file mode 100644
index 64d81721a..000000000
--- a/runtimes/neurun/src/backend/acl_cl/TensorBuilder.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_ACL_CL_TENSOR_BUILDER_H__
-#define __NEURUN_BACKEND_ACL_CL_TENSOR_BUILDER_H__
-
-#include "backend/interface/ITensorBuilder.h"
-#include "backend/acl_cl/operand/CLTensor.h"
-#include "backend/acl_cl/operand/CLSubTensor.h"
-#include "backend/acl_cl/operand/Object.h"
-
-#include <unordered_map>
-
-namespace neurun
-{
-namespace backend
-{
-namespace acl_cl
-{
-
-class TensorBuilder : public ITensorBuilder
-{
-public:
- TensorBuilder();
-
- /**
- * @brief Register tensor information to allocate on ACL-CL backend
- * @param[in] ind Operand index
- * @param[in] info Tensor information
- */
- virtual void registerTensorInfo(const model::operand::Index &ind,
- const compiler::TensorInfo &info) override;
- /**
- * @brief Register subtensor information to allocate on ACL-CL backend
- * @param[in] ind Operand index
- * @param[in] info Tensor information
- */
- virtual void registerSubTensorInfo(const model::operand::Index &ind,
- const compiler::SubTensorInfo &info) override;
-
- virtual void notifyFirstUse(const model::operand::Index &) override;
- virtual void notifyLastUse(const model::operand::Index &) override;
-
- virtual void prepare(void) override;
- virtual void allocate(void) override;
-
- virtual std::shared_ptr<::neurun::backend::operand::ITensor>
- tensorAt(const model::operand::Index &ind) override;
- virtual std::shared_ptr<backend::operand::IObject>
- wrapTensor(const model::operand::Index &ind) override;
- virtual void iterate(const IterateFunction &fn) override;
-
- std::shared_ptr<::neurun::backend::acl_cl::operand::ICLTensor>
- at(const ::neurun::model::operand::Index &ind);
- /**
- * @brief Check child tensor is allocated as subtensor of parent tensor
- * @param[in] parent Index of parent
- * @param[in] child Index of child
- * @return @c true if child is allocated as subtensor of parent, otherwise @c false
- */
- bool isSubTensorOf(const model::operand::Index &parent, const model::operand::Index &child);
-
-private:
- std::unordered_map<model::operand::Index, compiler::TensorInfo> _tensor_info_map;
- std::unordered_map<model::operand::Index, compiler::SubTensorInfo> _subtensor_info_map;
- std::unordered_map<model::operand::Index,
- std::shared_ptr<::neurun::backend::acl_cl::operand::CLTensor>>
- _tensors;
- std::unordered_map<model::operand::Index,
- std::shared_ptr<::neurun::backend::acl_cl::operand::CLSubTensor>>
- _subtensors;
- std::unordered_map<model::operand::Index,
- std::shared_ptr<::neurun::backend::acl_cl::operand::Object>>
- _objects;
-};
-
-} // namespace acl_cl
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_ACL_CL_TENSOR_BUILDER_H__
diff --git a/runtimes/neurun/src/backend/acl_cl/operand/CLSubTensor.cc b/runtimes/neurun/src/backend/acl_cl/operand/CLSubTensor.cc
deleted file mode 100644
index f64b521dd..000000000
--- a/runtimes/neurun/src/backend/acl_cl/operand/CLSubTensor.cc
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "CLSubTensor.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace acl_cl
-{
-namespace operand
-{
-
-CLSubTensor::CLSubTensor(ICLTensor *parent, const arm_compute::TensorShape &tensor_shape,
- const arm_compute::Coordinates &coords, bool extend_parent)
- : _cl_sub_tensor(std::make_shared<arm_compute::CLSubTensor>(parent->handle(), tensor_shape,
- coords, extend_parent))
-{
- // DO NOTHING
-}
-
-arm_compute::CLSubTensor *CLSubTensor::handle() const { return _cl_sub_tensor.get(); }
-
-arm_compute::CLSubTensor *CLSubTensor::handle() { return _cl_sub_tensor.get(); }
-
-void CLSubTensor::map(bool blocking) { _cl_sub_tensor->map(blocking); }
-
-void CLSubTensor::unmap() { _cl_sub_tensor->unmap(); }
-
-uint8_t *CLSubTensor::doMap(cl::CommandQueue &q, bool blocking)
-{
- assert(cl_buffer().get() == nullptr);
- return static_cast<uint8_t *>(q.enqueueMapBuffer(cl_buffer(), blocking ? CL_TRUE : CL_FALSE,
- CL_MAP_READ | CL_MAP_WRITE, 0,
- info()->total_size()));
-}
-
-void CLSubTensor::doUnmap(cl::CommandQueue &q)
-{
- assert(cl_buffer().get() == nullptr);
- q.enqueueUnmapMemObject(cl_buffer(), buffer());
-}
-
-} // namespace operand
-} // namespace acl_cl
-} // namespace backend
-} // namespace neurun
diff --git a/runtimes/neurun/src/backend/acl_cl/operand/CLSubTensor.h b/runtimes/neurun/src/backend/acl_cl/operand/CLSubTensor.h
deleted file mode 100644
index cef78c196..000000000
--- a/runtimes/neurun/src/backend/acl_cl/operand/CLSubTensor.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_ACL_CL_OPERAND_CL_SUB_TENSOR_H__
-#define __NEURUN_BACKEND_ACL_CL_OPERAND_CL_SUB_TENSOR_H__
-
-#include <arm_compute/runtime/CL/CLSubTensor.h>
-#include "ICLTensor.h"
-#include "compiler/SubTensorInfo.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace acl_cl
-{
-namespace operand
-{
-
-class CLSubTensor : public ICLTensor
-{
-public:
- CLSubTensor() = delete;
-
-public:
- CLSubTensor(ICLTensor *parent, const arm_compute::TensorShape &tensor_shape,
- const arm_compute::Coordinates &coords, bool extend_parent = false);
-
-public:
- arm_compute::CLSubTensor *handle() const override;
- arm_compute::CLSubTensor *handle() override;
-
-public:
- void map(bool blocking = true);
- void unmap();
-
-protected:
- uint8_t *doMap(cl::CommandQueue &q, bool blocking) override;
- virtual void doUnmap(cl::CommandQueue &q) override;
-
-private:
- std::shared_ptr<arm_compute::CLSubTensor> _cl_sub_tensor;
-};
-
-} // namespace operand
-} // namespace acl_cl
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_ACL_CL_OPERAND_CL_SUB_TENSOR_H__
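A minimal sketch of how this class is meant to be used: a CLSubTensor is a window into an already-allocated parent tensor, described by an ACL shape and a starting coordinate. The shape and offset values below are invented for illustration, and the parent is assumed to be an allocated acl_cl::operand::CLTensor.

    #include <memory>
    #include <arm_compute/core/TensorShape.h>
    #include <arm_compute/core/Coordinates.h>
    #include "backend/acl_cl/operand/CLTensor.h"
    #include "backend/acl_cl/operand/CLSubTensor.h"

    // Sketch only: create a 2x2 view starting at (1, 1) inside `parent`.
    std::shared_ptr<neurun::backend::acl_cl::operand::CLSubTensor>
    make_view(neurun::backend::acl_cl::operand::CLTensor &parent)
    {
      arm_compute::TensorShape sub_shape(2, 2); // W, H of the view
      arm_compute::Coordinates start(1, 1);     // offset inside the parent
      // extend_parent stays false, so the view must fit inside the parent's shape.
      return std::make_shared<neurun::backend::acl_cl::operand::CLSubTensor>(&parent, sub_shape,
                                                                             start);
    }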
diff --git a/runtimes/neurun/src/backend/acl_cl/operand/CLTensor.cc b/runtimes/neurun/src/backend/acl_cl/operand/CLTensor.cc
deleted file mode 100644
index e7b718df3..000000000
--- a/runtimes/neurun/src/backend/acl_cl/operand/CLTensor.cc
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <arm_compute/runtime/CL/CLScheduler.h>
-#include <arm_compute/runtime/CL/CLMemory.h>
-#include <arm_compute/runtime/CL/CLMemoryRegion.h>
-#include "CLTensor.h"
-
-#include "backend/acl_cl/Convert.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace acl_cl
-{
-namespace operand
-{
-
-CLTensor::CLTensor(const compiler::TensorInfo &info)
- : _cl_tensor(std::make_shared<arm_compute::CLTensor>())
-{
- auto acl_cl_info = asTensorInfo(info.shape(), info.typeInfo());
- allocator()->init(acl_cl_info);
-}
-
-arm_compute::CLTensor *CLTensor::handle() const { return _cl_tensor.get(); }
-
-arm_compute::CLTensor *CLTensor::handle() { return _cl_tensor.get(); }
-
-arm_compute::CLTensorAllocator *CLTensor::allocator() { return _cl_tensor->allocator(); }
-
-void CLTensor::map(bool blocking) { _cl_tensor->map(blocking); }
-
-void CLTensor::unmap() { _cl_tensor->unmap(); }
-
-uint8_t *CLTensor::doMap(cl::CommandQueue &q, bool blocking)
-{
- return allocator()->map(q, blocking);
-}
-
-void CLTensor::doUnmap(cl::CommandQueue &q) { allocator()->unmap(q, buffer()); }
-
-// handle() is deprecated as of ACL v18.11
-// TODO Update this code to the current ACL API
-#if 0
-void CLTensor::setBuffer(void *host_ptr)
-{
-  // Create an empty CLMemory region from just a context. The flag is not used here,
-  // so it does not matter which flag is passed.
- auto memory = arm_compute::CLMemory(std::make_shared<arm_compute::CLBufferMemoryRegion>(
- arm_compute::CLScheduler::get().context(), CL_MEM_USE_HOST_PTR | CL_MEM_READ_WRITE, 0));
-
- // set buffer
- auto mem = reinterpret_cast<cl::Buffer *>(memory.region()->handle());
- *mem = cl::Buffer(arm_compute::CLScheduler::get().context(),
- CL_MEM_USE_HOST_PTR | CL_MEM_READ_WRITE, info()->total_size(), host_ptr);
- // set correct buffer size
- memory.region()->set_size(info()->total_size());
- // import memory
- allocator()->import_memory(memory);
-}
-#endif
-
-} // namespace operand
-} // namespace acl_cl
-} // namespace backend
-} // namespace neurun
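A hedged sketch of the typical lifetime of a tensor built from this class: construct it from the compiler-side TensorInfo, allocate on the device, then use the blocking map()/unmap() pair above to touch the data from the host. The TensorInfo is assumed to be produced elsewhere by the compiler, and namespaces are as used in the header above.

    #include <cstring>
    #include <memory>
    #include "backend/acl_cl/operand/CLTensor.h"

    // Sketch only: allocate a device tensor and zero-fill it from the host.
    std::shared_ptr<neurun::backend::acl_cl::operand::CLTensor>
    make_zeroed(const neurun::compiler::TensorInfo &info)
    {
      auto tensor = std::make_shared<neurun::backend::acl_cl::operand::CLTensor>(info);
      tensor->allocator()->allocate(); // device-side allocation via CLTensorAllocator

      tensor->map(); // blocking map: buffer() is host-visible until unmap()
      std::memset(tensor->buffer(), 0, tensor->total_size());
      tensor->unmap();

      return tensor;
    }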
diff --git a/runtimes/neurun/src/backend/acl_cl/operand/CLTensor.h b/runtimes/neurun/src/backend/acl_cl/operand/CLTensor.h
deleted file mode 100644
index 31c96e201..000000000
--- a/runtimes/neurun/src/backend/acl_cl/operand/CLTensor.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_ACL_CL_OPERAND_CL_TENSOR_H__
-#define __NEURUN_BACKEND_ACL_CL_OPERAND_CL_TENSOR_H__
-
-#include <arm_compute/core/TensorInfo.h>
-#include <arm_compute/runtime/CL/CLTensor.h>
-#include <arm_compute/runtime/CL/CLScheduler.h>
-#include "arm_compute/runtime/CL/CLTensorAllocator.h"
-#include "ICLTensor.h"
-#include "compiler/TensorInfo.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace acl_cl
-{
-namespace operand
-{
-
-class CLTensor : public ICLTensor
-{
-public:
- CLTensor() = delete;
-
-public:
- CLTensor(const compiler::TensorInfo &info);
-
-public:
- arm_compute::CLTensor *handle() const override;
- arm_compute::CLTensor *handle() override;
-
-public:
- arm_compute::CLTensorAllocator *allocator();
- void map(bool blocking = true);
- void unmap();
- void setBuffer(void *host_ptr);
-
-protected:
- uint8_t *doMap(cl::CommandQueue &q, bool blocking) override;
- void doUnmap(cl::CommandQueue &q) override;
-
-private:
- std::shared_ptr<arm_compute::CLTensor> _cl_tensor;
-};
-
-} // namespace operand
-} // namespace acl_cl
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_ACL_CL_OPERAND_CL_TENSOR_H__
diff --git a/runtimes/neurun/src/backend/acl_cl/operand/ICLTensor.cc b/runtimes/neurun/src/backend/acl_cl/operand/ICLTensor.cc
deleted file mode 100644
index 23d723de4..000000000
--- a/runtimes/neurun/src/backend/acl_cl/operand/ICLTensor.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-#include "ICLTensor.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace acl_cl
-{
-namespace operand
-{
-
-size_t ICLTensor::total_size() const { return info()->total_size(); }
-
-size_t ICLTensor::dimension(size_t index) const { return info()->dimension(index); }
-
-size_t ICLTensor::num_dimensions() const { return info()->num_dimensions(); }
-
-size_t ICLTensor::calcOffset(const neurun::util::feature::Coordinate4D &coords)
-{
- int32_t N = coords.n();
- int32_t C = coords.c();
- int32_t H = coords.h();
- int32_t W = coords.w();
-
- ::arm_compute::Coordinates coordinates{W, H, C, N};
- return info()->offset_element_in_bytes(coordinates);
-}
-
-arm_compute::DataType ICLTensor::data_type() const { return info()->data_type(); }
-
-uint8_t *ICLTensor::buffer() const { return handle()->buffer(); }
-
-const cl::Buffer &ICLTensor::cl_buffer() const { return handle()->cl_buffer(); }
-
-arm_compute::ITensorInfo *ICLTensor::info() const { return handle()->info(); }
-
-arm_compute::ITensorInfo *ICLTensor::info() { return handle()->info(); }
-
-void ICLTensor::map(cl::CommandQueue &q, bool blocking) { return handle()->map(q, blocking); }
-
-void ICLTensor::unmap(cl::CommandQueue &q) { return handle()->unmap(q); }
-
-void ICLTensor::clear(cl::CommandQueue &q) { return handle()->clear(q); }
-
-} // namespace operand
-} // namespace acl_cl
-} // namespace backend
-} // namespace neurun
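The only non-trivial logic here is calcOffset(): the runtime-side Coordinate4D carries (n, c, h, w), while ACL's Coordinates is built lowest dimension first, so the element is looked up as {W, H, C, N}. A small hedged sketch of a caller; the Coordinate4D value is assumed to be built elsewhere, since its constructor is not shown in this file, and the needed declarations are assumed to come in through ICLTensor.h.

    #include <cstddef>
    #include "backend/acl_cl/operand/ICLTensor.h"

    // Sketch only: byte offset of one element of a CL-backed tensor.
    size_t element_offset_bytes(neurun::backend::acl_cl::operand::ICLTensor &tensor,
                                const neurun::util::feature::Coordinate4D &coord)
    {
      // Internally (n, c, h, w) is swizzled into arm_compute::Coordinates{w, h, c, n}
      // before info()->offset_element_in_bytes() is queried.
      return tensor.calcOffset(coord);
    }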
diff --git a/runtimes/neurun/src/backend/acl_cl/operand/ICLTensor.h b/runtimes/neurun/src/backend/acl_cl/operand/ICLTensor.h
deleted file mode 100644
index 226fbf814..000000000
--- a/runtimes/neurun/src/backend/acl_cl/operand/ICLTensor.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_ACL_CL_OPERAND_I_CL_TENSOR_H__
-#define __NEURUN_BACKEND_ACL_CL_OPERAND_I_CL_TENSOR_H__
-
-#include <arm_compute/core/ITensorInfo.h>
-#include <arm_compute/core/CL/ICLTensor.h>
-#include "backend/interface/operand/ITensor.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace acl_cl
-{
-namespace operand
-{
-
-class ICLTensor : public ::neurun::backend::operand::ITensor
-{
-public:
- ICLTensor() = default;
- ICLTensor(const ICLTensor &) = delete;
- ICLTensor &operator=(const ICLTensor &) = delete;
- ICLTensor(ICLTensor &&) = default;
- ICLTensor &operator=(ICLTensor &&) = default;
- virtual ~ICLTensor() = default;
-
-public:
- virtual arm_compute::ICLTensor *handle() = 0;
- virtual arm_compute::ICLTensor *handle() const = 0;
-
-public:
- uint8_t *buffer() const override;
- size_t total_size() const override;
- size_t dimension(size_t index) const override;
- size_t num_dimensions() const override;
- size_t calcOffset(const neurun::util::feature::Coordinate4D &coords) override;
-
-public:
- arm_compute::DataType data_type() const;
- const cl::Buffer &cl_buffer() const;
- arm_compute::ITensorInfo *info() const;
- arm_compute::ITensorInfo *info();
- void map(cl::CommandQueue &q, bool blocking = true);
- void unmap(cl::CommandQueue &q);
- void clear(cl::CommandQueue &q);
-
-protected:
- virtual uint8_t *doMap(cl::CommandQueue &q, bool blocking) = 0;
- virtual void doUnmap(cl::CommandQueue &q) = 0;
-};
-
-} // namespace operand
-} // namespace acl_cl
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_ACL_CL_OPERAND_I_CL_TENSOR_H__
diff --git a/runtimes/neurun/src/backend/acl_cl/operand/Object.cc b/runtimes/neurun/src/backend/acl_cl/operand/Object.cc
deleted file mode 100644
index a84fa2366..000000000
--- a/runtimes/neurun/src/backend/acl_cl/operand/Object.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Object.h"
-
-#include <arm_compute/runtime/CL/CLScheduler.h>
-
-namespace neurun
-{
-namespace backend
-{
-namespace acl_cl
-{
-namespace operand
-{
-
-void Object::access(
- const std::function<void(::neurun::backend::operand::ITensor &tensor)> &fn) const
-{
- auto &queue = ::arm_compute::CLScheduler::get().queue();
-
- _tensor->map(queue);
- fn(*_tensor);
- _tensor->unmap(queue);
-}
-
-} // namespace operand
-} // namespace acl_cl
-} // namespace backend
-} // namespace neurun
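access() is the synchronization point between host code and the CL device: it maps the wrapped tensor on the scheduler's command queue, runs the callback against host-visible memory, and unmaps afterwards. A hedged usage sketch follows; the Object is assumed to wrap an already-allocated tensor.

    #include <cstring>
    #include "backend/acl_cl/operand/Object.h"

    // Sketch only: zero-fill a CL-backed operand through the map/unmap wrapper.
    void clear_operand(const neurun::backend::acl_cl::operand::Object &object)
    {
      object.access([](neurun::backend::operand::ITensor &tensor) {
        // The tensor is only mapped for the duration of this callback.
        std::memset(tensor.buffer(), 0, tensor.total_size());
      });
    }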
diff --git a/runtimes/neurun/src/backend/acl_cl/operand/Object.h b/runtimes/neurun/src/backend/acl_cl/operand/Object.h
deleted file mode 100644
index 4ba22b269..000000000
--- a/runtimes/neurun/src/backend/acl_cl/operand/Object.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_ACL_CL_OPERAND_OBJECT_H__
-#define __NEURUN_BACKEND_ACL_CL_OPERAND_OBJECT_H__
-
-#include <memory>
-
-#include "backend/interface/operand/IObject.h"
-#include "backend/acl_cl/operand/ICLTensor.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace acl_cl
-{
-namespace operand
-{
-
-class Object : public backend::operand::IObject
-{
-public:
- Object() = default;
-
-public:
- Object(const std::shared_ptr<::neurun::backend::acl_cl::operand::ICLTensor> &tensor)
- : _tensor{tensor}
- {
- // DO NOTHING
- }
-
-public:
- ::neurun::backend::acl_cl::operand::ICLTensor *ptr(void) const override { return _tensor.get(); }
-
-private:
- std::shared_ptr<::neurun::backend::acl_cl::operand::ICLTensor> _tensor;
-
-public:
- void
- access(const std::function<void(::neurun::backend::operand::ITensor &tensor)> &fn) const override;
-};
-
-} // namespace operand
-} // namespace acl_cl
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_ACL_CL_OPERAND_OBJECT_H__
diff --git a/runtimes/neurun/src/backend/cpu/CMakeLists.txt b/runtimes/neurun/src/backend/cpu/CMakeLists.txt
deleted file mode 100644
index dc4406a65..000000000
--- a/runtimes/neurun/src/backend/cpu/CMakeLists.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-file(GLOB_RECURSE SOURCES "*.cc")
-
-add_library(${LIB_NEURUN_BACKEND_CPU} SHARED ${SOURCES})
-
-target_include_directories(${LIB_NEURUN_BACKEND_CPU} PUBLIC ${NNFW_INCLUDE_DIR})
-target_include_directories(${LIB_NEURUN_BACKEND_CPU} PUBLIC ${NEURUN_INCLUDE_DIR})
-target_include_directories(${LIB_NEURUN_BACKEND_CPU} PUBLIC ${CMAKE_SOURCE_DIR}/externals/tensorflow)
-
-target_link_libraries(${LIB_NEURUN_BACKEND_CPU} tensorflow-lite)
-target_link_libraries(${LIB_NEURUN_BACKEND_CPU} nnfw_lib_misc)
-target_link_libraries(${LIB_NEURUN_BACKEND_CPU} nnfw_lib_cpp14)
-target_link_libraries(${LIB_NEURUN_BACKEND_CPU} ${LIB_NEURUN_KERNEL_CPU})
-target_link_libraries(${LIB_NEURUN_BACKEND_CPU} ${LIB_NEURUN})
-
-target_compile_options(${LIB_NEURUN_BACKEND_CPU} PRIVATE -Wall -Wextra -Werror)
-
-set_target_properties(${LIB_NEURUN_BACKEND_CPU} PROPERTIES OUTPUT_NAME backend_cpu)
-install(TARGETS ${LIB_NEURUN_BACKEND_CPU} DESTINATION lib/neurun)
diff --git a/runtimes/neurun/src/backend/cpu/Config.cc b/runtimes/neurun/src/backend/cpu/Config.cc
deleted file mode 100644
index 001ba9d02..000000000
--- a/runtimes/neurun/src/backend/cpu/Config.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "backend/cpu/Config.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace cpu
-{
-
-void Config::initialize()
-{
- // DO NOTHING
-}
-
-} // namespace cpu
-} // namespace backend
-} // namespace neurun
diff --git a/runtimes/neurun/src/backend/cpu/Config.h b/runtimes/neurun/src/backend/cpu/Config.h
deleted file mode 100644
index ad9ca0ee8..000000000
--- a/runtimes/neurun/src/backend/cpu/Config.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_CPU_CONFIG_H__
-#define __NEURUN_BACKEND_CPU_CONFIG_H__
-
-#include "backend/interface/IConfig.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace cpu
-{
-
-class Config : public IConfig
-{
-public:
- Config()
- {
- // DO NOTHING
- }
-
- virtual std::string id() override { return "cpu"; }
- virtual void initialize() override;
- virtual graph::operand::Layout getOperandLayout() { return graph::operand::Layout::NHWC; }
- virtual bool SupportSubTensorAlloc() override
- {
- // NOTE CPU allocator cannot support subtensor allocation yet
- return false;
- }
-};
-
-} // namespace cpu
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_CPU_CONFIG_H__
diff --git a/runtimes/neurun/src/backend/cpu/MemoryPlanner.cc b/runtimes/neurun/src/backend/cpu/MemoryPlanner.cc
deleted file mode 100644
index 2d0995b8a..000000000
--- a/runtimes/neurun/src/backend/cpu/MemoryPlanner.cc
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "MemoryPlanner.h"
-#include "util/logging.h"
-#include <cassert>
-
-namespace neurun
-{
-namespace backend
-{
-namespace cpu
-{
-
-Allocator::Allocator(uint32_t capacity)
-{
- assert(!_base && capacity != 0);
-
- _base = new uint8_t[capacity];
-
- VERBOSE(ALLOC) << "allocation capacity: " << capacity << std::endl;
- VERBOSE(ALLOC) << "base pointer: " << static_cast<void *>(_base) << std::endl;
-}
-
-Allocator::~Allocator() { delete[] _base; }
-
-void BumpPlanner::claim(const model::operand::Index &ind, size_t size)
-{
- assert(size != 0);
-
- Block blk{_capacity, size};
- _mem_plans[ind] = blk;
- _capacity += size;
-
- VERBOSE(BP_PLANNER) << "CLAIM(#" << ind.value() << "): " << blk.offset << ", " << blk.size
- << std::endl;
-}
-
-void BumpPlanner::release(const model::operand::Index &ind)
-{
-  VERBOSE(BP_PLANNER) << "RELEASE(#" << ind.value() << "): "
-                      << "nothing to do (bump planning never reuses memory)" << std::endl;
-}
-
-// claim() (i.e. reserving memory) relies on the following assumptions.
-// 1. About _claim_table (std::map):
-//   - std::map is used so that entries are always kept sorted by key (base_offset),
-//     with the value being the claiming model::operand::Index.
-//   - claim() inserts a key/value pair into _claim_table and release() removes it.
-//   - _claim_table therefore describes the memory status at a given point in time:
-//     - If _claim_table already holds an entry at an offset, that region is claimed
-//       and cannot be used; a new location has to be found.
-//     - If no entry covers an offset of the required size, that region can be claimed.
-// 2. While iterating over _claim_table, the current claimed_base_offset is always greater
-//    than the previous one, because the map is sorted by base_offset.
-void FirstFitPlanner::claim(const model::operand::Index &ind, size_t size)
-{
- assert(size != 0);
-
- // Find the right position for claiming
- uint32_t next_offset = 0;
- for (auto &mem_claim : _claim_table)
- {
- auto claimed_base_offset = mem_claim.first;
- auto claimed_size = _mem_plans[mem_claim.second].size;
- if (next_offset + size <= claimed_base_offset)
- {
- break;
- }
- else
- {
- next_offset = claimed_base_offset + claimed_size;
- }
- }
-
- // Now next_offset is set to the proper offset
- _claim_table[next_offset] = ind;
- _mem_plans[ind] = {next_offset, size};
-
- VERBOSE(FF_PLANNER) << "claim(#" << ind.value() << "): [+" << next_offset << ", " << size << "sz]"
- << std::endl;
-
- if (_capacity < next_offset + size)
- {
- _capacity = next_offset + size;
- }
-}
-
-void FirstFitPlanner::release(const model::operand::Index &ind)
-{
- for (auto it = _claim_table.cbegin(); it != _claim_table.cend(); ++it)
- {
- if (it->second == ind)
- {
- uint32_t offset = it->first;
- uint32_t index = ind.value();
- uint32_t size = _mem_plans[ind].size;
-
- _claim_table.erase(it);
-
- VERBOSE(FF_PLANNER) << "release(#" << index << "): [+" << offset << ", " << size << "sz]"
- << std::endl;
- return;
- }
- }
-  assert(!"Cannot release for the given index. It has not been claimed, or it has already been released.");
-}
-
-} // namespace cpu
-} // namespace backend
-} // namespace neurun
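A hedged sketch of the behavioral difference between the two planners: BumpPlanner only ever grows, while FirstFitPlanner can place a new claim into a hole left by a release. Constructing model::operand::Index directly from an integer is an assumption made for illustration.

    #include "backend/cpu/MemoryPlanner.h"

    // Sketch only: first-fit reuses the hole left by a released operand.
    void planner_demo()
    {
      using neurun::model::operand::Index;

      neurun::backend::cpu::FirstFitPlanner planner;
      planner.claim(Index{0}, 64); // placed at offset 0
      planner.claim(Index{1}, 32); // placed at offset 64
      planner.release(Index{0});   // offsets [0, 64) become free again
      planner.claim(Index{2}, 16); // first fit: placed back at offset 0

      // capacity() is the high-water mark (96 here); TensorBuilder::prepare()
      // hands this value to Allocator as the size of the single backing buffer.
      (void)planner.capacity();
    }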
diff --git a/runtimes/neurun/src/backend/cpu/MemoryPlanner.h b/runtimes/neurun/src/backend/cpu/MemoryPlanner.h
deleted file mode 100644
index 4b2661223..000000000
--- a/runtimes/neurun/src/backend/cpu/MemoryPlanner.h
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file        MemoryPlanner.h
- * @brief       This file contains Memory Planning related classes
- */
-
-#ifndef __NEURUN_BACKEND_CPU_MEMORY_PLANNER_H__
-#define __NEURUN_BACKEND_CPU_MEMORY_PLANNER_H__
-
-#include <map>
-#include <unordered_map>
-
-#include "model/operand/Index.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace cpu
-{
-
-/**
- * @brief Structure to have memory offset and size
- */
-struct Block
-{
- uint32_t offset;
- uint32_t size;
-};
-
-/**
- * @brief Class to allocate memory
- */
-class Allocator
-{
-public:
- Allocator(uint32_t capacity);
- ~Allocator();
- /**
- * @brief Get memory base pointer
- * @return base pointer
- */
- uint8_t *base() const { return _base; }
-
-private:
- uint8_t *_base = nullptr;
-};
-
-/**
- * @brief Interface to plan memory
- */
-struct IMemoryPlanner
-{
- using MemoryPlans = std::unordered_map<model::operand::Index, Block>;
-
- /**
- * @brief Claim memory for operand
- * @param[in] index The operand index
- * @param[in] size The size of the memory
- */
- virtual void claim(const model::operand::Index &, size_t) = 0;
- /**
- * @brief Release memory for operand
- * @param[in] index The operand index
- */
- virtual void release(const model::operand::Index &) = 0;
- /**
- * @brief Get capacity for memory planning
- * @return The value of capacity
- */
- virtual uint32_t capacity() = 0;
- /**
- * @brief Get MemoryPlans
- * @return MemoryPlans
- */
- virtual MemoryPlans &memory_plans() = 0;
-};
-
-/**
- * @brief Memory planner that uses a simple bump (monotonic) strategy
- */
-class BumpPlanner : public IMemoryPlanner
-{
-public:
- /**
- * @brief Claim memory for an operand using the bump strategy
- * @param[in] index The operand index
- * @param[in] size The size of the memory
- */
- virtual void claim(const model::operand::Index &, size_t) override;
- /**
- * @brief Release memory for an operand using the bump strategy
- * @param[in] index The operand index
- */
- virtual void release(const model::operand::Index &) override;
- /**
- * @brief Get capacity for memory planning
- * @return The value of capacity
- */
- virtual uint32_t capacity() override { return _capacity; }
- /**
- * @brief Get MemoryPlans
- * @return MemoryPlans
- */
- virtual MemoryPlans &memory_plans() override { return _mem_plans; }
-
-private:
- uint32_t _capacity = 0;
- MemoryPlans _mem_plans;
-};
-
-/**
- * @brief Memory planner that uses a first-fit strategy
- */
-class FirstFitPlanner : public IMemoryPlanner
-{
-public:
- /**
- * @brief Claim memory for an operand using the first-fit strategy
- * @param[in] index The operand index
- * @param[in] size The size of the memory
- */
- virtual void claim(const model::operand::Index &, size_t) override;
- /**
- * @brief Release memory for an operand using the first-fit strategy
- * @param[in] index The operand index
- */
- virtual void release(const model::operand::Index &) override;
- /**
- * @brief Get capacity for memory planning
- * @return The value of capacity
- */
- virtual uint32_t capacity() override { return _capacity; }
- /**
- * @brief Get MemoryPlans
- * @return MemoryPlans
- */
- virtual MemoryPlans &memory_plans() override { return _mem_plans; }
-
-private:
- uint32_t _capacity = 0;
- MemoryPlans _mem_plans;
- // Use std::map because claim() assumes that _claim_table is sorted by uint32_t(base_offset)
- std::map<uint32_t, model::operand::Index> _claim_table;
-};
-
-} // namespace cpu
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_CPU_MEMORY_PLANNER_H__
diff --git a/runtimes/neurun/src/backend/cpu/PluginClassesAllocator.cc b/runtimes/neurun/src/backend/cpu/PluginClassesAllocator.cc
deleted file mode 100644
index 26d4d8858..000000000
--- a/runtimes/neurun/src/backend/cpu/PluginClassesAllocator.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <memory>
-#include "TensorBuilder.h"
-#include "StageGenerator.h"
-#include "Config.h"
-#include "util/logging.h"
-
-extern "C" {
-neurun::backend::cpu::TensorBuilder *allocate_TensorBuilder()
-{
- VERBOSE(allocate_TensorBuilder) << "loaded from CPU\n";
- return new neurun::backend::cpu::TensorBuilder;
-}
-
-neurun::backend::cpu::StageGenerator *
-allocate_StageGenerator(const neurun::model::operand::Set &operand_ctx,
- const std::shared_ptr<neurun::backend::cpu::TensorBuilder> &tensor_builder)
-{
- VERBOSE(allocate_StageGenerator) << "loaded from CPU\n";
- return new neurun::backend::cpu::StageGenerator(operand_ctx, tensor_builder);
-}
-
-neurun::backend::cpu::Config *allocate_Config()
-{
- VERBOSE(allocate_Config) << "loaded from CPU\n";
- return new neurun::backend::cpu::Config;
-}
-}
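These extern "C" factories are the plugin surface of the backend: the CMake rules above install the library as libbackend_cpu under lib/neurun, and a loader is expected to resolve the allocate_* symbols by name. A hedged sketch of such a lookup follows; the exact library path, flags, and error handling used by the real backend manager are assumptions here.

    #include <dlfcn.h>
    #include <stdexcept>
    #include "backend/cpu/Config.h"

    // Sketch only: resolve one of the factory symbols defined above at runtime.
    neurun::backend::cpu::Config *load_cpu_config()
    {
      void *handle = dlopen("libbackend_cpu.so", RTLD_LAZY | RTLD_LOCAL);
      if (handle == nullptr)
        throw std::runtime_error(dlerror());

      using ConfigFactory = neurun::backend::cpu::Config *(*)();
      auto create = reinterpret_cast<ConfigFactory>(dlsym(handle, "allocate_Config"));
      if (create == nullptr)
        throw std::runtime_error("allocate_Config not found in libbackend_cpu.so");

      return create(); // ownership passes to the caller
    }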
diff --git a/runtimes/neurun/src/backend/cpu/StageGenerator.cc b/runtimes/neurun/src/backend/cpu/StageGenerator.cc
deleted file mode 100644
index c53b320a4..000000000
--- a/runtimes/neurun/src/backend/cpu/StageGenerator.cc
+++ /dev/null
@@ -1,547 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "StageGenerator.h"
-
-#include <stdexcept>
-
-#include "cpp14/memory.h"
-#include "util/Padding.h"
-#include "kernel/cpu/OperationUtils.h"
-#include "kernel/cpu/ConvolutionLayer.h"
-#include "kernel/cpu/AvgPoolLayer.h"
-#include "kernel/cpu/MaxPoolLayer.h"
-#include "kernel/cpu/ConcatLayer.h"
-#include "kernel/cpu/FullyConnectedLayer.h"
-#include "kernel/cpu/ReshapeLayer.h"
-#include "kernel/cpu/SoftMaxLayer.h"
-#include "kernel/cpu/PermuteLayer.h"
-#include "backend/BackendManager.h"
-#include "backend/interface/IConfig.h"
-
-#include "util/logging.h"
-
-#include "util/Utils.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace cpu
-{
-
-StageGenerator::StageGenerator(const neurun::model::operand::Set &operand_ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder)
- : _ctx(operand_ctx), _tensor_builder(tensor_builder)
-{
- // DO NOTHING
-}
-
-void StageGenerator::visit(const model::operation::Conv2DNode &node)
-{
- using model::operation::Conv2DNode;
-
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(Conv2DNode::Input::INPUT)};
- const auto ker_index{node.getInputs().at(Conv2DNode::Input::KERNEL)};
- const auto bias_index{node.getInputs().at(Conv2DNode::Input::BIAS)};
-
- const auto vstride_index{node.param().vstride_index};
- const auto hstride_index{node.param().hstride_index};
-
- const auto padding_index{node.param().padding_index};
- const auto activation_index{node.param().activation_index};
-
- const PaddingCode padding_type =
- static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
-
- assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
- (ANEURALNETWORKS_PADDING_VALID == padding_type));
-
- util::Stride stride;
-
- stride.vertical = _ctx.at(vstride_index).asScalar<int32_t>();
- stride.horizontal = _ctx.at(hstride_index).asScalar<int32_t>();
-
- // Construct operation parameters
- struct Param
- {
- model::operand::Index ofm_index;
- model::operand::Index ifm_index;
- model::operand::Index ker_index;
- model::operand::Index bias_index;
-
- ::neurun::kernel::cpu::Shape ofm_shape;
- ::neurun::kernel::cpu::Shape ifm_shape;
- ::neurun::kernel::cpu::Shape ker_shape;
- ::neurun::kernel::cpu::Shape bias_shape;
-
- util::Padding padding;
- util::Stride stride;
-
- FuseCode activation;
- };
-
- Param param;
-
- param.ofm_index = ofm_index;
- param.ifm_index = ifm_index;
- param.ker_index = ker_index;
- param.bias_index = bias_index;
-
- param.ofm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ofm_index));
- param.ifm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ifm_index));
- param.ker_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ker_index));
- param.bias_shape = ::neurun::kernel::cpu::getShape(_ctx.at(bias_index));
-
- param.stride = stride;
- param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
- _ctx.at(ofm_index).shape().asFeature(), stride,
- _ctx.at(ker_index).shape().asKernel().W,
- _ctx.at(ker_index).shape().asKernel().H)
- : util::valid_padding();
-
- param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
-
- auto tensors = _tensor_builder;
-
- returnStage([tensors, param](IExecutionBuilder &builder) {
- auto ofm_alloc = tensors->at(param.ofm_index);
- auto ifm_alloc = tensors->at(param.ifm_index);
- auto ker_alloc = tensors->at(param.ker_index);
- auto bias_alloc = tensors->at(param.bias_index);
-
- std::unique_ptr<::neurun::kernel::cpu::ConvolutionLayer> fn{
- new ::neurun::kernel::cpu::ConvolutionLayer};
-
- fn->configure(ifm_alloc->buffer(), param.ifm_shape, ker_alloc->buffer(), param.ker_shape,
- bias_alloc->buffer(), param.bias_shape, param.padding.left, param.padding.right,
- param.padding.top, param.padding.bottom, param.stride.horizontal,
- param.stride.vertical, param.activation, ofm_alloc->buffer(), param.ofm_shape);
-
- builder.append(std::move(fn));
- });
-}
-
-void StageGenerator::visit(const model::operation::MaxPool2DNode &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(model::operation::MaxPool2DNode::Input::INPUT)};
-
- const auto kh_index{node.param().kh_index};
- const auto kw_index{node.param().kw_index};
-
- const auto vstride_index{node.param().vstride_index};
- const auto hstride_index{node.param().hstride_index};
-
- const auto padding_index{node.param().padding_index};
- const auto activation_index{node.param().activation_index};
-
- const int32_t kh = _ctx.at(kh_index).asScalar<int32_t>();
- const int32_t kw = _ctx.at(kw_index).asScalar<int32_t>();
-
- const int32_t vstride = _ctx.at(vstride_index).asScalar<int32_t>();
- const int32_t hstride = _ctx.at(hstride_index).asScalar<int32_t>();
-
- const PaddingCode padding_type =
- static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
-
- // Construct operation parameters
- struct Param
- {
- model::operand::Index ofm_index;
- model::operand::Index ifm_index;
-
- uint32_t kw;
- uint32_t kh;
-
- ::neurun::kernel::cpu::Shape ofm_shape;
- ::neurun::kernel::cpu::Shape ifm_shape;
-
- util::Padding padding;
- util::Stride stride;
-
- FuseCode activation;
- };
-
- Param param;
-
- param.ofm_index = ofm_index;
- param.ifm_index = ifm_index;
-
- param.kh = kh;
- param.kw = kw;
-
- param.ofm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ofm_index));
- param.ifm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ifm_index));
-
- param.stride.vertical = vstride;
- param.stride.horizontal = hstride;
-
- param.padding =
- (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
- _ctx.at(ofm_index).shape().asFeature(), param.stride, kw, kh)
- : util::valid_padding();
-
- param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
-
- auto tensors = _tensor_builder;
-
- returnStage([tensors, param](IExecutionBuilder &builder) {
- auto ofm_alloc = tensors->at(param.ofm_index).get();
- auto ifm_alloc = tensors->at(param.ifm_index).get();
-
- std::unique_ptr<::neurun::kernel::cpu::MaxPoolLayer> fn{
- new ::neurun::kernel::cpu::MaxPoolLayer};
-
- fn->configure(ifm_alloc->buffer(), param.ifm_shape, param.padding.left, param.padding.right,
- param.padding.top, param.padding.bottom, param.stride.horizontal,
- param.stride.vertical, param.kw, param.kh, param.activation, ofm_alloc->buffer(),
- param.ofm_shape);
-
- builder.append(std::move(fn));
- });
-}
-
-void StageGenerator::visit(const model::operation::AvgPool2DNode &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(model::operation::AvgPool2DNode::Input::INPUT)};
-
- const auto kh_index{node.param().kh_index};
- const auto kw_index{node.param().kw_index};
-
- const auto vstride_index{node.param().vstride_index};
- const auto hstride_index{node.param().hstride_index};
-
- const auto padding_index{node.param().padding_index};
- const auto activation_index{node.param().activation_index};
-
- const int32_t kh = _ctx.at(kh_index).asScalar<int32_t>();
- const int32_t kw = _ctx.at(kw_index).asScalar<int32_t>();
-
- const int32_t vstride = _ctx.at(vstride_index).asScalar<int32_t>();
- const int32_t hstride = _ctx.at(hstride_index).asScalar<int32_t>();
-
- const PaddingCode padding_type =
- static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
-
- assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
- (ANEURALNETWORKS_PADDING_VALID == padding_type));
-
- // Construct operation parameters
- struct Param
- {
- model::operand::Index ofm_index;
- model::operand::Index ifm_index;
-
- uint32_t kw;
- uint32_t kh;
-
- ::neurun::kernel::cpu::Shape ofm_shape;
- ::neurun::kernel::cpu::Shape ifm_shape;
-
- util::Padding padding;
- util::Stride stride;
-
- FuseCode activation;
- };
-
- Param param;
-
- param.ofm_index = ofm_index;
- param.ifm_index = ifm_index;
-
- param.kh = kh;
- param.kw = kw;
-
- param.ofm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ofm_index));
- param.ifm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ifm_index));
-
- param.stride.vertical = vstride;
- param.stride.horizontal = hstride;
-
- param.padding =
- (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
- _ctx.at(ofm_index).shape().asFeature(), param.stride, kw, kh)
- : util::valid_padding();
-
- param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
-
- auto tensors = _tensor_builder;
-
- returnStage([tensors, param](IExecutionBuilder &builder) {
- auto ofm_alloc = tensors->at(param.ofm_index).get();
- auto ifm_alloc = tensors->at(param.ifm_index).get();
-
- std::unique_ptr<::neurun::kernel::cpu::AvgPoolLayer> fn{
- new ::neurun::kernel::cpu::AvgPoolLayer};
-
- fn->configure(ifm_alloc->buffer(), param.ifm_shape, param.padding.left, param.padding.right,
- param.padding.top, param.padding.bottom, param.stride.horizontal,
- param.stride.vertical, param.kw, param.kh, param.activation, ofm_alloc->buffer(),
- param.ofm_shape);
-
- builder.append(std::move(fn));
- });
-}
-
-void StageGenerator::visit(const model::operation::ConcatNode &node)
-{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto axis_index{node.param().axis_index};
-
- struct Param
- {
- model::operand::Index output_index;
- std::vector<model::operand::Index> input_indexes;
-
- int32_t axis;
-
- ::neurun::kernel::cpu::Shape ofm_shape;
- std::vector<::neurun::kernel::cpu::Shape> ifm_shapes;
- };
-
- Param param;
-
- param.output_index = ofm_index;
- for (const auto &e : node.getInputs())
- {
- param.input_indexes.emplace_back(e);
- }
- param.axis = _ctx.at(axis_index).asScalar<int32_t>();
-
- param.ofm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ofm_index));
-
- for (auto e : node.getInputs())
- {
- param.ifm_shapes.emplace_back(::neurun::kernel::cpu::getShape(_ctx.at(e)));
- }
-
- auto tensors = _tensor_builder;
-
- returnStage([tensors, param](IExecutionBuilder &builder) {
- auto output_alloc = tensors->at(param.output_index).get();
-
- std::vector<const uint8_t *> input_buffers;
- for (auto ifm_ind : param.input_indexes)
- {
- input_buffers.emplace_back(tensors->at(ifm_ind).get()->buffer());
- }
-
- std::unique_ptr<::neurun::kernel::cpu::ConcatLayer> fn{new ::neurun::kernel::cpu::ConcatLayer};
-
- fn->configure(input_buffers, param.ifm_shapes, param.axis, output_alloc->buffer(),
- param.ofm_shape);
-
- builder.append(std::move(fn));
- });
-}
-
-void StageGenerator::visit(const model::operation::FullyConnectedNode &node)
-{
- using model::operation::FullyConnectedNode;
-
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(FullyConnectedNode::Input::INPUT)};
- const auto weight_index{node.getInputs().at(FullyConnectedNode::Input::WEIGHT)};
- const auto bias_index{node.getInputs().at(FullyConnectedNode::Input::BIAS)};
- const auto activation_index{node.param().activation_index};
-
- // Construct operation parameters
- struct Param
- {
- model::operand::Index output_index;
- model::operand::Index input_index;
- model::operand::Index weight_index;
- model::operand::Index bias_index;
-
- ::neurun::kernel::cpu::Shape ofm_shape;
- ::neurun::kernel::cpu::Shape ifm_shape;
- ::neurun::kernel::cpu::Shape weight_shape;
- ::neurun::kernel::cpu::Shape bias_shape;
-
- FuseCode activation;
- };
-
- Param param;
-
- param.output_index = output_index;
- param.input_index = input_index;
- param.weight_index = weight_index;
- param.bias_index = bias_index;
-
- param.ofm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(output_index));
- param.ifm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(input_index));
- param.weight_shape = ::neurun::kernel::cpu::getShape(_ctx.at(weight_index));
- param.bias_shape = ::neurun::kernel::cpu::getShape(_ctx.at(bias_index));
-
- param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
-
- auto tensors = _tensor_builder;
-
- returnStage([tensors, param](IExecutionBuilder &builder) {
- auto output_alloc = tensors->at(param.output_index).get();
- auto input_alloc = tensors->at(param.input_index).get();
- auto weight_alloc = tensors->at(param.weight_index).get();
- auto bias_alloc = tensors->at(param.bias_index).get();
-
- std::unique_ptr<::neurun::kernel::cpu::FullyConnectedLayer> fn{
- new ::neurun::kernel::cpu::FullyConnectedLayer};
-
- fn->configure(input_alloc->buffer(), param.ifm_shape, weight_alloc->buffer(),
- param.weight_shape, bias_alloc->buffer(), param.bias_shape, param.activation,
- output_alloc->buffer(), param.ofm_shape);
-
- builder.append(std::move(fn));
- });
-}
-
-void StageGenerator::visit(const model::operation::ReshapeNode &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(model::operation::ReshapeNode::Input::INPUT)};
-
- struct Param
- {
- model::operand::Index output_index;
- model::operand::Index input_index;
-
- ::neurun::kernel::cpu::Shape ofm_shape;
- ::neurun::kernel::cpu::Shape ifm_shape;
- };
-
- Param param;
-
- param.output_index = output_index;
- param.input_index = input_index;
-
- param.ofm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(output_index));
- param.ifm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(input_index));
-
- auto tensors = _tensor_builder;
-
- returnStage([tensors, param](IExecutionBuilder &builder) {
- auto output_alloc = tensors->at(param.output_index).get();
- auto input_alloc = tensors->at(param.input_index).get();
-
- std::unique_ptr<::neurun::kernel::cpu::ReshapeLayer> fn{
- new ::neurun::kernel::cpu::ReshapeLayer};
-
- fn->configure(input_alloc->buffer(), param.ifm_shape, output_alloc->buffer(), param.ofm_shape);
-
- builder.append(std::move(fn));
- });
-}
-
-void StageGenerator::visit(const model::operation::SoftmaxNode &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(model::operation::SoftmaxNode::Input::INPUT)};
- const auto scale_index{node.param().scale_index};
-
- struct Param
- {
- model::operand::Index output_index;
- model::operand::Index input_index;
-
- ::neurun::kernel::cpu::Shape ofm_shape;
- ::neurun::kernel::cpu::Shape ifm_shape;
-
- float scale;
- };
-
- Param param;
-
- param.output_index = output_index;
- param.input_index = input_index;
-
- param.ofm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(output_index));
- param.ifm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(input_index));
-
- param.scale = _ctx.at(scale_index).asScalar<float>();
-
- auto tensors = _tensor_builder;
-
- returnStage([tensors, param](IExecutionBuilder &builder) {
- auto output_alloc = tensors->at(param.output_index).get();
- auto input_alloc = tensors->at(param.input_index).get();
-
- std::unique_ptr<::neurun::kernel::cpu::SoftMaxLayer> fn{
- new ::neurun::kernel::cpu::SoftMaxLayer};
-
- fn->configure(input_alloc->buffer(), param.ifm_shape, param.scale, output_alloc->buffer(),
- param.ofm_shape);
-
- builder.append(std::move(fn));
- });
-}
-
-void StageGenerator::visit(const model::operation::PermuteNode &node)
-{
- const auto output_index{node.getOutputs().at(0)};
- const auto input_index{node.getInputs().at(0)};
-
- using PermuteType = model::operation::PermuteNode::Type;
-
- struct Param
- {
- model::operand::Index output_index;
- model::operand::Index input_index;
-
- model::operand::Shape shape;
-
- PermuteType type{PermuteType::COPY};
- };
-
- Param param;
-
- param.output_index = output_index;
- param.input_index = input_index;
-
- param.shape = _ctx.at(output_index).shape();
- param.type = node.param().type;
-
- // assert(param.shape == _ctx.at(input_index));
-
- const auto &input_li = _ctx.at(input_index).lower_info();
- const auto &output_li = _ctx.at(output_index).lower_info();
- const auto input_backend = input_li->def_backends().getOnlyElement();
- const auto output_backend = output_li->def_backends().getOnlyElement();
-
- const auto input_tensors = input_backend->tensor_builder();
- const auto output_tensors = output_backend->tensor_builder();
-
- returnStage([input_tensors, output_tensors, param](IExecutionBuilder &builder) {
- auto output_object = output_tensors->wrapTensor(param.output_index);
- auto input_object = input_tensors->wrapTensor(param.input_index);
-
- auto fn = nnfw::cpp14::make_unique<::neurun::kernel::cpu::PermuteLayer>();
-
- fn->configure(input_object, output_object, param.shape, param.type);
-
- builder.append(std::move(fn));
- });
-}
-
-void StageGenerator::visit(const model::operation::AddNode &) { throw std::runtime_error("NYI"); }
-
-} // namespace cpu
-} // namespace backend
-} // namespace neurun
diff --git a/runtimes/neurun/src/backend/cpu/StageGenerator.h b/runtimes/neurun/src/backend/cpu/StageGenerator.h
deleted file mode 100644
index 6a0e387da..000000000
--- a/runtimes/neurun/src/backend/cpu/StageGenerator.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_CPU_STAGE_GENERATOR_H__
-#define __NEURUN_BACKEND_CPU_STAGE_GENERATOR_H__
-
-#include "backend/interface/IStageGenerator.h"
-
-#include "model/operand/Set.h"
-#include "backend/cpu/operand/Tensor.h"
-#include "TensorBuilder.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace cpu
-{
-
-class StageGenerator : public IStageGenerator
-{
-public:
- StageGenerator(const neurun::model::operand::Set &ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder);
-
- virtual std::shared_ptr<ITensorBuilder> tensor_builder() override { return _tensor_builder; }
-
-#define OP(InternalName, IsNnApi, NnApiName) \
- virtual void visit(const model::operation::InternalName &) override;
-#include "model/operation/Op.lst"
-#undef OP
-
-private:
- const neurun::model::operand::Set &_ctx;
- std::shared_ptr<TensorBuilder> _tensor_builder;
-};
-
-} // namespace cpu
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_CPU_STAGE_GENERATOR_H__
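The OP(...) X-macro above turns every entry of model/operation/Op.lst into one visit() override, so the generator cannot silently fall out of sync with the operation list. The entries shown below are illustrative; only the macro shape itself is taken from this header.

    // Illustration only: given Op.lst entries of the form
    //   OP(Conv2DNode, true, CONV_2D)
    //   OP(ReshapeNode, true, RESHAPE)
    // the #define / #include / #undef block above expands to
    //   virtual void visit(const model::operation::Conv2DNode &) override;
    //   virtual void visit(const model::operation::ReshapeNode &) override;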
diff --git a/runtimes/neurun/src/backend/cpu/TensorBuilder.cc b/runtimes/neurun/src/backend/cpu/TensorBuilder.cc
deleted file mode 100644
index 9c39b9c00..000000000
--- a/runtimes/neurun/src/backend/cpu/TensorBuilder.cc
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "TensorBuilder.h"
-
-#include <cassert>
-
-#include "operand/Object.h"
-#include "util/logging.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace cpu
-{
-
-TensorBuilder::TensorBuilder() : _mem_planner(std::make_shared<FirstFitPlanner>())
-{
- // DO NOTHING
-}
-
-void TensorBuilder::registerTensorInfo(const model::operand::Index &ind,
- const compiler::TensorInfo &info)
-{
- _tensor_info_map.insert({ind, info});
-}
-
-void TensorBuilder::registerSubTensorInfo(const model::operand::Index &,
- const compiler::SubTensorInfo &)
-{
- // Not supported yet
- assert(false);
-}
-
-void TensorBuilder::notifyFirstUse(const model::operand::Index &ind)
-{
- assert(_tensor_info_map.find(ind) != _tensor_info_map.end());
- const auto &info = _tensor_info_map.at(ind);
-
- const auto size = info.total_size();
- _mem_planner->claim(ind, size);
-}
-
-void TensorBuilder::notifyLastUse(const model::operand::Index &ind) { _mem_planner->release(ind); }
-
-void TensorBuilder::prepare(void)
-{
- assert(_tensors.size() == 0);
-
- _mem_alloc = std::make_shared<Allocator>(_mem_planner->capacity());
- assert(_mem_alloc->base());
-
- for (auto &mem_plan : _mem_planner->memory_plans())
- {
- auto ind = mem_plan.first;
- auto mem_blk = mem_plan.second;
- const auto &info = _tensor_info_map[ind];
-
- uint8_t *buffer = _mem_alloc->base() + mem_blk.offset;
- auto tensor = std::make_shared<operand::Tensor>(info);
- tensor->setBuffer(buffer);
- _tensors[ind] = tensor;
-
- VERBOSE(CPU_TENSORBUILDER) << "TENSOR(#" << ind.value() << "): " << static_cast<void *>(buffer)
- << std::endl;
-
-    // Tensors must be created here for now; otherwise the stages would segfault when run.
- }
-}
-
-void TensorBuilder::allocate(void)
-{
-  // NOTE Nothing to do for now. Allocation is currently done in prepare(), which is not the right place.
-}
-
-std::shared_ptr<::neurun::backend::operand::ITensor>
-TensorBuilder::tensorAt(const model::operand::Index &ind)
-{
- return _tensors.at(ind);
-}
-
-std::shared_ptr<backend::operand::IObject>
-TensorBuilder::wrapTensor(const model::operand::Index &ind)
-{
- if (_objects.find(ind) != _objects.end())
- {
- return _objects.at(ind);
- }
- else
- {
- return _objects[ind] = std::make_shared<operand::Object>(_tensors.at(ind));
- }
-}
-
-void TensorBuilder::iterate(const IterateFunction &fn)
-{
- for (auto it : _tensors)
- {
- fn(it.first);
- }
-}
-
-std::shared_ptr<operand::Tensor> TensorBuilder::at(const ::neurun::model::operand::Index &ind)
-{
- return _tensors.at(ind);
-}
-
-} // namespace cpu
-} // namespace backend
-} // namespace neurun
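Taken together, the methods above assume a fixed driving order: register every operand's TensorInfo, replay first and last uses so the memory planner can overlap lifetimes, then call prepare() to carve all tensors out of one Allocator block. A hedged sketch of that sequence follows; building Index values from raw integers and receiving ready-made TensorInfo objects are assumptions for illustration.

    #include "backend/cpu/TensorBuilder.h"

    // Sketch only: the call order TensorBuilder expects from its driver.
    void build_two_tensors(const neurun::compiler::TensorInfo &a_info,
                           const neurun::compiler::TensorInfo &b_info)
    {
      using neurun::model::operand::Index;

      neurun::backend::cpu::TensorBuilder builder;
      builder.registerTensorInfo(Index{0}, a_info);
      builder.registerTensorInfo(Index{1}, b_info);

      builder.notifyFirstUse(Index{0}); // claim A
      builder.notifyFirstUse(Index{1}); // claim B while A is still live
      builder.notifyLastUse(Index{0});  // release A: its space may be reused
      builder.notifyLastUse(Index{1});

      builder.prepare(); // one Allocator block; each tensor gets base + offset

      auto a = builder.at(Index{0}); // cpu::operand::Tensor with its buffer set
      (void)a;
    }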
diff --git a/runtimes/neurun/src/backend/cpu/TensorBuilder.h b/runtimes/neurun/src/backend/cpu/TensorBuilder.h
deleted file mode 100644
index 2715d57f0..000000000
--- a/runtimes/neurun/src/backend/cpu/TensorBuilder.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_CPU_TENSOR_BUILDER_H__
-#define __NEURUN_BACKEND_CPU_TENSOR_BUILDER_H__
-
-#include <unordered_map>
-
-#include "backend/interface/ITensorBuilder.h"
-#include "backend/cpu/operand/Tensor.h"
-#include "backend/cpu/operand/Object.h"
-#include "model/operand/Index.h"
-#include "MemoryPlanner.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace cpu
-{
-
-class TensorBuilder : public ITensorBuilder
-{
-public:
- TensorBuilder();
-
- /**
- * @brief Register tensor information to allocate on CPU backend
- * @param[in] ind Operand index
- * @param[in] info Tensor information
- */
- virtual void registerTensorInfo(const model::operand::Index &ind,
- const compiler::TensorInfo &info) override;
- /**
- * @brief Register subtensor information to allocate on CPU backend
- * @param[in] ind Operand index
- * @param[in] info Subtensor information
- */
- virtual void registerSubTensorInfo(const model::operand::Index &ind,
- const compiler::SubTensorInfo &info) override;
-
- virtual void notifyFirstUse(const model::operand::Index &) override;
- virtual void notifyLastUse(const model::operand::Index &) override;
-
- virtual void prepare(void) override;
- virtual void allocate(void) override;
-
- virtual std::shared_ptr<::neurun::backend::operand::ITensor>
- tensorAt(const model::operand::Index &ind) override;
- virtual std::shared_ptr<backend::operand::IObject>
- wrapTensor(const model::operand::Index &ind) override;
- virtual void iterate(const IterateFunction &fn) override;
-
- std::shared_ptr<operand::Tensor> at(const ::neurun::model::operand::Index &ind);
-
-private:
- std::unordered_map<model::operand::Index, compiler::TensorInfo> _tensor_info_map;
- std::unordered_map<model::operand::Index, std::shared_ptr<operand::Tensor>> _tensors;
- std::unordered_map<model::operand::Index, std::shared_ptr<operand::Object>> _objects;
- std::unordered_map<model::operand::Index, Block> _tensor_mem_map;
- std::shared_ptr<IMemoryPlanner> _mem_planner;
- std::shared_ptr<Allocator> _mem_alloc;
-};
-
-} // namespace cpu
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_CPU_TENSOR_BUILDER_H__
diff --git a/runtimes/neurun/src/backend/cpu/operand/Object.cc b/runtimes/neurun/src/backend/cpu/operand/Object.cc
deleted file mode 100644
index 011747a8c..000000000
--- a/runtimes/neurun/src/backend/cpu/operand/Object.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Object.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace cpu
-{
-namespace operand
-{
-
-void Object::access(
- const std::function<void(::neurun::backend::operand::ITensor &tensor)> &fn) const
-{
- fn(*_tensor);
-}
-
-} // namespace operand
-} // namespace cpu
-} // namespace backend
-} // namespace neurun
diff --git a/runtimes/neurun/src/backend/cpu/operand/Object.h b/runtimes/neurun/src/backend/cpu/operand/Object.h
deleted file mode 100644
index 5ef7c4fbf..000000000
--- a/runtimes/neurun/src/backend/cpu/operand/Object.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_CPU_OPERAND_OBJECT_H__
-#define __NEURUN_BACKEND_CPU_OPERAND_OBJECT_H__
-
-#include <memory>
-#include "backend/interface/operand/ITensor.h"
-
-#include "backend/interface/operand/IObject.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace cpu
-{
-namespace operand
-{
-
-class Object : public backend::operand::IObject
-{
-public:
- Object() = default;
-
-public:
- Object(const std::shared_ptr<::neurun::backend::operand::ITensor> &tensor) : _tensor{tensor}
- {
- // DO NOTHING
- }
-
-public:
- ::neurun::backend::operand::ITensor *ptr(void) const override { return _tensor.get(); }
-
-private:
- std::shared_ptr<::neurun::backend::operand::ITensor> _tensor;
-
-public:
- void
- access(const std::function<void(::neurun::backend::operand::ITensor &tensor)> &fn) const override;
-};
-
-} // namespace operand
-} // namespace cpu
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_CPU_OPERAND_OBJECT_H__
diff --git a/runtimes/neurun/src/backend/cpu/operand/Tensor.cc b/runtimes/neurun/src/backend/cpu/operand/Tensor.cc
deleted file mode 100644
index a5251292e..000000000
--- a/runtimes/neurun/src/backend/cpu/operand/Tensor.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Tensor.h"
-
-#define NO_USE(a) (void)(a)
-
-namespace neurun
-{
-namespace backend
-{
-namespace cpu
-{
-namespace operand
-{
-
-size_t Tensor::calcOffset(const neurun::util::feature::Coordinate4D &coords)
-{
- NO_USE(coords);
- throw std::runtime_error("offset_element_in_bytes is not supported for cpu::Tensor now.");
-}
-
-} // namespace operand
-} // namespace cpu
-} // namespace backend
-} // namespace neurun
diff --git a/runtimes/neurun/src/backend/cpu/operand/Tensor.h b/runtimes/neurun/src/backend/cpu/operand/Tensor.h
deleted file mode 100644
index 7500f890f..000000000
--- a/runtimes/neurun/src/backend/cpu/operand/Tensor.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_CPU_OPERAND_TENSOR_H__
-#define __NEURUN_BACKEND_CPU_OPERAND_TENSOR_H__
-
-#include "backend/interface/operand/ITensor.h"
-#include "compiler/TensorInfo.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace cpu
-{
-namespace operand
-{
-
-class Tensor : public ::neurun::backend::operand::ITensor
-{
-public:
- Tensor() = delete;
-
-public:
- Tensor(const compiler::TensorInfo &info) : _info(info)
- {
- // DO NOTHING
- }
-
-public:
- void setBuffer(uint8_t *buffer) { _buffer = buffer; }
- ::neurun::model::operand::DataType data_type() const { return _info.typeInfo().type(); }
-
-public:
- uint8_t *buffer() const override { return _buffer; }
- /**
- * @brief Get dimension by index
- *
- * @param index Index to get dimension
- * @return size_t Dimension at index
- * @note N : dimension(0)
- * H : dimension(1)
- * W : dimension(2)
- * C : dimension(3)
- */
- size_t dimension(size_t index) const override { return _info.shape().dim(index); }
- size_t num_dimensions() const override { return _info.shape().dims().size(); }
- size_t total_size() const override { return _info.total_size(); }
- size_t calcOffset(const neurun::util::feature::Coordinate4D &coords) override;
-
-private:
- compiler::TensorInfo _info;
- uint8_t *_buffer = nullptr;
-};
-
-} // namespace operand
-} // namespace cpu
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_CPU_OPERAND_TENSOR_H__
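
To make the NHWC convention above concrete, here is a minimal sketch of driving this class directly; the TensorInfo value 'info' is assumed to describe a small feature map, and its constructor details live in compiler/TensorInfo.h.

// Illustrative sketch only. 'info' is assumed to describe a 1x3x4x2 NHWC feature map.
neurun::backend::cpu::operand::Tensor tensor{info};

std::vector<uint8_t> storage(tensor.total_size());
tensor.setBuffer(storage.data());

const size_t batch    = tensor.dimension(0); // N
const size_t height   = tensor.dimension(1); // H
const size_t width    = tensor.dimension(2); // W
const size_t channels = tensor.dimension(3); // C

// calcOffset() deliberately throws for the CPU tensor, so callers compute
// row-major NHWC offsets themselves rather than going through Coordinate4D.
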
diff --git a/runtimes/neurun/src/backend/interface/IConfig.h b/runtimes/neurun/src/backend/interface/IConfig.h
deleted file mode 100644
index 82789d0ff..000000000
--- a/runtimes/neurun/src/backend/interface/IConfig.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_ICONFIG_H__
-#define __NEURUN_BACKEND_ICONFIG_H__
-
-#include <string>
-
-#include "graph/operand/Layout.h"
-
-namespace neurun
-{
-namespace backend
-{
-
-struct IConfig
-{
- virtual ~IConfig() = default;
-
- virtual std::string id() = 0;
- virtual void initialize() = 0;
- // NOTE Assume backend has only one type of operand layout
- virtual graph::operand::Layout getOperandLayout() = 0;
- // Whether the backend supports subtensor allocation
- virtual bool SupportSubTensorAlloc() = 0;
-};
-
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_ICONFIG_H__
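
A hypothetical backend config written against this interface would look something like the sketch below; the "example" id and the NHWC layout value are assumptions for illustration, not taken from any particular backend.

// Illustrative sketch of an IConfig implementation for a made-up "example" backend.
class Config : public neurun::backend::IConfig
{
public:
  std::string id() override { return "example"; } // identifier used to select this backend
  void initialize() override
  {
    // one-time backend setup (driver handles, kernel caches, ...)
  }
  neurun::graph::operand::Layout getOperandLayout() override
  {
    return neurun::graph::operand::Layout::NHWC; // assumes the Layout enum has an NHWC entry
  }
  bool SupportSubTensorAlloc() override { return false; } // plain tensor allocation only
};
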
diff --git a/runtimes/neurun/src/backend/interface/IStageGenerator.h b/runtimes/neurun/src/backend/interface/IStageGenerator.h
deleted file mode 100644
index 878a50e3f..000000000
--- a/runtimes/neurun/src/backend/interface/IStageGenerator.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_ISTAGE_GENERATOR_H__
-#define __NEURUN_BACKEND_ISTAGE_GENERATOR_H__
-
-#include <memory>
-#include <functional>
-
-#include "exec/interface/IFunction.h"
-
-#include "backend/interface/ITensorBuilder.h"
-#include "model/operation/NodeVisitor.h"
-
-struct IExecutionBuilder
-{
- virtual ~IExecutionBuilder() = default;
-
- virtual void append(std::unique_ptr<::neurun::exec::IFunction> &&f) = 0;
-};
-
-using Stage = std::function<void(IExecutionBuilder &)>;
-
-namespace neurun
-{
-namespace backend
-{
-
-class IStageGenerator : model::operation::NodeVisitor
-{
-public:
- virtual ~IStageGenerator() = default;
-
- virtual std::shared_ptr<ITensorBuilder> tensor_builder() = 0;
-
-protected:
-#define OP(InternalName, IsNnApi, NnApiName) \
- virtual void visit(const model::operation::InternalName &) override {}
-#include "model/operation/Op.lst"
-#undef OP
-
-protected:
- void returnStage(const Stage &stage) { _return = stage; }
-
-public:
- Stage generate(const model::operation::Node &node)
- {
- node.accept(std::move(*this));
- return _return;
- }
-
-private:
- Stage _return = nullptr;
-};
-
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_ISTAGE_GENERATOR_H__
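
A concrete generator is expected to override visit() for the operations it supports, capture what the kernel needs by value, and hand back a Stage that appends an exec::IFunction to the execution builder. The sketch below uses a hypothetical AddNode/AddFn pair; the real, backend-specific StageGenerator implementations are far more involved.

// Illustrative sketch of a concrete IStageGenerator. AddNode and AddFn are hypothetical names.
class ExampleStageGenerator final : public neurun::backend::IStageGenerator
{
public:
  std::shared_ptr<neurun::backend::ITensorBuilder> tensor_builder() override
  {
    return _tensor_builder;
  }

  void visit(const neurun::model::operation::AddNode &node) override
  {
    // Pull operand indices / parameters out of 'node' here and capture them by value,
    // so the Stage stays valid after this visitor returns.
    returnStage([/* captured params */](IExecutionBuilder &builder) {
      builder.append(std::make_unique<AddFn>(/* tensors resolved via the tensor builder */));
    });
  }

private:
  std::shared_ptr<neurun::backend::ITensorBuilder> _tensor_builder;
};
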
diff --git a/runtimes/neurun/src/backend/interface/ITensorBuilder.h b/runtimes/neurun/src/backend/interface/ITensorBuilder.h
deleted file mode 100644
index 354a270e6..000000000
--- a/runtimes/neurun/src/backend/interface/ITensorBuilder.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_ITENSOR_BUILDER_H__
-#define __NEURUN_BACKEND_ITENSOR_BUILDER_H__
-
-#include <map>
-
-#include "model/operand/Index.h"
-#include "operand/IObject.h"
-#include "compiler/SubTensorInfo.h"
-#include "compiler/TensorInfo.h"
-#include "backend/interface/operand/ITensor.h"
-
-namespace neurun
-{
-namespace backend
-{
-
-struct ITensorBuilder
-{
- using IterateFunction = std::function<void(const model::operand::Index &)>;
-
- virtual ~ITensorBuilder(void) = default;
-
- // TODO Merge registerTensorInfo and registerSubTensorInfo using abstraction by internal class
- /**
- * @brief Register tensor information to allocate on backend
- */
- virtual void registerTensorInfo(const model::operand::Index &, const compiler::TensorInfo &) = 0;
- /**
- * @brief Register subtensor information to allocate on backend
- */
- virtual void registerSubTensorInfo(const model::operand::Index &,
- const compiler::SubTensorInfo &) = 0;
-
- virtual void notifyFirstUse(const model::operand::Index &) = 0;
- virtual void notifyLastUse(const model::operand::Index &) = 0;
-
- virtual void prepare(void) = 0;
- virtual void allocate(void) = 0;
-
- virtual std::shared_ptr<::neurun::backend::operand::ITensor>
- tensorAt(const model::operand::Index &ind) = 0;
- virtual std::shared_ptr<backend::operand::IObject>
- wrapTensor(const model::operand::Index &ind) = 0;
- virtual void iterate(const IterateFunction &fn) = 0;
-};
-
-} // namespace backend
-} // namespace neurun
-
-#include <set>
-#include <memory>
-
-namespace neurun
-{
-namespace backend
-{
-
-using TensorBuilderSet = std::set<std::shared_ptr<backend::ITensorBuilder>>;
-
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_ITENSOR_BUILDER_H__
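
The call sequence a compiler pass is expected to follow with a tensor builder is roughly: register shapes, report operand liveness, then prepare and allocate before kernels fetch their tensors. Condensed into a sketch below; the index and info arguments are placeholders handed in by the caller.

// Illustrative sketch of the ITensorBuilder lifecycle; indices and infos are placeholders.
void plan_and_allocate(neurun::backend::ITensorBuilder &builder,
                       const neurun::model::operand::Index &index,
                       const neurun::compiler::TensorInfo &info)
{
  builder.registerTensorInfo(index, info); // full tensor owned by this backend
  // builder.registerSubTensorInfo(...)    // alternatively, a view into a parent tensor

  builder.notifyFirstUse(index);           // liveness hints for the memory planner
  builder.notifyLastUse(index);

  builder.prepare();                       // plan memory
  builder.allocate();                      // back the tensors with real storage

  auto tensor = builder.tensorAt(index);   // raw ITensor handed to kernels
  auto object = builder.wrapTensor(index); // IObject wrapper exposing access()
  builder.iterate([](const neurun::model::operand::Index &ind) {
    // called once for every operand index this builder owns
    (void)ind;
  });
}
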
diff --git a/runtimes/neurun/src/backend/interface/operand/IObject.h b/runtimes/neurun/src/backend/interface/operand/IObject.h
deleted file mode 100644
index 44b33b080..000000000
--- a/runtimes/neurun/src/backend/interface/operand/IObject.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_OPERAND_I_OBJECT_H__
-#define __NEURUN_BACKEND_OPERAND_I_OBJECT_H__
-
-#include <functional>
-
-#include "ITensor.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace operand
-{
-
-struct IObject
-{
- virtual ~IObject() = default;
- virtual ::neurun::backend::operand::ITensor *ptr(void) const = 0;
- virtual void
- access(const std::function<void(::neurun::backend::operand::ITensor &tensor)> &fn) const = 0;
-};
-
-} // namespace operand
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_OPERAND_I_OBJECT_H__
diff --git a/runtimes/neurun/src/backend/interface/operand/ITensor.h b/runtimes/neurun/src/backend/interface/operand/ITensor.h
deleted file mode 100644
index 8bc3ff465..000000000
--- a/runtimes/neurun/src/backend/interface/operand/ITensor.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_OPERAND_I_TENSOR_H__
-#define __NEURUN_BACKEND_OPERAND_I_TENSOR_H__
-
-#include <cstring>
-#include <cstdint>
-
-#include "util/feature/Coordinate4D.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace operand
-{
-
-class ITensor
-{
-public:
- virtual ~ITensor() = default;
-
-public:
- virtual uint8_t *buffer() const = 0;
- virtual size_t total_size() const = 0;
- virtual size_t dimension(size_t index) const = 0;
- virtual size_t num_dimensions() const = 0;
- virtual size_t calcOffset(const neurun::util::feature::Coordinate4D &coords) = 0;
-};
-
-} // namespace operand
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_OPERAND_I_TENSOR_H__
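
For completeness, a small sketch of how a kernel can walk an ITensor-backed feature map purely through this interface, using the NHWC dimension order documented for the CPU tensor above; the (N, H, W, C) Coordinate4D constructor order and single-byte elements are assumptions.

// Illustrative sketch: byte-wise fill of an ITensor, assuming single-byte elements
// and an (N, H, W, C) Coordinate4D constructor.
void fill(neurun::backend::operand::ITensor &t, uint8_t value)
{
  for (size_t n = 0; n < t.dimension(0); ++n)
    for (size_t h = 0; h < t.dimension(1); ++h)
      for (size_t w = 0; w < t.dimension(2); ++w)
        for (size_t c = 0; c < t.dimension(3); ++c)
        {
          const size_t offset = t.calcOffset(neurun::util::feature::Coordinate4D(n, h, w, c));
          t.buffer()[offset] = value; // calcOffset returns a byte offset
        }
}
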