Diffstat (limited to 'runtimes/neurun/core/include/backend')
-rw-r--r--  runtimes/neurun/core/include/backend/Backend.h               |  65
-rw-r--r--  runtimes/neurun/core/include/backend/CustomKernel.h          |  82
-rw-r--r--  runtimes/neurun/core/include/backend/CustomKernelRegistry.h  |  50
-rw-r--r--  runtimes/neurun/core/include/backend/ExecTime.h              | 111
-rw-r--r--  runtimes/neurun/core/include/backend/IConfig.h               |  45
-rw-r--r--  runtimes/neurun/core/include/backend/IConstantInitializer.h  | 260
-rw-r--r--  runtimes/neurun/core/include/backend/IKernelGenerator.h      |  63
-rw-r--r--  runtimes/neurun/core/include/backend/IMemoryManager.h        |  49
-rw-r--r--  runtimes/neurun/core/include/backend/IShapeFixer.h           |  56
-rw-r--r--  runtimes/neurun/core/include/backend/ITensorBuilder.h        |  94
-rw-r--r--  runtimes/neurun/core/include/backend/ITensorManager.h        |  56
-rw-r--r--  runtimes/neurun/core/include/backend/JSONExecTime.h          |  96
-rw-r--r--  runtimes/neurun/core/include/backend/operand/IObject.h       |  42
-rw-r--r--  runtimes/neurun/core/include/backend/operand/ITensor.h       |  52
-rw-r--r--  runtimes/neurun/core/include/backend/operand/Object.h        |  57
15 files changed, 1178 insertions, 0 deletions
diff --git a/runtimes/neurun/core/include/backend/Backend.h b/runtimes/neurun/core/include/backend/Backend.h
new file mode 100644
index 000000000..e8bfac25c
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/Backend.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_BACKEND_H__
+#define __NEURUN_BACKEND_BACKEND_H__
+
+#include <memory>
+
+#include "model/Operands.h"
+
+namespace neurun
+{
+namespace backend
+{
+
+class Backend;
+struct IConfig;
+class IConstantInitializer;
+class IKernelGenerator;
+class IShapeFixer;
+struct ITensorBuilder;
+
+namespace custom
+{
+class KernelRegistry;
+}
+
+class BackendContext
+{
+public:
+ const Backend *backend;
+ std::shared_ptr<ITensorBuilder> tensor_builder;
+ std::shared_ptr<IConstantInitializer> constant_initializer;
+ std::shared_ptr<IKernelGenerator> kernel_gen;
+ std::shared_ptr<IShapeFixer> shape_fixer;
+};
+
+class Backend
+{
+public:
+ virtual ~Backend() = default;
+ virtual std::shared_ptr<neurun::backend::IConfig> config() const = 0;
+
+ virtual std::unique_ptr<BackendContext>
+ newContext(const model::Operands &operands,
+ const std::shared_ptr<custom::KernelRegistry> &registry) const = 0;
+};
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_BACKEND_H__
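
For orientation, the sketch below shows how a caller might obtain the per-compilation objects through this interface. The backend instance, operands, and registry are assumed to come from elsewhere, and every name in the sketch is illustrative rather than part of the header above.

    #include "backend/Backend.h"

    #include <memory>

    // Hypothetical caller-side usage of Backend/BackendContext (illustrative only).
    std::unique_ptr<neurun::backend::BackendContext>
    make_context(const neurun::backend::Backend &backend, const neurun::model::Operands &operands,
                 const std::shared_ptr<neurun::backend::custom::KernelRegistry> &registry)
    {
      // Each backend hands out its own tensor builder, kernel generator, etc.
      auto context = backend.newContext(operands, registry);
      auto tensor_builder = context->tensor_builder; // shared with the compiler
      auto kernel_gen = context->kernel_gen;         // used during code generation
      (void)tensor_builder;
      (void)kernel_gen;
      return context;
    }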
diff --git a/runtimes/neurun/core/include/backend/CustomKernel.h b/runtimes/neurun/core/include/backend/CustomKernel.h
new file mode 100644
index 000000000..db0c91e46
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/CustomKernel.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_CUSTOM_KERNEL_H__
+#define __NEURUN_BACKEND_CUSTOM_KERNEL_H__
+
+#include "nnfw_dev.h"
+
+#include "exec/IFunction.h"
+
+#include "misc/tensor/Shape.h"
+#include "model/DataType.h"
+
+#include <vector>
+
+namespace neurun
+{
+namespace backend
+{
+namespace custom
+{
+
+using Shape = nnfw::misc::tensor::Shape;
+
+struct TypeInfo
+{
+ Shape shape;
+ model::DataType dtype;
+};
+
+class Kernel : public ::neurun::exec::IFunction
+{
+public:
+ explicit Kernel(nnfw_custom_eval evalFunction);
+
+ nnfw_custom_kernel_params _params;
+ char *_userdata;
+ size_t _userdata_size;
+
+ nnfw_custom_eval _evalFunction;
+ // nnfw_custom_type_infer _type_infer_function; // Unused for now
+
+ struct CustomKernelConfigParams
+ {
+ std::vector<void *> input_allocations;
+ std::vector<TypeInfo> input_types;
+
+ std::vector<void *> output_allocations;
+ std::vector<TypeInfo> output_types;
+
+ char *userdata;
+ size_t userdata_size;
+ };
+
+ /**
+ * @brief Fills the _params field that is later passed to the user-specified eval function
+ * @param inParams custom kernel parameters
+ */
+ virtual void configure(CustomKernelConfigParams &&inParams);
+
+ void run() override;
+ void runSync() override { run(); }
+};
+
+} // namespace custom
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_CUSTOM_KERNEL_H__
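
A minimal sketch of how this class might be wired up, assuming a user callback of the nnfw_custom_eval type declared in nnfw_dev.h. The helper name and the single-input/single-output shape of the example are assumptions made for illustration.

    #include "backend/CustomKernel.h"

    #include <memory>
    #include <utility>

    // Wrap a user-provided eval callback into a runnable IFunction (illustrative).
    std::unique_ptr<neurun::exec::IFunction>
    make_custom_function(nnfw_custom_eval user_eval, void *in_buf, void *out_buf,
                         const neurun::backend::custom::TypeInfo &in_type,
                         const neurun::backend::custom::TypeInfo &out_type, char *userdata,
                         size_t userdata_size)
    {
      using neurun::backend::custom::Kernel;
      std::unique_ptr<Kernel> kernel{new Kernel{user_eval}};

      // configure() only records the parameters; user_eval is invoked by run().
      Kernel::CustomKernelConfigParams params{};
      params.input_allocations = {in_buf};
      params.input_types = {in_type};
      params.output_allocations = {out_buf};
      params.output_types = {out_type};
      params.userdata = userdata;
      params.userdata_size = userdata_size;
      kernel->configure(std::move(params));

      return std::move(kernel);
    }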
diff --git a/runtimes/neurun/core/include/backend/CustomKernelRegistry.h b/runtimes/neurun/core/include/backend/CustomKernelRegistry.h
new file mode 100644
index 000000000..3eb218e11
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/CustomKernelRegistry.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_CUSTOM_KERNEL_REGISTRY_H__
+#define __NEURUN_BACKEND_CUSTOM_KERNEL_REGISTRY_H__
+
+#include "CustomKernel.h"
+
+#include <unordered_map>
+#include <functional>
+#include <memory>
+
+#include <iostream>
+
+namespace neurun
+{
+namespace backend
+{
+
+namespace custom
+{
+
+class KernelRegistry
+{
+public:
+ void registerKernel(const std::string &id, nnfw_custom_eval evalFunction);
+ std::unique_ptr<Kernel> buildKernelForOp(const std::string &id);
+
+private:
+ std::unordered_map<std::string, nnfw_custom_eval> _storage;
+};
+
+} // namespace custom
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_CUSTOM_KERNEL_REGISTRY_H__
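
For illustration only, a sketch of the intended flow: the frontend registers an eval callback under an operation id once, and a backend later asks the registry for a Kernel wrapping that callback. The function and variable names here are assumptions.

    #include "backend/CustomKernelRegistry.h"

    // Register once (e.g. at model load time), then build kernels on demand.
    std::unique_ptr<neurun::backend::custom::Kernel>
    lookup_custom_op(neurun::backend::custom::KernelRegistry &registry, const std::string &op_id,
                     nnfw_custom_eval user_eval)
    {
      registry.registerKernel(op_id, user_eval); // id -> callback mapping
      return registry.buildKernelForOp(op_id);   // fresh Kernel for this operation
    }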
diff --git a/runtimes/neurun/core/include/backend/ExecTime.h b/runtimes/neurun/core/include/backend/ExecTime.h
new file mode 100644
index 000000000..4eaf49fab
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/ExecTime.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_EXEC_TIME_H__
+#define __NEURUN_BACKEND_EXEC_TIME_H__
+
+#include "backend/Backend.h"
+#include "backend/IConfig.h"
+#include "JSONExecTime.h"
+#include <memory>
+#include <limits>
+#include <map>
+#include <unordered_map>
+#include <vector>
+
+namespace neurun
+{
+namespace backend
+{
+class ExecTime
+{
+public:
+ explicit ExecTime(const std::vector<const Backend *> &backends) : _json(backends, _measurements)
+ {
+ }
+
+public:
+ /**
+ * @brief Get the execution time of an operation for a given input size,
+ * or a linearly interpolated value if there is no record for that size
+ *
+ * @param[in] backend id of a backend
+ * @param[in] operation name of an operation
+ * @param[in] quant whether the input type is quantized
+ * @param[in] op_size sum of the operation's flattened input and output sizes
+ * @return execution time for the given input size,
+ * or -1 if there is no record for the given parameters (backend, op, quantization)
+ */
+ int64_t getOperationExecTime(const Backend *backend, const std::string &operation, bool quant,
+ uint32_t op_size) const;
+ /**
+ * @brief Update the execution time of an operation on a backend for a given input size,
+ * or add a new entry if none exists.
+ *
+ * @param[in] backend id of a backend
+ * @param[in] operation name of an operation
+ * @param[in] quant whether the input type is quantized
+ * @param[in] op_size sum of the operation's flattened input and output sizes
+ * @param[in] time measured execution time
+ */
+ void updateOperationExecTime(const Backend *backend, const std::string &operation, bool quant,
+ uint32_t op_size, int64_t time);
+ /**
+ * @brief Get the permute time from one backend to another
+ *
+ * @param[in] from_backend
+ * @param[in] to_backend
+ * @param[in] quant whether the input type is quantized
+ * @param[in] op_size sum of the operation's flattened input and output sizes
+ * @return permutation time for the given operation size
+ */
+ int64_t getPermuteTime(const Backend *from_backend, const Backend *to_backend, bool quant,
+ uint32_t op_size) const;
+ /**
+ * @brief Update permute time from one backend to another
+ *
+ * @param[in] from_backend
+ * @param[in] to_backend
+ * @param[in] quant whether the input type is quantized
+ * @param[in] time measured permutation time
+ * @param[in] op_size sum of the operation's flattened input and output sizes
+ */
+ void updatePermuteTime(const Backend *from_backend, const Backend *to_backend, bool quant,
+ uint32_t op_size, int64_t time);
+ /**
+ * @brief Get the maximum value of int32_t, widened to int64_t
+ * @return the maximum value
+ */
+ static int64_t getMax() { return _MAX; }
+ /**
+ * @brief Update metrics file with new data.
+ */
+ void uploadOperationsExecTime() const { _json.uploadOperationsExecTime(); }
+ static const int64_t NOT_FOUND = -1;
+
+private:
+ /// @brief Measurement data, which is shared with serializer
+ MeasurementData _measurements;
+ // std::numeric_limits<int64_t>::max() is not used here because it may cause integer overflow
+ static const int64_t _MAX = std::numeric_limits<int32_t>::max();
+ /// @brief Serializer
+ JSON _json;
+};
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_EXEC_TIME_H__
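
A short usage sketch of the profiling cache, assuming `cpu_backend` is a valid Backend pointer known to the caller; the operation name, size, and time values are arbitrary placeholders.

    #include "backend/ExecTime.h"

    // Record one measurement and read it back (illustrative values).
    int64_t record_and_query(const neurun::backend::Backend *cpu_backend)
    {
      neurun::backend::ExecTime exec_time{{cpu_backend}};

      // Store a measurement: "Conv2D", non-quantized, flattened operand size 1024.
      exec_time.updateOperationExecTime(cpu_backend, "Conv2D", /*quant=*/false, /*op_size=*/1024,
                                        /*time=*/500);
      exec_time.uploadOperationsExecTime(); // persist to the JSON file

      // Unknown sizes are linearly interpolated; ExecTime::NOT_FOUND (-1) is
      // returned when there is no record for (backend, op, quantization) at all.
      return exec_time.getOperationExecTime(cpu_backend, "Conv2D", /*quant=*/false, /*op_size=*/1024);
    }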
diff --git a/runtimes/neurun/core/include/backend/IConfig.h b/runtimes/neurun/core/include/backend/IConfig.h
new file mode 100644
index 000000000..0e9572033
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/IConfig.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ICONFIG_H__
+#define __NEURUN_BACKEND_ICONFIG_H__
+
+#include "util/ITimer.h"
+#include <memory>
+#include <string>
+
+namespace neurun
+{
+namespace backend
+{
+
+struct IConfig
+{
+ virtual ~IConfig() = default;
+
+ virtual std::string id() = 0;
+ virtual void initialize() = 0;
+ // Support subtensor allocation
+ virtual bool SupportSubTensorAlloc() = 0;
+
+ // Timer is used for backend profiling. If the default (nullptr) is returned, profiling will not work.
+ virtual std::unique_ptr<util::ITimer> timer() { return nullptr; }
+};
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ICONFIG_H__
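
A minimal illustrative implementation of this interface; the struct name and return values are made up, and real backends report their own id and capabilities.

    #include "backend/IConfig.h"

    // Hypothetical config for a toy backend.
    struct DummyConfig : public neurun::backend::IConfig
    {
      std::string id() override { return "dummy"; }
      void initialize() override { /* nothing to set up */ }
      bool SupportSubTensorAlloc() override { return false; }
      // timer() is left at its default nullptr, so profiling stays disabled.
    };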
diff --git a/runtimes/neurun/core/include/backend/IConstantInitializer.h b/runtimes/neurun/core/include/backend/IConstantInitializer.h
new file mode 100644
index 000000000..8393e0fd8
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/IConstantInitializer.h
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ICONSTANT_INITIALIZER_H__
+#define __NEURUN_BACKEND_ICONSTANT_INITIALIZER_H__
+
+#include <unordered_map>
+#include <functional>
+
+#include "ITensorBuilder.h"
+#include "model/Layout.h"
+#include "model/Operand.h"
+#include "model/Operands.h"
+#include "model/OperationVisitor.h"
+#include "model/Subgraph.h"
+#include "util/logging.h"
+#include "util/Utils.h"
+
+namespace
+{
+template <typename T>
+static void Init(const neurun::model::Operand &model_obj, neurun::backend::operand::IObject &obj,
+ const bool copy,
+ const neurun::model::Layout frontend_layout = neurun::model::Layout::UNKNOWN)
+{
+ const auto shape = model_obj.shape();
+ auto base = reinterpret_cast<const T *>(model_obj.data().base());
+
+ obj.access([&](::neurun::backend::operand::ITensor &tensor) {
+ switch (shape.rank())
+ {
+ case 1:
+ {
+ auto vec_size = shape.dim(0);
+ for (int32_t n = 0; n < vec_size; ++n)
+ {
+ const T *from = reinterpret_cast<const T *>(base) + n;
+ const auto value = *from;
+
+ T *into = reinterpret_cast<T *>(tensor.buffer()) + n;
+
+ *into = value;
+ }
+ break;
+ }
+ case 2:
+ {
+ const int32_t copy_len = shape.dim(1);
+
+ for (auto i = 0; i < shape.dim(0); ++i)
+ {
+ neurun::util::Coordinates coords{i, 0};
+ memcpy(tensor.buffer() + tensor.calcOffset(coords), base + i * copy_len,
+ copy_len * sizeof(T));
+ }
+ break;
+ }
+ case 3:
+ {
+ const int32_t width = shape.dim(1);
+ const int32_t copy_len = shape.dim(2);
+
+ for (auto i = 0; i < shape.dim(0); ++i)
+ {
+ for (auto j = 0; j < shape.dim(1); ++j)
+ {
+ neurun::util::Coordinates coords{i, j, 0};
+ memcpy(tensor.buffer() + tensor.calcOffset(coords),
+ base + i * width * copy_len + j * copy_len, copy_len * sizeof(T));
+ }
+ }
+ break;
+ }
+ case 4:
+ {
+ const int32_t height = shape.dim(1);
+ const int32_t width = shape.dim(2);
+ const int32_t copy_len = shape.dim(3);
+ for (auto i = 0; i < shape.dim(0); ++i)
+ {
+ for (auto j = 0; j < shape.dim(1); ++j)
+ {
+ for (auto k = 0; k < shape.dim(2); ++k)
+ {
+ if (copy)
+ {
+ neurun::util::Coordinates coords{i, j, k, 0};
+ memcpy(tensor.buffer() + tensor.calcOffset(coords),
+ base + i * height * width * copy_len + j * width * copy_len + k * copy_len,
+ copy_len * sizeof(T));
+ }
+ else
+ {
+ for (auto l = 0; l < shape.dim(3); ++l)
+ {
+ const auto coords = neurun::util::convertCoordinates(
+ {i, j, k, l}, frontend_layout, tensor.layout());
+ T *into = reinterpret_cast<T *>(tensor.buffer() + tensor.calcOffset(coords));
+ T value = *(base + i * height * width * copy_len + j * width * copy_len +
+ k * copy_len + l);
+ *into = value;
+ }
+ }
+ }
+ }
+ }
+ break;
+ }
+ default:
+ throw std::runtime_error{"Not yet supported"};
+ }
+ });
+}
+
+template <typename T>
+void copyInit(const neurun::model::Operand &model_obj, neurun::backend::operand::IObject &obj)
+{
+ Init<T>(model_obj, obj, true);
+}
+
+template <typename T>
+void permuteInit(const neurun::model::Operand &model_obj, neurun::backend::operand::IObject &obj,
+ const neurun::model::Layout frontend_layout)
+{
+ Init<T>(model_obj, obj, false, frontend_layout);
+}
+
+} // namespace
+
+namespace neurun
+{
+namespace backend
+{
+
+class IConstantInitializer : model::OperationVisitor
+{
+public:
+ virtual ~IConstantInitializer() = default;
+
+public:
+ virtual void run() = 0;
+
+public:
+ using Initializer = std::function<void(const model::Operand &, backend::operand::IObject &)>;
+
+ void generate(const model::Subgraph &subg, const model::Operands &operands)
+ {
+ _current_subg_layout = subg.getLayout();
+ subg.accept(*this);
+ for (const auto &e : subg.operations())
+ {
+ for (const auto &ind : e.node->getInputs())
+ {
+ const auto &obj = operands.at(ind);
+ if (obj.isConstant() && !exist(ind))
+ {
+ registerPermuteInitializer(ind, obj);
+ }
+ }
+ }
+ }
+
+protected:
+#define OP(InternalName, IsNnApi) \
+ virtual void visit(const model::operation::InternalName &) override { /* DO NOTHING */ }
+#include "model/Operations.lst"
+#undef OP
+
+protected:
+ void registerCopyInitializer(const model::OperandIndex &index, const model::Operand &obj)
+ {
+ // For CONSTANT operands only
+ if (!obj.isConstant())
+ return;
+
+ VERBOSE(FillOperandData) << "Fill data for operand " << index.value() << std::endl;
+
+ const auto type = obj.typeInfo().type();
+ using neurun::model::DataType;
+
+ switch (type)
+ {
+ case DataType::FLOAT32:
+ _init_map[index] = copyInit<float>;
+ break;
+ case DataType::INT32:
+ _init_map[index] = copyInit<int32_t>;
+ break;
+ case DataType::UINT32:
+ _init_map[index] = copyInit<uint32_t>;
+ break;
+ case DataType::BOOL8:
+ case DataType::QUANT8_ASYMM:
+ _init_map[index] = copyInit<uint8_t>;
+ break;
+ default:
+ throw std::runtime_error("Not supported, yet");
+ break;
+ }
+ }
+
+protected:
+ void registerPermuteInitializer(const model::OperandIndex &index, const model::Operand &obj)
+ {
+ // For CONSTANT operands only
+ if (!obj.isConstant())
+ return;
+
+ VERBOSE(FillOperandData) << "Fill data for operand " << index.value() << std::endl;
+
+ const auto type = obj.typeInfo().type();
+ using neurun::model::DataType;
+ using namespace std::placeholders;
+
+ switch (type)
+ {
+ case DataType::FLOAT32:
+ _init_map[index] = std::bind(permuteInit<float>, _1, _2, _current_subg_layout);
+ break;
+ case DataType::INT32:
+ _init_map[index] = std::bind(permuteInit<int32_t>, _1, _2, _current_subg_layout);
+ break;
+ case DataType::UINT32:
+ _init_map[index] = std::bind(permuteInit<uint32_t>, _1, _2, _current_subg_layout);
+ break;
+ case DataType::BOOL8:
+ case DataType::QUANT8_ASYMM:
+ _init_map[index] = std::bind(permuteInit<uint8_t>, _1, _2, _current_subg_layout);
+ break;
+ default:
+ throw std::runtime_error("Not supported, yet");
+ break;
+ }
+ }
+
+private:
+ bool exist(const model::OperandIndex &ind) { return _init_map.find(ind) != _init_map.end(); }
+
+protected:
+ std::unordered_map<model::OperandIndex, Initializer> _init_map;
+ model::Layout _current_subg_layout;
+};
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ICONSTANT_INITIALIZER_H__
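
To show how the registered Initializer entries are meant to be consumed, here is a sketch of a concrete subclass. The class name, the held references, and the use of ITensorBuilder::wrapTensor are assumptions for illustration, not the actual backend implementations.

    #include "backend/IConstantInitializer.h"

    // Illustrative concrete initializer: run() applies every registered
    // Initializer to the backend object wrapping the corresponding tensor.
    class DummyConstantInitializer : public neurun::backend::IConstantInitializer
    {
    public:
      DummyConstantInitializer(const neurun::model::Operands &operands,
                               const std::shared_ptr<neurun::backend::ITensorBuilder> &tensor_builder)
          : _operands{operands}, _tensor_builder{tensor_builder}
      {
      }

      void run() override
      {
        for (const auto &entry : _init_map)
        {
          const auto &index = entry.first;
          const auto &init_fn = entry.second;
          auto object = _tensor_builder->wrapTensor(index); // backend::operand::IObject
          init_fn(_operands.at(index), *object);
        }
      }

    private:
      const neurun::model::Operands &_operands;
      std::shared_ptr<neurun::backend::ITensorBuilder> _tensor_builder;
    };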
diff --git a/runtimes/neurun/core/include/backend/IKernelGenerator.h b/runtimes/neurun/core/include/backend/IKernelGenerator.h
new file mode 100644
index 000000000..542a55338
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/IKernelGenerator.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_IKERNEL_GENERATOR_H__
+#define __NEURUN_BACKEND_IKERNEL_GENERATOR_H__
+
+#include <memory>
+#include <functional>
+
+#include "ITensorBuilder.h"
+#include "compiler/IExecutionBuilder.h"
+#include "model/OperationVisitor.h"
+#include "model/Subgraph.h"
+#include "cpp14/memory.h"
+
+namespace neurun
+{
+namespace backend
+{
+
+class IKernelGenerator : public model::OperationVisitor
+{
+public:
+ virtual ~IKernelGenerator() = default;
+
+ void generate(const model::Operation &node, neurun::compiler::IExecutionBuilder *executionBuilder)
+ {
+ _execution_builder = executionBuilder;
+ node.accept(*this);
+ }
+
+protected:
+ using model::OperationVisitor::visit;
+
+#define OP(InternalName, IsNnApi) \
+ virtual void visit(const model::operation::InternalName &) override \
+ { \
+ throw std::runtime_error("NYI"); \
+ }
+#include "model/Operations.lst"
+#undef OP
+
+protected:
+ neurun::compiler::IExecutionBuilder *_execution_builder;
+};
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_IKERNEL_GENERATOR_H__
diff --git a/runtimes/neurun/core/include/backend/IMemoryManager.h b/runtimes/neurun/core/include/backend/IMemoryManager.h
new file mode 100644
index 000000000..b06bab872
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/IMemoryManager.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_IMEMORY_MANAGER_H__
+#define __NEURUN_BACKEND_IMEMORY_MANAGER_H__
+
+namespace neurun
+{
+namespace backend
+{
+
+struct IMemoryManager
+{
+ virtual ~IMemoryManager() = default;
+
+ virtual void allocate(void) = 0;
+ virtual void deallocate(void) = 0;
+};
+
+} // namespace backend
+} // namespace neurun
+
+#include <unordered_set>
+#include <memory>
+
+namespace neurun
+{
+namespace backend
+{
+
+using MemoryManagerSet = std::unordered_set<std::unique_ptr<backend::IMemoryManager>>;
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_IMEMORY_MANAGER_H__
diff --git a/runtimes/neurun/core/include/backend/IShapeFixer.h b/runtimes/neurun/core/include/backend/IShapeFixer.h
new file mode 100644
index 000000000..ad137942c
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/IShapeFixer.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ISHAPE_FIXER_H__
+#define __NEURUN_BACKEND_ISHAPE_FIXER_H__
+
+#include <memory>
+#include <functional>
+
+#include "ITensorBuilder.h"
+#include "model/OperationVisitor.h"
+#include "model/Subgraph.h"
+#include "cpp14/memory.h"
+
+namespace neurun
+{
+namespace backend
+{
+
+class IShapeFixer : model::OperationVisitor
+{
+public:
+ virtual ~IShapeFixer() = default;
+
+ virtual std::shared_ptr<ITensorBuilder> tensor_builder() = 0;
+
+protected:
+#define OP(InternalName, IsNnApi) \
+ virtual void visit(const model::operation::InternalName &) override \
+ { \
+ throw std::runtime_error("NYI"); \
+ }
+#include "model/Operations.lst"
+#undef OP
+
+public:
+ void fix(const model::Operation &node) { node.accept(*this); }
+};
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ISHAPE_FIXER_H__
diff --git a/runtimes/neurun/core/include/backend/ITensorBuilder.h b/runtimes/neurun/core/include/backend/ITensorBuilder.h
new file mode 100644
index 000000000..72079a2fb
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/ITensorBuilder.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ITENSOR_BUILDER_H__
+#define __NEURUN_BACKEND_ITENSOR_BUILDER_H__
+
+#include <map>
+
+#include "model/Index.h"
+#include "model/OperandInfo.h"
+#include "model/Operation.h"
+#include "model/Layout.h"
+#include "operand/IObject.h"
+#include "operand/ITensor.h"
+#include "compiler/SubTensorInfo.h"
+#include "ITensorManager.h"
+
+namespace neurun
+{
+namespace backend
+{
+
+struct ITensorBuilder
+{
+ using IterateFunction = std::function<void(const model::OperandIndex &)>;
+
+ virtual ~ITensorBuilder(void) = default;
+
+ // TODO Merge registerTensorInfo and registerSubTensorInfo by abstracting them behind an internal class
+ /**
+ * @brief Register tensor information to allocate on backend
+ */
+ virtual void registerTensorInfo(const model::OperandIndex &, const model::OperandInfo &,
+ model::Layout frontend_layout, model::Layout backend_layout,
+ bool as_const) = 0;
+ /**
+ * @brief Register subtensor information to allocate on backend
+ */
+ virtual void registerSubTensorInfo(const model::OperandIndex &,
+ const compiler::SubTensorInfo &) = 0;
+
+ virtual void notifyFirstUse(const model::OperandIndex &) = 0;
+ virtual void notifyLastUse(const model::OperandIndex &) = 0;
+
+ virtual void prepare(void) = 0;
+ // TODO Remove this once all of the newly appended APIs below have landed
+ virtual void allocate(void) = 0;
+
+ virtual void allocateConsts() = 0;
+ virtual void allocateNonconsts() = 0;
+ virtual void postFunctionPrepare() = 0;
+ virtual void finalize() = 0;
+
+ virtual std::shared_ptr<::neurun::backend::operand::ITensor>
+ tensorAt(const model::OperandIndex &ind) = 0;
+ virtual std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind) = 0;
+ virtual void iterate(const IterateFunction &fn) = 0;
+
+ virtual void preVisit(const model::Operation &) = 0;
+ virtual void postVisit(const model::Operation &) = 0;
+
+ virtual std::unique_ptr<ITensorManager> releaseTensorManager(void) = 0;
+};
+
+} // namespace backend
+} // namespace neurun
+
+#include <unordered_set>
+#include <memory>
+
+namespace neurun
+{
+namespace backend
+{
+
+using TensorBuilderSet = std::unordered_set<std::shared_ptr<backend::ITensorBuilder>>;
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ITENSOR_BUILDER_H__
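
The interface implies a staged lifecycle. The driver below is only a plausible ordering sketch (the exact sequencing is decided by the compiler/executor, and the function name is invented): register tensor infos, then plan, allocate constants before non-constants, and finish up.

    #include "backend/ITensorBuilder.h"

    // Plausible call order on a tensor builder (illustrative, not normative).
    void build_tensors(const std::shared_ptr<neurun::backend::ITensorBuilder> &tensor_builder)
    {
      // ... registerTensorInfo() / registerSubTensorInfo() calls happen before this point ...
      tensor_builder->prepare();           // plan memory for the registered operands
      tensor_builder->allocateConsts();    // constants first, so initializers can fill them
      tensor_builder->allocateNonconsts(); // then the remaining operands
      tensor_builder->postFunctionPrepare();
      tensor_builder->finalize();
    }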
diff --git a/runtimes/neurun/core/include/backend/ITensorManager.h b/runtimes/neurun/core/include/backend/ITensorManager.h
new file mode 100644
index 000000000..74506ef59
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/ITensorManager.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ITENSOR_MANAGER_H__
+#define __NEURUN_BACKEND_ITENSOR_MANAGER_H__
+
+namespace neurun
+{
+namespace backend
+{
+
+// NOTE The name ITensorManager is still under discussion;
+// anyone is welcome to propose a better one.
+/**
+ * @brief Interface as an abstract tensor manager which has MemoryManager
+ */
+struct ITensorManager
+{
+ virtual ~ITensorManager() = default;
+
+ virtual void allocateConsts(void) = 0;
+ virtual void allocateNonconsts(void) = 0;
+ virtual void deallocateConsts(void) = 0;
+ virtual void deallocateNonconsts(void) = 0;
+};
+
+} // namespace backend
+} // namespace neurun
+
+#include <unordered_set>
+#include <memory>
+
+namespace neurun
+{
+namespace backend
+{
+
+using TensorManagerSet = std::unordered_set<std::unique_ptr<backend::ITensorManager>>;
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ITENSOR_MANAGER_H__
diff --git a/runtimes/neurun/core/include/backend/JSONExecTime.h b/runtimes/neurun/core/include/backend/JSONExecTime.h
new file mode 100644
index 000000000..84505e10f
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/JSONExecTime.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_JSON_EXEC_TIME_H__
+#define __NEURUN_BACKEND_JSON_EXEC_TIME_H__
+
+#include <fstream>
+#include <unordered_map>
+#include <map>
+#include <vector>
+#include "backend/Backend.h"
+#include "backend/IConfig.h"
+
+namespace neurun
+{
+namespace backend
+{
+
+/**
+ * @brief Table that contains the execution time of an operation on some backend for different input
+ * sizes, and the transfer time from one backend to another for various input sizes (permutation time)
+ *
+ * backend -> op -> quant-> size --> time
+ * _measurements[Backend*]["string"][bool][uint32_t] = int64_t
+ */
+using MeasurementData = std::unordered_map<
+ const Backend *,
+ std::unordered_map<std::string, std::unordered_map<bool, std::map<uint32_t, int64_t>>>>;
+
+class JSON
+{
+public:
+ explicit JSON(const std::vector<const Backend *> &backends, MeasurementData &measurements)
+ : _measurement_file("exec_time.json"), _backends(), _measurements(measurements)
+ {
+ for (const auto b : backends)
+ {
+ _backends.emplace(b->config()->id(), b);
+ }
+ loadOperationsExecTime();
+ };
+ /**
+ * @brief Update the measurement file (_measurement_file) with new data.
+ */
+ void uploadOperationsExecTime() const;
+
+private:
+ /// @brief File containing measurements
+ std::string _measurement_file;
+ std::unordered_map<std::string, const Backend *> _backends;
+ std::unordered_map<
+ const Backend *,
+ std::unordered_map<std::string, std::unordered_map<bool, std::map<uint32_t, int64_t>>>>
+ &_measurements;
+ /**
+ * @brief Helper function for inserting data to OperationExecTimes
+ *
+ * @param backend String name of backend
+ * @param operation String name of operation
+ * @param quant whether the input type is quantized
+ * @param stream File stream
+ */
+ void readOperation(const std::string &backend, const std::string &operation, bool quant,
+ std::ifstream &stream);
+
+ /**
+ * @brief Helper function for writing OperationExecTimes to stream
+ *
+ * @param operation_info Map of operations execution information
+ * @param stream File stream
+ */
+ void printOperation(const std::map<uint32_t, int64_t> &operation_info,
+ std::ofstream &stream) const;
+ /**
+ * @brief Parse and load the stored execution times from _measurement_file.
+ */
+ void loadOperationsExecTime();
+};
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_JSON_EXEC_TIME_H__
diff --git a/runtimes/neurun/core/include/backend/operand/IObject.h b/runtimes/neurun/core/include/backend/operand/IObject.h
new file mode 100644
index 000000000..56eea34a8
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/operand/IObject.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_OPERAND_I_OBJECT_H__
+#define __NEURUN_BACKEND_OPERAND_I_OBJECT_H__
+
+#include <functional>
+
+#include "ITensor.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace operand
+{
+
+struct IObject
+{
+ virtual ~IObject() = default;
+ virtual operand::ITensor *ptr(void) const = 0;
+ virtual void access(const std::function<void(operand::ITensor &tensor)> &fn) const = 0;
+};
+
+} // namespace operand
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_OPERAND_I_OBJECT_H__
diff --git a/runtimes/neurun/core/include/backend/operand/ITensor.h b/runtimes/neurun/core/include/backend/operand/ITensor.h
new file mode 100644
index 000000000..f762ad03c
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/operand/ITensor.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_OPERAND_I_TENSOR_H__
+#define __NEURUN_BACKEND_OPERAND_I_TENSOR_H__
+
+#include <cstring>
+#include <cstdint>
+
+#include "model/Layout.h"
+#include "util/Coordinates.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace operand
+{
+
+class ITensor
+{
+public:
+ virtual ~ITensor() = default;
+
+public:
+ virtual uint8_t *buffer() const = 0;
+ virtual size_t total_size() const = 0;
+ virtual size_t dimension(size_t index) const = 0;
+ virtual size_t num_dimensions() const = 0;
+ virtual size_t calcOffset(const neurun::util::Coordinates &coords) const = 0;
+ virtual model::Layout layout() const = 0;
+ virtual bool has_padding() const = 0;
+};
+
+} // namespace operand
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_OPERAND_I_TENSOR_H__
diff --git a/runtimes/neurun/core/include/backend/operand/Object.h b/runtimes/neurun/core/include/backend/operand/Object.h
new file mode 100644
index 000000000..e6f6d926d
--- /dev/null
+++ b/runtimes/neurun/core/include/backend/operand/Object.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_OPERAND_OBJECT_H__
+#define __NEURUN_BACKEND_OPERAND_OBJECT_H__
+
+#include <memory>
+#include "ITensor.h"
+
+#include "IObject.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace operand
+{
+
+class Object : public IObject
+{
+public:
+ Object() = default;
+
+public:
+ Object(const std::shared_ptr<ITensor> &tensor) : _tensor{tensor}
+ {
+ // DO NOTHING
+ }
+
+public:
+ ITensor *ptr(void) const override { return _tensor.get(); }
+
+private:
+ std::shared_ptr<ITensor> _tensor;
+
+public:
+ void access(const std::function<void(ITensor &tensor)> &fn) const override { fn(*_tensor); }
+};
+
+} // namespace operand
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_OPERAND_OBJECT_H__
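
Finally, a toy example of the IObject/ITensor contract: a contiguous 1-D byte buffer with no padding, wrapped in an Object and accessed through the callback. The tensor class is invented purely for illustration; real tensors come from the individual backends.

    #include "backend/operand/Object.h"

    #include <cstring>
    #include <memory>
    #include <vector>

    // Toy ITensor backed by a plain byte vector (illustrative only).
    class VectorTensor : public neurun::backend::operand::ITensor
    {
    public:
      explicit VectorTensor(size_t bytes) : _data(bytes, 0) {}

      uint8_t *buffer() const override { return _data.data(); }
      size_t total_size() const override { return _data.size(); }
      size_t dimension(size_t) const override { return _data.size(); }
      size_t num_dimensions() const override { return 1; }
      size_t calcOffset(const neurun::util::Coordinates &) const override { return 0; }
      neurun::model::Layout layout() const override { return neurun::model::Layout::UNKNOWN; }
      bool has_padding() const override { return false; }

    private:
      mutable std::vector<uint8_t> _data; // mutable so the const buffer() accessor can expose it
    };

    // Object wraps the tensor; access() funnels all reads/writes through a callback.
    inline void clear_tensor(const std::shared_ptr<VectorTensor> &tensor)
    {
      neurun::backend::operand::Object object{tensor};
      object.access([](neurun::backend::operand::ITensor &t) {
        std::memset(t.buffer(), 0, t.total_size());
      });
    }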