summaryrefslogtreecommitdiff
path: root/runtimes/neurun/backend/srcn
diff options
context:
space:
mode:
Diffstat (limited to 'runtimes/neurun/backend/srcn')
-rw-r--r--runtimes/neurun/backend/srcn/Backend.h63
-rw-r--r--runtimes/neurun/backend/srcn/CMakeLists.txt20
-rw-r--r--runtimes/neurun/backend/srcn/Config.cc33
-rw-r--r--runtimes/neurun/backend/srcn/Config.h45
-rw-r--r--runtimes/neurun/backend/srcn/ConstantInitializer.cc145
-rw-r--r--runtimes/neurun/backend/srcn/ConstantInitializer.h56
-rw-r--r--runtimes/neurun/backend/srcn/KernelGenerator.cc102
-rw-r--r--runtimes/neurun/backend/srcn/KernelGenerator.h56
-rw-r--r--runtimes/neurun/backend/srcn/MemoryManager.cc93
-rw-r--r--runtimes/neurun/backend/srcn/MemoryManager.h65
-rw-r--r--runtimes/neurun/backend/srcn/MemoryPlanner.cc123
-rw-r--r--runtimes/neurun/backend/srcn/MemoryPlanner.h168
-rw-r--r--runtimes/neurun/backend/srcn/MemoryPlannerFactory.cc47
-rw-r--r--runtimes/neurun/backend/srcn/MemoryPlannerFactory.h45
-rw-r--r--runtimes/neurun/backend/srcn/PluginClassesAllocator.cc33
-rw-r--r--runtimes/neurun/backend/srcn/ShapeFixer.cc39
-rw-r--r--runtimes/neurun/backend/srcn/ShapeFixer.h52
-rw-r--r--runtimes/neurun/backend/srcn/TensorBuilder.cc116
-rw-r--r--runtimes/neurun/backend/srcn/TensorBuilder.h92
-rw-r--r--runtimes/neurun/backend/srcn/TensorManager.cc100
-rw-r--r--runtimes/neurun/backend/srcn/TensorManager.h66
-rw-r--r--runtimes/neurun/backend/srcn/kernel/OperationUtils.cc90
-rw-r--r--runtimes/neurun/backend/srcn/kernel/OperationUtils.h75
-rw-r--r--runtimes/neurun/backend/srcn/kernel/TransposeConvLayer.cc122
-rw-r--r--runtimes/neurun/backend/srcn/kernel/TransposeConvLayer.h80
-rw-r--r--runtimes/neurun/backend/srcn/operand/Tensor.cc43
-rw-r--r--runtimes/neurun/backend/srcn/operand/Tensor.h76
27 files changed, 2045 insertions, 0 deletions
diff --git a/runtimes/neurun/backend/srcn/Backend.h b/runtimes/neurun/backend/srcn/Backend.h
new file mode 100644
index 000000000..6d7da689f
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/Backend.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_SRCN_BACKEND_H__
+#define __NEURUN_BACKEND_SRCN_BACKEND_H__
+
+#include <memory>
+#include <backend/Backend.h>
+#include <model/Operands.h>
+
+#include "Config.h"
+#include "ConstantInitializer.h"
+#include "KernelGenerator.h"
+#include "ShapeFixer.h"
+#include "backend/CustomKernelRegistry.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+
+class Backend : public ::neurun::backend::Backend
+{
+public:
+ Backend() : _config{std::make_shared<Config>()} {}
+
+ std::shared_ptr<IConfig> config() const override { return _config; }
+
+ std::unique_ptr<BackendContext>
+ newContext(const model::Operands &operands,
+ const std::shared_ptr<custom::KernelRegistry> &registry) const override
+ {
+ auto tensor_builder = std::make_shared<TensorBuilder>();
+ return std::unique_ptr<BackendContext>{new BackendContext{
+ this, tensor_builder, std::make_shared<ConstantInitializer>(operands, tensor_builder),
+ std::make_shared<KernelGenerator>(operands, tensor_builder, registry),
+ std::make_shared<ShapeFixer>(operands, tensor_builder)}};
+ }
+
+private:
+ std::shared_ptr<IConfig> _config;
+};
+
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_SRCN_BACKEND_H__
diff --git a/runtimes/neurun/backend/srcn/CMakeLists.txt b/runtimes/neurun/backend/srcn/CMakeLists.txt
new file mode 100644
index 000000000..b51b95133
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/CMakeLists.txt
@@ -0,0 +1,20 @@
+if(NOT BUILD_SRCN_KERNEL)
+ message(STATUS "Skip building SRCN backend: SRCN kernel library is not built")
+ return()
+endif()
+
+set(LIB_NEURUN_BACKEND_SRCN neurun_backend_srcn)
+
+file(GLOB_RECURSE SOURCES "*.cc")
+
+add_library(${LIB_NEURUN_BACKEND_SRCN} SHARED ${SOURCES})
+
+target_link_libraries(${LIB_NEURUN_BACKEND_SRCN} PUBLIC nnfw_lib_cpp14)
+target_link_libraries(${LIB_NEURUN_BACKEND_SRCN} PRIVATE nnfw_lib_srcn)
+target_link_libraries(${LIB_NEURUN_BACKEND_SRCN} PRIVATE neurun_core)
+target_link_libraries(${LIB_NEURUN_BACKEND_SRCN} PRIVATE nnfw_common)
+target_link_libraries(${LIB_NEURUN_BACKEND_SRCN} PRIVATE nnfw_coverage)
+
+set_target_properties(${LIB_NEURUN_BACKEND_SRCN} PROPERTIES OUTPUT_NAME backend_srcn)
+
+install(TARGETS ${LIB_NEURUN_BACKEND_SRCN} DESTINATION lib)
diff --git a/runtimes/neurun/backend/srcn/Config.cc b/runtimes/neurun/backend/srcn/Config.cc
new file mode 100644
index 000000000..e69136fd9
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/Config.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Config.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+
+void Config::initialize()
+{
+ // DO NOTHING
+}
+
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/backend/srcn/Config.h b/runtimes/neurun/backend/srcn/Config.h
new file mode 100644
index 000000000..bffcbf245
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/Config.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_SRCN_CONFIG_H__
+#define __NEURUN_BACKEND_SRCN_CONFIG_H__
+
+#include <backend/IConfig.h>
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+
+class Config : public IConfig
+{
+public:
+ std::string id() override { return "srcn"; }
+ void initialize() override;
+ bool SupportSubTensorAlloc() override
+ {
+ // NOTE srcn allocator cannot support subtensor allocation yet
+ return false;
+ }
+};
+
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_SRCN_CONFIG_H__
diff --git a/runtimes/neurun/backend/srcn/ConstantInitializer.cc b/runtimes/neurun/backend/srcn/ConstantInitializer.cc
new file mode 100644
index 000000000..f37ebe9a4
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/ConstantInitializer.cc
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ConstantInitializer.h"
+
+#include "kernel/OperationUtils.h"
+
+namespace
+{
+
+template <typename T>
+static void
+PermuteKernel(const neurun::model::Operand &model_obj, neurun::backend::operand::IObject &obj,
+ const neurun::model::Layout frontend_layout = neurun::model::Layout::UNKNOWN)
+{
+ const auto shape = model_obj.shape();
+ auto base = reinterpret_cast<const T *>(model_obj.data().base());
+
+ assert(shape.rank() == 4);
+
+ // TODO Support frontend layout
+ UNUSED_RELEASE(frontend_layout);
+
+ obj.access([&](::neurun::backend::operand::ITensor &tensor) {
+ // NOTE The srcn takes a HWOI layout as kernel filter even though image layout is NHWC.
+ // This policy is the same as the TensorFlow policy.
+ // So using srcn library, we need to change kernel layout to HWOI from OHWI.
+ const int32_t outch = shape.dim(0);
+ const int32_t height = shape.dim(1);
+ const int32_t width = shape.dim(2);
+ const int32_t inch = shape.dim(3);
+ const auto to_dim = ::neurun::backend::srcn::kernel::convertCoordinates(
+ {outch, height, width, inch}, ::neurun::backend::srcn::kernel::FilterLayout::OHWI,
+ ::neurun::backend::srcn::kernel::FilterLayout::HWOI);
+ for (auto i = 0; i < outch; ++i)
+ {
+ for (auto j = 0; j < height; ++j)
+ {
+ for (auto k = 0; k < width; ++k)
+ {
+ for (auto l = 0; l < inch; ++l)
+ {
+ const auto coords = ::neurun::backend::srcn::kernel::convertCoordinates(
+ {i, j, k, l}, ::neurun::backend::srcn::kernel::FilterLayout::OHWI,
+ ::neurun::backend::srcn::kernel::FilterLayout::HWOI);
+ const size_t offset = coords[0] * to_dim[1] * to_dim[2] * to_dim[3] +
+ coords[1] * to_dim[2] * to_dim[3] + coords[2] * to_dim[3] +
+ coords[3];
+ T *into = reinterpret_cast<T *>(tensor.buffer() + offset * sizeof(T));
+ T value = *(base + i * height * width * inch + j * width * inch + k * inch + l);
+ *into = value;
+ }
+ }
+ }
+ }
+ });
+}
+}
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+
+ConstantInitializer::ConstantInitializer(const model::Operands &operands,
+ const std::shared_ptr<TensorBuilder> &tensor_builder)
+ : _operands{operands}, _tensor_builder{tensor_builder}
+{
+ // DO NOTHING
+}
+
+void ConstantInitializer::run()
+{
+ for (const auto &it : _init_map)
+ {
+ const auto &ind = it.first;
+ const auto &fn = it.second;
+
+ const auto &model_obj = _operands.at(ind);
+ auto tensor_obj = _tensor_builder->wrapTensor(ind);
+ fn(model_obj, *tensor_obj);
+ }
+
+ _init_map.clear();
+}
+
+void ConstantInitializer::registerPermuteKernelInitializer(const model::OperandIndex &index,
+ const model::Operand &obj)
+{
+ // For only CONSTANTS
+ if (!obj.isConstant())
+ return;
+
+ VERBOSE(FillOperandData) << "[SRCN] Fill data for operand " << index.value() << std::endl;
+
+ const auto type = obj.typeInfo().type();
+ using neurun::model::DataType;
+ using namespace std::placeholders;
+
+ switch (type)
+ {
+ case DataType::FLOAT32:
+ _init_map[index] = std::bind(PermuteKernel<float>, _1, _2, _current_subg_layout);
+ break;
+ case DataType::INT32:
+ _init_map[index] = std::bind(PermuteKernel<int32_t>, _1, _2, _current_subg_layout);
+ break;
+ case DataType::UINT32:
+ _init_map[index] = std::bind(PermuteKernel<uint32_t>, _1, _2, _current_subg_layout);
+ break;
+ case DataType::BOOL8:
+ case DataType::QUANT8_ASYMM:
+ _init_map[index] = std::bind(PermuteKernel<uint8_t>, _1, _2, _current_subg_layout);
+ break;
+ default:
+ throw std::runtime_error("Not supported, yet");
+ break;
+ }
+}
+
+void ConstantInitializer::visit(const model::operation::TransposeConvNode &node)
+{
+ const auto &kernel_index = node.getInputs().at(model::operation::TransposeConvNode::KERNEL);
+ const auto &kernel_obj = _operands.at(kernel_index);
+ registerPermuteKernelInitializer(kernel_index, kernel_obj);
+}
+
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/backend/srcn/ConstantInitializer.h b/runtimes/neurun/backend/srcn/ConstantInitializer.h
new file mode 100644
index 000000000..9865084c0
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/ConstantInitializer.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_COMPILER_SRCN_CONSTANT_INITIALIZER_H__
+#define __NEURUN_COMPILER_SRCN_CONSTANT_INITIALIZER_H__
+
+#include <backend/IConstantInitializer.h>
+#include <model/Operands.h>
+#include "TensorBuilder.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+
+class ConstantInitializer : public IConstantInitializer
+{
+public:
+ ConstantInitializer(const model::Operands &operands,
+ const std::shared_ptr<TensorBuilder> &tensor_builder);
+
+public:
+ void run() override;
+
+public:
+ void registerPermuteKernelInitializer(const model::OperandIndex &index,
+ const model::Operand &obj);
+
+public:
+ void visit(const model::operation::TransposeConvNode &) override;
+
+private:
+ const model::Operands &_operands;
+ std::shared_ptr<TensorBuilder> _tensor_builder;
+};
+
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_COMPILER_SRCN_CONSTANT_INITIALIZER_H__
diff --git a/runtimes/neurun/backend/srcn/KernelGenerator.cc b/runtimes/neurun/backend/srcn/KernelGenerator.cc
new file mode 100644
index 000000000..c0cd8b43c
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/KernelGenerator.cc
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "KernelGenerator.h"
+
+#include <stdexcept>
+
+#include "cpp14/memory.h"
+#include "util/Padding.h"
+#include "kernel/TransposeConvLayer.h"
+
+#include <backend/Backend.h>
+#include <backend/IConfig.h>
+
+#include "util/logging.h"
+
+#include "util/Utils.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+
+KernelGenerator::KernelGenerator(const neurun::model::Operands &operand_ctx,
+ const std::shared_ptr<TensorBuilder> &tensor_builder,
+ const std::shared_ptr<custom::KernelRegistry> &kernel_registry)
+ : _ctx(operand_ctx), _tensor_builder(tensor_builder), _kernel_registry(kernel_registry),
+ _current_subg_layout(model::Layout::UNKNOWN)
+{
+ // DO NOTHING
+}
+
+void KernelGenerator::visit(const model::Subgraph &subgraph)
+{
+ _current_subg_layout = subgraph.getLayout();
+ for (const auto &e : subgraph.operations())
+ {
+ const auto &node = *(e.node);
+ _tensor_builder->preVisit(node);
+ node.accept(*this);
+ _tensor_builder->postVisit(node);
+ }
+}
+
+void KernelGenerator::visit(const model::operation::TransposeConvNode &node)
+{
+ using model::operation::TransposeConvNode;
+
+ const auto ofm_index{node.getOutputs().at(0)};
+ const auto ifm_index{node.getInputs().at(TransposeConvNode::Input::INPUT)};
+ const auto ker_index{node.getInputs().at(TransposeConvNode::Input::KERNEL)};
+ const auto output_shape_index{node.getInputs().at(TransposeConvNode::Input::OUTPUT_SHAPE)};
+
+ const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(_current_subg_layout);
+ const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(_current_subg_layout);
+ // Kernel format is [depth_out, kernel_height, kernel_width, depth_in].
+ const auto &ker_shape = _ctx.at(ker_index).shape();
+ const auto ker_height = ker_shape.dim(1);
+ const auto ker_width = ker_shape.dim(2);
+ const auto stride = node.param().stride;
+ const int padding_type = (node.param().padding.type == model::PaddingType::SAME);
+ const auto padding = neurun::util::calculatePadding(node.param().padding, ofm_shape, ifm_shape,
+ stride, ker_width, ker_height);
+
+ const auto ofm_backend_shape =
+ ::neurun::backend::srcn::kernel::getShape(_ctx.at(ofm_index), _current_subg_layout);
+ const auto ifm_backend_shape =
+ ::neurun::backend::srcn::kernel::getShape(_ctx.at(ifm_index), _current_subg_layout);
+ const auto ker_backend_shape =
+ ::neurun::backend::srcn::kernel::getShape(_ctx.at(ker_index), model::Layout::UNKNOWN);
+
+ auto ofm_alloc = _tensor_builder->at(ofm_index);
+ auto ifm_alloc = _tensor_builder->at(ifm_index);
+ auto ker_alloc = _tensor_builder->at(ker_index);
+
+ auto fn = nnfw::cpp14::make_unique<::neurun::backend::srcn::kernel::TransposeConvLayer>();
+
+ fn->configure(ifm_alloc->buffer(), ifm_backend_shape, ker_alloc->buffer(), ker_backend_shape,
+ padding_type, padding.left, padding.right, padding.top, padding.bottom,
+ stride.horizontal, stride.vertical, ofm_alloc->buffer(), ofm_backend_shape);
+
+ _execution_builder->append(std::move(fn));
+}
+
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/backend/srcn/KernelGenerator.h b/runtimes/neurun/backend/srcn/KernelGenerator.h
new file mode 100644
index 000000000..f3e92e465
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/KernelGenerator.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_SRCN_KERNEL_GENERATOR_H__
+#define __NEURUN_BACKEND_SRCN_KERNEL_GENERATOR_H__
+
+#include "backend/IKernelGenerator.h"
+#include "model/Operands.h"
+#include "operand/Tensor.h"
+#include "backend/CustomKernelRegistry.h"
+#include "TensorBuilder.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+
+class KernelGenerator : public IKernelGenerator
+{
+public:
+ KernelGenerator(const neurun::model::Operands &ctx,
+ const std::shared_ptr<TensorBuilder> &tensor_builder,
+ const std::shared_ptr<custom::KernelRegistry> &kernel_registry);
+
+ using IKernelGenerator::visit;
+
+ void visit(const model::Subgraph &) override;
+ void visit(const model::operation::TransposeConvNode &) override;
+
+private:
+ const neurun::model::Operands &_ctx;
+ std::shared_ptr<TensorBuilder> _tensor_builder;
+ std::shared_ptr<custom::KernelRegistry> _kernel_registry;
+ model::Layout _current_subg_layout;
+};
+
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_SRCN_KERNEL_GENERATOR_H__
diff --git a/runtimes/neurun/backend/srcn/MemoryManager.cc b/runtimes/neurun/backend/srcn/MemoryManager.cc
new file mode 100644
index 000000000..ad3f639f6
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/MemoryManager.cc
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MemoryManager.h"
+
+#include <cassert>
+
+#include "MemoryPlannerFactory.h"
+#include <backend/operand/Object.h>
+#include "util/logging.h"
+#include "util/ConfigSource.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+
+MemoryManager::MemoryManager() : _mem_planner{createMemoryPlanner()}
+{
+ // DO NOTHING
+}
+
+IMemoryPlanner *MemoryManager::createMemoryPlanner()
+{
+ auto planner_id = util::getConfigString(util::config::CPU_MEMORY_PLANNER);
+ return MemoryPlannerFactory::instance().create(planner_id);
+}
+
+void MemoryManager::buildTensor(const model::OperandIndex &ind, const model::OperandInfo &info)
+{
+ auto tensor = std::make_shared<operand::Tensor>(info);
+ _tensors[ind] = tensor;
+}
+
+void MemoryManager::claimPlan(const model::OperandIndex &ind, uint32_t size)
+{
+ _mem_planner->claim(ind, size);
+}
+
+void MemoryManager::releasePlan(const model::OperandIndex &ind) { _mem_planner->release(ind); }
+
+void MemoryManager::allocate(void)
+{
+ _mem_alloc = std::make_shared<Allocator>(_mem_planner->capacity());
+ assert(_mem_alloc->base());
+
+ for (auto &mem_plan : _mem_planner->memory_plans())
+ {
+ auto ind = mem_plan.first;
+ auto mem_blk = mem_plan.second;
+
+ uint8_t *buffer = _mem_alloc->base() + mem_blk.offset;
+ auto tensor = _tensors[ind];
+ tensor->setBuffer(buffer);
+
+ VERBOSE(CPU_MEMORYMANAGER) << "TENSOR(#" << ind.value() << "): " << static_cast<void *>(buffer)
+ << std::endl;
+
+ // NOTE Every planned tensor must receive its buffer here; if a tensor were left
+ // without a buffer at this point, kernel generation would cause a segmentation fault.
+ }
+}
+
+std::shared_ptr<backend::operand::IObject> MemoryManager::wrapTensor(const model::OperandIndex &ind)
+{
+ if (_objects.find(ind) != _objects.end())
+ {
+ return _objects.at(ind);
+ }
+ else
+ {
+ return _objects[ind] = std::make_shared<::neurun::backend::operand::Object>(_tensors.at(ind));
+ }
+}
+
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/backend/srcn/MemoryManager.h b/runtimes/neurun/backend/srcn/MemoryManager.h
new file mode 100644
index 000000000..f0cf8f0ba
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/MemoryManager.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_SRCN_MEMORY_MANAGER_H__
+#define __NEURUN_BACKEND_SRCN_MEMORY_MANAGER_H__
+
+#include "backend/IMemoryManager.h"
+#include "MemoryPlanner.h"
+#include "operand/Tensor.h"
+#include <backend/operand/Object.h>
+#include "model/OperandIndexMap.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+
+class MemoryManager : public backend::IMemoryManager
+{
+public:
+ MemoryManager();
+ virtual ~MemoryManager() = default;
+
+ void allocate(void) override;
+ void deallocate(void) override { _mem_alloc->release(); }
+
+ void buildTensor(const model::OperandIndex &ind, const model::OperandInfo &info);
+ void claimPlan(const model::OperandIndex &ind, uint32_t size);
+ void releasePlan(const model::OperandIndex &ind);
+
+ model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &tensors(void) { return _tensors; }
+
+ std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind);
+
+private:
+ IMemoryPlanner *createMemoryPlanner();
+
+private:
+ model::OperandIndexMap<std::shared_ptr<operand::Tensor>> _tensors;
+ model::OperandIndexMap<std::shared_ptr<::neurun::backend::operand::Object>> _objects;
+ model::OperandIndexMap<Block> _tensor_mem_map;
+ std::shared_ptr<IMemoryPlanner> _mem_planner;
+ std::shared_ptr<Allocator> _mem_alloc;
+};
+
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_SRCN_MEMORY_MANAGER_H__
diff --git a/runtimes/neurun/backend/srcn/MemoryPlanner.cc b/runtimes/neurun/backend/srcn/MemoryPlanner.cc
new file mode 100644
index 000000000..96ce27bd8
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/MemoryPlanner.cc
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MemoryPlanner.h"
+#include "util/logging.h"
+#include <cassert>
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+
+Allocator::Allocator(uint32_t capacity)
+{
+ _base = nnfw::cpp14::make_unique<uint8_t[]>(capacity);
+
+ VERBOSE(ALLOC) << "allocation capacity: " << capacity << std::endl;
+ VERBOSE(ALLOC) << "base pointer: " << static_cast<void *>(_base.get()) << std::endl;
+}
+
+void BumpPlanner::claim(const model::OperandIndex &ind, size_t size)
+{
+ assert(size != 0);
+
+ Block blk{_capacity, size};
+ _mem_plans[ind] = blk;
+ _capacity += size;
+
+ VERBOSE(BP_PLANNER) << "CLAIM(#" << ind.value() << "): " << blk.offset << ", " << blk.size
+ << std::endl;
+}
+
+void BumpPlanner::release(const model::OperandIndex &ind)
+{
+ VERBOSE(BP_PLANNER) << "RELEASE(#" << ind.value() << "): "
+ << "does nothing" << std::endl;
+}
+
+// There are some assumptions for claiming memory(== making a reservation for memory).
+// 1. About _claim_table(std::map).
+// - The table's data structure is std::map so that it always sorts
+// value(model::OperandIndex) by key(base_offset).
+// - This claim() inserts key/value into _claim_table and the release() removes the key/value from
+// _claim_table.
+// - _claim_table shows the memory status at a certain point in time. Therefore,
+// - If _claim_table has an offset and a certain size at a certain point in time,
+// it means the place at the offset has been already claimed(== can't claim now. need to find
+// someplace new).
+// - If _claim_table doesn't have any element for an offset and a certain size at a certain
+// point in time, it means the place at the offset can be claimed.
+// 2. In the loop for _claim_table, we can assume the current claim_base_offset value is bigger than
+// the previous claim_base_offset.
+void FirstFitPlanner::claim(const model::OperandIndex &ind, size_t size)
+{
+ assert(size != 0);
+
+ // Find the right position for claiming
+ uint32_t next_offset = 0;
+ for (auto &mem_claim : _claim_table)
+ {
+ auto claimed_base_offset = mem_claim.first;
+ auto claimed_size = _mem_plans[mem_claim.second].size;
+ if (next_offset + size <= claimed_base_offset)
+ {
+ break;
+ }
+ else
+ {
+ next_offset = claimed_base_offset + claimed_size;
+ }
+ }
+
+ // Now next_offset is set to the proper offset
+ _claim_table[next_offset] = ind;
+ _mem_plans[ind] = {next_offset, size};
+
+ VERBOSE(FF_PLANNER) << "claim(#" << ind.value() << "): [+" << next_offset << ", " << size << "sz]"
+ << std::endl;
+
+ if (_capacity < next_offset + size)
+ {
+ _capacity = next_offset + size;
+ }
+}
+
+void FirstFitPlanner::release(const model::OperandIndex &ind)
+{
+ for (auto it = _claim_table.cbegin(); it != _claim_table.cend(); ++it)
+ {
+ if (it->second == ind)
+ {
+ uint32_t offset = it->first;
+ uint32_t index = ind.value();
+ uint32_t size = _mem_plans[ind].size;
+
+ _claim_table.erase(it);
+
+ VERBOSE(FF_PLANNER) << "release(#" << index << "): [+" << offset << ", " << size << "sz]"
+ << std::endl;
+ return;
+ }
+ }
+ assert(!"Cannot release for given index. It has not been claimed, or it has already been released.");
+}
+
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/backend/srcn/MemoryPlanner.h b/runtimes/neurun/backend/srcn/MemoryPlanner.h
new file mode 100644
index 000000000..c66efec2f
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/MemoryPlanner.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file        MemoryPlanner.h
+ * @brief       This file contains Memory Planning related classes
+ */
+
+#ifndef __NEURUN_BACKEND_SRCN_MEMORY_PLANNER_H__
+#define __NEURUN_BACKEND_SRCN_MEMORY_PLANNER_H__
+
+#include <map>
+#include <cpp14/memory.h>
+
+#include "model/OperandIndexMap.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+
+/**
+ * @brief Structure to have memory offset and size
+ */
+struct Block
+{
+ uint32_t offset;
+ size_t size;
+};
+
+/**
+ * @brief Class to allocate memory
+ */
+class Allocator
+{
+public:
+ Allocator(uint32_t capacity);
+ /**
+ * @brief Get memory base pointer
+ * @return base pointer
+ */
+ uint8_t *base() const { return _base.get(); }
+ void release() { _base.reset(); }
+
+private:
+ std::unique_ptr<uint8_t[]> _base;
+};
+
+/**
+ * @brief Interface to plan memory
+ */
+struct IMemoryPlanner
+{
+ using MemoryPlans = model::OperandIndexMap<Block>;
+
+ /**
+ * @brief Claim memory for operand
+ * @param[in] index The operand index
+ * @param[in] size The size of the memory
+ */
+ virtual void claim(const model::OperandIndex &, size_t) = 0;
+ /**
+ * @brief Release memory for operand
+ * @param[in] index The operand index
+ */
+ virtual void release(const model::OperandIndex &) = 0;
+ /**
+ * @brief Get capacity for memory planning
+ * @return The value of capacity
+ */
+ virtual uint32_t capacity() = 0;
+ /**
+ * @brief Get MemoryPlans
+ * @return MemoryPlans
+ */
+ virtual MemoryPlans &memory_plans() = 0;
+
+ virtual ~IMemoryPlanner() = default;
+};
+
+/**
+ * @brief Class to plan memory by bump way
+ */
+class BumpPlanner : public IMemoryPlanner
+{
+public:
+ /**
+ * @brief Claim memory for operand by bump way
+ * @param[in] index The operand index
+ * @param[in] size The size of the memory
+ */
+ void claim(const model::OperandIndex &, size_t) override;
+ /**
+ * @brief Release memory for operand by bump way
+ * @param[in] index The operand index
+ */
+ void release(const model::OperandIndex &) override;
+ /**
+ * @brief Get capacity for memory planning
+ * @return The value of capacity
+ */
+ uint32_t capacity() override { return _capacity; }
+ /**
+ * @brief Get MemoryPlans
+ * @return MemoryPlans
+ */
+ MemoryPlans &memory_plans() override { return _mem_plans; }
+
+private:
+ uint32_t _capacity = 0;
+ MemoryPlans _mem_plans;
+};
+
+/**
+ * @brief Class to plan memory by firstfit way
+ */
+class FirstFitPlanner : public IMemoryPlanner
+{
+public:
+ /**
+ * @brief Claim memory for operand by firstfit way
+ * @param[in] index The operand index
+ * @param[in] size The size of the memory
+ */
+ void claim(const model::OperandIndex &, size_t) override;
+ /**
+ * @brief Release memory for operand by firstfit way
+ * @param[in] index The operand index
+ */
+ void release(const model::OperandIndex &) override;
+ /**
+ * @brief Get capacity for memory planning
+ * @return The value of capacity
+ */
+ uint32_t capacity() override { return _capacity; }
+ /**
+ * @brief Get MemoryPlans
+ * @return MemoryPlans
+ */
+ MemoryPlans &memory_plans() override { return _mem_plans; }
+
+private:
+ uint32_t _capacity = 0;
+ MemoryPlans _mem_plans;
+ // Use std::map because claim() assumes that _claim_table is sorted by uint32_t(base_offset)
+ std::map<uint32_t, model::OperandIndex> _claim_table;
+};
+
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_SRCN_MEMORY_PLANNER_H__
diff --git a/runtimes/neurun/backend/srcn/MemoryPlannerFactory.cc b/runtimes/neurun/backend/srcn/MemoryPlannerFactory.cc
new file mode 100644
index 000000000..0029c38cf
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/MemoryPlannerFactory.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MemoryPlannerFactory.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+
+MemoryPlannerFactory &MemoryPlannerFactory::instance()
+{
+  // Meyers singleton: the function-local static is constructed on first use,
+  // and its initialization is thread-safe since C++11.
+  static MemoryPlannerFactory instance;
+  return instance;
+}
+
+IMemoryPlanner *MemoryPlannerFactory::create(const std::string &key)
+{
+  // "Bump" selects the bump planner; "FirstFit" - and any unrecognized key -
+  // falls through to the first-fit planner, which is the default.
+  // The caller takes ownership of the returned object.
+  if (key == "Bump")
+  {
+    return new BumpPlanner;
+  }
+  return new FirstFitPlanner;
+}
+
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/backend/srcn/MemoryPlannerFactory.h b/runtimes/neurun/backend/srcn/MemoryPlannerFactory.h
new file mode 100644
index 000000000..79cb264a6
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/MemoryPlannerFactory.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_SRCN_MEMORY_PLANNER_FACTORY_H__
+#define __NEURUN_BACKEND_SRCN_MEMORY_PLANNER_FACTORY_H__
+
+#include "MemoryPlanner.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+
+/**
+ * @brief Singleton factory that creates IMemoryPlanner implementations by name
+ */
+class MemoryPlannerFactory
+{
+public:
+  /**
+   * @brief Get the single factory instance
+   */
+  static MemoryPlannerFactory &instance();
+
+private:
+  MemoryPlannerFactory() = default;
+
+public:
+  /**
+   * @brief Create a planner for the given key ("FirstFit" or "Bump"; any other
+   *        key yields the default FirstFit planner)
+   * @note The caller takes ownership of the returned raw pointer
+   */
+  IMemoryPlanner *create(const std::string &key);
+};
+
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_SRCN_MEMORY_PLANNER_FACTORY_H__
diff --git a/runtimes/neurun/backend/srcn/PluginClassesAllocator.cc b/runtimes/neurun/backend/srcn/PluginClassesAllocator.cc
new file mode 100644
index 000000000..9efc6aaaa
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/PluginClassesAllocator.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <util/logging.h>
+
+#include "Backend.h"
+
+extern "C" {
+// Plugin entry point: called by the backend loader to instantiate the srcn backend.
+neurun::backend::Backend *neurun_backend_create()
+{
+  VERBOSE(neurun_backend_create) << "'srcn' loaded\n";
+  return new neurun::backend::srcn::Backend;
+}
+
+// Plugin exit point: called by the backend loader to destroy a backend created above.
+void neurun_backend_destroy(neurun::backend::Backend *backend)
+{
+  // Fix: the unload message was logged under the neurun_backend_create tag
+  // (copy-paste); use this function's own tag.
+  VERBOSE(neurun_backend_destroy) << "'srcn' unloaded\n";
+  delete backend;
+}
+}
diff --git a/runtimes/neurun/backend/srcn/ShapeFixer.cc b/runtimes/neurun/backend/srcn/ShapeFixer.cc
new file mode 100644
index 000000000..38f0d9252
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/ShapeFixer.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ShapeFixer.h"
+
+#include <stdexcept>
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+
+// Construct with the operand context and this backend's tensor builder.
+// NOTE(review): assert() is used but <cassert> is not included in this file;
+// presumably it arrives transitively through a header - confirm.
+ShapeFixer::ShapeFixer(const neurun::model::Operands &operand_ctx,
+                       const std::shared_ptr<TensorBuilder> &tensor_builder)
+    : _ctx(operand_ctx), _tensor_builder(tensor_builder)
+{
+  assert(tensor_builder);
+}
+
+// TransposeConv requires no shape fixing on this backend.
+void ShapeFixer::visit(const model::operation::TransposeConvNode &) { /* DO NOTHING */}
+
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/backend/srcn/ShapeFixer.h b/runtimes/neurun/backend/srcn/ShapeFixer.h
new file mode 100644
index 000000000..c0a127a34
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/ShapeFixer.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_SRCN_SHAPE_FIXER_H__
+#define __NEURUN_BACKEND_SRCN_SHAPE_FIXER_H__
+
+#include <backend/IShapeFixer.h>
+
+#include "model/Operands.h"
+#include "operand/Tensor.h"
+#include "TensorBuilder.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+
+/**
+ * @brief Shape fixer for the srcn backend; currently visits only
+ *        TransposeConv (and performs no fix-up for it)
+ */
+class ShapeFixer : public IShapeFixer
+{
+public:
+  ShapeFixer(const neurun::model::Operands &ctx,
+             const std::shared_ptr<TensorBuilder> &tensor_builder);
+
+  std::shared_ptr<ITensorBuilder> tensor_builder() override { return _tensor_builder; }
+
+  void visit(const model::operation::TransposeConvNode &) override;
+
+private:
+  // Operand context (not referenced by the current visit implementations)
+  const neurun::model::Operands &_ctx;
+  std::shared_ptr<TensorBuilder> _tensor_builder;
+};
+
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_SRCN_SHAPE_FIXER_H__
diff --git a/runtimes/neurun/backend/srcn/TensorBuilder.cc b/runtimes/neurun/backend/srcn/TensorBuilder.cc
new file mode 100644
index 000000000..52c11ad37
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/TensorBuilder.cc
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TensorBuilder.h"
+
+#include <cassert>
+
+#include "util/logging.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+
+// Create the builder with a fresh TensorManager that will own all srcn tensors.
+TensorBuilder::TensorBuilder() : _tensor_mgr{new TensorManager()}
+{
+  // DO NOTHING
+}
+
+// Register an operand's tensor info (and, for constants, record it in the
+// constant index sequence) ahead of tensor construction in notifyFirstUse().
+void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind,
+                                       const model::OperandInfo &info,
+                                       model::Layout frontend_layout,
+                                       model::Layout backend_layout, bool as_const)
+{
+  _tensor_info_map.emplace(ind, info);
+  // Keep the frontend/backend layout pair; _tensor_layouts_map exists for
+  // this purpose but the pair was previously discarded.
+  _tensor_layouts_map.emplace(ind, std::make_pair(frontend_layout, backend_layout));
+
+  // TODO Apply the recorded layouts when building tensors
+
+  if (as_const)
+    _constants.append(ind);
+}
+
+// Subtensors are not supported by the srcn backend yet.
+// NOTE assert(false) is compiled out when NDEBUG is defined, so release
+// builds would silently no-op here instead of failing.
+void TensorBuilder::registerSubTensorInfo(const model::OperandIndex &,
+                                          const compiler::SubTensorInfo &)
+{
+  // Not supported yet
+  assert(false);
+}
+
+// First use of the operand: build its tensor (info must have been registered
+// via registerTensorInfo) and claim a memory plan for its full size.
+void TensorBuilder::notifyFirstUse(const model::OperandIndex &ind)
+{
+  assert(_tensor_info_map.find(ind) != _tensor_info_map.end());
+  const auto &info = _tensor_info_map.at(ind);
+  const auto size = info.total_size();
+  _tensor_mgr->buildTensor(ind, info, _constants.contains(ind));
+  _tensor_mgr->claimPlan(ind, size);
+}
+
+// Last use of the operand: release its memory plan so the space may be reused.
+void TensorBuilder::notifyLastUse(const model::OperandIndex &ind) { _tensor_mgr->releasePlan(ind); }
+
+// Allocate both constant and non-constant tensors up front; the allocate*()
+// overrides below are intentionally empty (see their NOTEs).
+void TensorBuilder::prepare(void)
+{
+  _tensor_mgr->allocateConsts();
+  _tensor_mgr->allocateNonconsts();
+}
+
+// TODO Remove this
+void TensorBuilder::allocate(void)
+{
+  // NOTE For now nothing to do. Allocation is done in prepare stage, which is not appropriate
+  // This is because SRCN kernels require `ITensor`s to be allocated before Kernel Generation.
+}
+
+void TensorBuilder::allocateConsts()
+{
+  // NOTE For now nothing to do. Allocation is done in prepare stage, which is not appropriate
+  // This is because SRCN kernels require `ITensor`s to be allocated before Kernel Generation.
+}
+
+void TensorBuilder::allocateNonconsts()
+{
+  // NOTE For now nothing to do. Allocation is done in prepare stage, which is not appropriate
+  // This is because SRCN kernels require `ITensor`s to be allocated before Kernel Generation.
+}
+
+// The lookups below simply forward to the owned TensorManager.
+std::shared_ptr<::neurun::backend::operand::ITensor>
+TensorBuilder::tensorAt(const model::OperandIndex &ind)
+{
+  return _tensor_mgr->at(ind);
+}
+
+std::shared_ptr<backend::operand::IObject> TensorBuilder::wrapTensor(const model::OperandIndex &ind)
+{
+  return _tensor_mgr->wrapTensor(ind);
+}
+
+void TensorBuilder::iterate(const IterateFunction &fn) { _tensor_mgr->iterate(fn); }
+
+std::shared_ptr<operand::Tensor> TensorBuilder::at(const ::neurun::model::OperandIndex &ind)
+{
+  return _tensor_mgr->at(ind);
+}
+
+// Hand the TensorManager over to the caller. _tensor_mgr becomes null, so the
+// builder must not be used for tensor access afterwards.
+std::unique_ptr<ITensorManager> TensorBuilder::releaseTensorManager(void)
+{
+  return std::move(_tensor_mgr);
+}
+
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/backend/srcn/TensorBuilder.h b/runtimes/neurun/backend/srcn/TensorBuilder.h
new file mode 100644
index 000000000..98b45b64f
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/TensorBuilder.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_SRCN_TENSOR_BUILDER_H__
+#define __NEURUN_BACKEND_SRCN_TENSOR_BUILDER_H__
+
+#include <unordered_map>
+
+#include <backend/ITensorBuilder.h>
+#include <backend/operand/Object.h>
+#include "operand/Tensor.h"
+#include "model/OperandIndexMap.h"
+#include "TensorManager.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+
+class TensorBuilder : public ITensorBuilder
+{
+public:
+  TensorBuilder();
+
+  /**
+   * @brief Register tensor information to allocate on srcn backend
+   * @param[in] ind Operand index
+   * @param[in] info Operand information
+   * @param[in] frontend_layout Data layout of the operand on the frontend side
+   * @param[in] backend_layout Data layout of the operand on this backend
+   * @param[in] as_const Whether the operand is a constant
+   */
+  void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info,
+                          model::Layout frontend_layout, model::Layout backend_layout,
+                          bool as_const) override;
+  /**
+   * @brief Register subtensor information (not supported by srcn yet)
+   * @param[in] ind Operand index
+   * @param[in] info Tensor information
+   */
+  void registerSubTensorInfo(const model::OperandIndex &ind,
+                             const compiler::SubTensorInfo &info) override;
+
+  void notifyFirstUse(const model::OperandIndex &) override;
+  void notifyLastUse(const model::OperandIndex &) override;
+
+  void prepare(void) override;
+  void allocate(void) override; // TODO Remove this
+  void allocateConsts() override;
+  void allocateNonconsts() override;
+  void postFunctionPrepare() override { /* DO NOTHING */}
+  void finalize() override { /* DO NOTHING */}
+
+  std::shared_ptr<::neurun::backend::operand::ITensor>
+  tensorAt(const model::OperandIndex &ind) override;
+
+  std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind) override;
+
+  void iterate(const IterateFunction &fn) override;
+
+  void preVisit(const model::Operation &) override { /* DO NOTHING */}
+  void postVisit(const model::Operation &) override { /* DO NOTHING */}
+
+  std::unique_ptr<ITensorManager> releaseTensorManager(void) override;
+
+  std::shared_ptr<operand::Tensor> at(const ::neurun::model::OperandIndex &ind);
+
+private:
+  std::unique_ptr<TensorManager> _tensor_mgr;
+  model::OperandIndexMap<model::OperandInfo> _tensor_info_map;
+  // Frontend/backend layout pair per operand
+  model::OperandIndexMap<std::pair<model::Layout, model::Layout>> _tensor_layouts_map;
+  // Indices of operands registered as constants
+  model::OperandIndexSequence _constants;
+};
+
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_SRCN_TENSOR_BUILDER_H__
diff --git a/runtimes/neurun/backend/srcn/TensorManager.cc b/runtimes/neurun/backend/srcn/TensorManager.cc
new file mode 100644
index 000000000..d0c80273e
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/TensorManager.cc
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TensorManager.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+
+// Two memory managers: one for constant tensors, one for non-constant tensors.
+TensorManager::TensorManager() : _const_mgr{new MemoryManager()}, _nonconst_mgr{new MemoryManager()}
+{
+  // DO NOTHING
+}
+
+void TensorManager::allocateConsts(void) { _const_mgr->allocate(); }
+
+void TensorManager::allocateNonconsts(void) { _nonconst_mgr->allocate(); }
+
+void TensorManager::deallocateConsts(void) { _const_mgr->deallocate(); }
+
+void TensorManager::deallocateNonconsts(void) { _nonconst_mgr->deallocate(); }
+
+// Build a tensor in the const or non-const manager and remember which manager
+// owns the index so later per-operand calls can be routed to it.
+void TensorManager::buildTensor(const model::OperandIndex &ind,
+                                const model::OperandInfo &tensor_info, bool as_const)
+{
+  assert(_ind_to_mgr.find(ind) == _ind_to_mgr.end()); // each index is built at most once
+  if (as_const)
+  {
+    _const_mgr->buildTensor(ind, tensor_info);
+    _ind_to_mgr.insert({ind, *_const_mgr});
+  }
+  else
+  {
+    _nonconst_mgr->buildTensor(ind, tensor_info);
+    _ind_to_mgr.insert({ind, *_nonconst_mgr});
+  }
+}
+
+// Forward the claim to whichever manager owns the index (set by buildTensor).
+void TensorManager::claimPlan(const model::OperandIndex &ind, uint32_t size)
+{
+  assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+  _ind_to_mgr.at(ind).claimPlan(ind, size);
+}
+
+// Forward the release to the owning manager.
+void TensorManager::releasePlan(const model::OperandIndex &ind)
+{
+  assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+  _ind_to_mgr.at(ind).releasePlan(ind);
+}
+
+// Per-operand lookups are routed to the owning manager; buildTensor must have
+// been called for the index first (checked by the asserts).
+std::shared_ptr<backend::operand::IObject> TensorManager::wrapTensor(const model::OperandIndex &ind)
+{
+  assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+  return _ind_to_mgr.at(ind).wrapTensor(ind);
+}
+
+std::shared_ptr<operand::Tensor> TensorManager::at(const ::neurun::model::OperandIndex &ind)
+{
+  assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+  return _ind_to_mgr.at(ind).tensors().at(ind);
+}
+
+model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &TensorManager::constTensors(void)
+{
+  return _const_mgr->tensors();
+}
+
+model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &TensorManager::nonconstTensors(void)
+{
+  return _nonconst_mgr->tensors();
+}
+
+// Apply fn to every registered operand index: non-const tensors first, then
+// const tensors. Iterate by const reference - iterating by value copied each
+// {index, shared_ptr} pair and paid an atomic refcount inc/dec per element.
+void TensorManager::iterate(const std::function<void(const model::OperandIndex &)> &fn)
+{
+  for (const auto &it : _nonconst_mgr->tensors())
+    fn(it.first);
+
+  for (const auto &it : _const_mgr->tensors())
+    fn(it.first);
+}
+
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/backend/srcn/TensorManager.h b/runtimes/neurun/backend/srcn/TensorManager.h
new file mode 100644
index 000000000..61a10d255
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/TensorManager.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_SRCN_TENSOR_MANAGER_H__
+#define __NEURUN_BACKEND_SRCN_TENSOR_MANAGER_H__
+
+#include "backend/ITensorManager.h"
+#include "MemoryManager.h"
+#include "model/OperandIndexMap.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+
+/**
+ * @brief Owns all srcn tensors, split between a constant and a non-constant
+ *        MemoryManager, and routes per-operand calls to the owning manager
+ */
+class TensorManager : public backend::ITensorManager
+{
+public:
+  TensorManager();
+  virtual ~TensorManager() = default;
+
+  void allocateConsts(void) override;
+  void allocateNonconsts(void) override;
+  void deallocateConsts(void) override;
+  void deallocateNonconsts(void) override;
+
+  /**
+   * @brief Build a tensor in the const or non-const manager depending on as_const
+   */
+  void buildTensor(const model::OperandIndex &ind, const model::OperandInfo &tensor_info,
+                   bool as_const);
+
+  void claimPlan(const model::OperandIndex &ind, uint32_t size);
+  void releasePlan(const model::OperandIndex &ind);
+
+  std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind);
+  std::shared_ptr<operand::Tensor> at(const ::neurun::model::OperandIndex &ind);
+
+  model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &constTensors(void);
+  model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &nonconstTensors(void);
+
+  /**
+   * @brief Apply fn to every registered operand index (non-const first, then const)
+   */
+  void iterate(const std::function<void(const model::OperandIndex &)> &fn);
+
+private:
+  std::unique_ptr<MemoryManager> _const_mgr;
+  std::unique_ptr<MemoryManager> _nonconst_mgr;
+  // Which manager owns each operand index (filled by buildTensor)
+  model::OperandIndexMap<MemoryManager &> _ind_to_mgr;
+};
+
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_SRCN_TENSOR_MANAGER_H__
diff --git a/runtimes/neurun/backend/srcn/kernel/OperationUtils.cc b/runtimes/neurun/backend/srcn/kernel/OperationUtils.cc
new file mode 100644
index 000000000..0df0f7b33
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/kernel/OperationUtils.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "OperationUtils.h"
+
+#include <cmath>
+#include <algorithm>
+#include <cassert>
+
+#include "util/Utils.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+namespace kernel
+{
+
+// Return the dimension shared by shape1[index1] and shape2[index2]; debug
+// builds assert the two actually match (UNUSED_RELEASE silences the unused-
+// parameter warnings that appear when the assert is compiled out).
+uint32_t MatchingDim(const Shape &shape1, int index1, const Shape &shape2, int index2)
+{
+  UNUSED_RELEASE(shape2);
+  UNUSED_RELEASE(index2);
+  assert(shape1.dimensions[index1] == shape2.dimensions[index2]);
+  return shape1.dimensions[index1];
+}
+
+// Convert 4-D filter coordinates between layouts. Only OHWI -> HWOI is
+// implemented; any other layout combination throws.
+Coordinates convertCoordinates(const Coordinates &from_coordinates, FilterLayout from_layout,
+                               FilterLayout to_layout)
+{
+  assert(from_coordinates.size() == 4);
+  Coordinates to{from_coordinates};
+  if (from_layout == FilterLayout::OHWI && to_layout == FilterLayout::HWOI)
+  {
+    // (O, H, W, I) -> (H, W, O, I)
+    to.set(0, from_coordinates[1]);
+    to.set(1, from_coordinates[2]);
+    to.set(2, from_coordinates[0]);
+    to.set(3, from_coordinates[3]);
+  }
+  else
+  {
+    throw std::runtime_error{"NYI"};
+  }
+
+  return to;
+}
+
+// Build a kernel-side Shape from a neurun operand. 4-D NCHW shapes are
+// permuted to NHWC; all other shapes are taken as-is.
+Shape getShape(const ::neurun::model::Operand &o, ::neurun::model::Layout frontend_layout)
+{
+  Shape shape;
+
+  auto dims = o.shape().dims();
+  if (frontend_layout == ::neurun::model::Layout::NCHW && o.shape().rank() == 4)
+  {
+    // NCHW -> NHWC
+    uint32_t permutation[4] = {0, 2, 3, 1};
+    for (int i = 0; i < o.shape().rank(); ++i)
+    {
+      dims.at(i) = o.shape().dim(permutation[i]);
+    }
+  }
+  shape.dimensions = std::vector<uint32_t>(dims.begin(), dims.end());
+  shape.type = static_cast<OperandType>(static_cast<int32_t>(o.typeInfo().type()));
+  shape.scale = o.typeInfo().scale();
+  shape.offset = o.typeInfo().offset();
+
+  // This backend assumes that neurun internal shape's rank is always same or less than 4
+  assert(shape.dimensions.size() <= 4);
+
+  return shape;
+}
+
+} // namespace kernel
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/backend/srcn/kernel/OperationUtils.h b/runtimes/neurun/backend/srcn/kernel/OperationUtils.h
new file mode 100644
index 000000000..e9c833565
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/kernel/OperationUtils.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_SRCN_OPERATION_UTILS_H__
+#define __NEURUN_BACKEND_SRCN_OPERATION_UTILS_H__
+
+#include <iostream>
+#include <limits>
+#include <vector>
+
+#include "model/Operand.h"
+#include "model/DataType.h"
+#include <model/InternalType.h>
+
+using OperandType = neurun::model::DataType;
+using neurun::util::Coordinates;
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+namespace kernel
+{
+
+/**
+ * @brief Kernel-side tensor description: element type, dimensions, and the
+ *        scale/offset pair carried over from the operand's type info
+ */
+struct Shape
+{
+  OperandType type;
+  std::vector<uint32_t> dimensions;
+  float scale;    // quantization scale (from the operand's TypeInfo)
+  int32_t offset; // quantization zero point (from the operand's TypeInfo)
+};
+
+// Type-punned pointer to a tensor buffer; the active member is selected by
+// the operand's data type.
+union DataPtr {
+  uint8_t *u8;
+  int8_t *i8;
+  int32_t *i32;
+  float *f;
+  void *v;
+};
+
+// Memory layouts for 4-D filter (kernel) tensors
+enum FilterLayout
+{
+  OHWI = 0, // TfLite Kernel Layout when using NHWC image layout
+  HWOI,     // SRCN Transpose Conv Kernel Layout when using NHWC image layout
+  OIHW,     // SRCN Transpose Conv Kernel Layout when using NCHW image layout
+};
+
+uint32_t MatchingDim(const Shape &shape1, int index1, const Shape &shape2, int index2);
+
+Coordinates convertCoordinates(const Coordinates &from_coordinates, FilterLayout from_layout,
+ FilterLayout to_layout);
+
+Shape getShape(const ::neurun::model::Operand &o, ::neurun::model::Layout frontend_layout);
+
+} // namespace kernel
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_SRCN_OPERATION_UTILS_H__
diff --git a/runtimes/neurun/backend/srcn/kernel/TransposeConvLayer.cc b/runtimes/neurun/backend/srcn/kernel/TransposeConvLayer.cc
new file mode 100644
index 000000000..59332ab6d
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/kernel/TransposeConvLayer.cc
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TransposeConvLayer.h"
+
+#include "OperationUtils.h"
+#include "srcn/srcn_conv.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+namespace kernel
+{
+
+// Zero-initialize all members; the real parameters are supplied later through
+// configure().
+TransposeConvLayer::TransposeConvLayer()
+    : _inputData(), _kernelData(), _outputData(), _inputShape(), _kernelShape(), _outputShape(),
+      _paddingType(0), _paddingLeft(0), _paddingTop(0), _paddingRight(0), _paddingBottom(0),
+      _strideWidth(0), _strideHeight(0), _inputType(OperandType::FLOAT32)
+{
+  // DO NOTHING
+}
+
+// Run the float32 deconvolution by translating the configured shapes and
+// parameters into the nnfw::srcn conv-matrix structures.
+void TransposeConvLayer::convFloat32()
+{
+  nnfw::srcn::convMat_t in_mat, out_mat, kernel_mat;
+  nnfw::srcn::convParams_t in_param;
+
+  // Input/output shapes are NHWC: dimensions = {N, H, W, C}
+  const int batches = MatchingDim(_inputShape, 0, _outputShape, 0);
+  const int input_height = _inputShape.dimensions[1];
+  const int input_width = _inputShape.dimensions[2];
+  const int input_depth = MatchingDim(_inputShape, 3, _kernelShape, 3);
+  in_mat.c = input_depth;
+  in_mat.w = input_width;
+  in_mat.h = input_height;
+  in_mat.n = batches;
+  in_mat.data = _inputData.f;
+
+  const int output_height = _outputShape.dimensions[1];
+  const int output_width = _outputShape.dimensions[2];
+  // Kernel dims: [0] = output depth, [1] = H, [2] = W, [3] = input depth
+  const int output_depth = MatchingDim(_kernelShape, 0, _outputShape, 3);
+  out_mat.c = output_depth;
+  out_mat.w = output_width;
+  out_mat.h = output_height;
+  out_mat.n = batches;
+  out_mat.data = _outputData.f;
+
+  const int ker_height = _kernelShape.dimensions[1];
+  const int ker_width = _kernelShape.dimensions[2];
+  kernel_mat.c = output_depth;
+  kernel_mat.w = ker_width;
+  kernel_mat.h = ker_height;
+  kernel_mat.n = input_depth;
+  kernel_mat.data = _kernelData.f;
+
+  in_param.kernel_w = ker_width;
+  in_param.kernel_h = ker_height;
+  in_param.stride_w = _strideWidth;
+  in_param.stride_h = _strideHeight;
+  in_param.padding = _paddingType;
+  in_param.pad_w = _paddingLeft;
+  in_param.pad_h = _paddingTop;
+  in_param.dilation_w = 1; // dilation is fixed to 1 (not configurable here)
+  in_param.dilation_h = 1;
+
+  // NOTE(review): the literal 4 passed to srcn_deconvolution2D is undocumented
+  // here (presumably a thread count) - confirm against srcn/srcn_conv.h
+  nnfw::srcn::srcn_deconvolution2D(in_mat, kernel_mat, out_mat, in_param, 4, nnfw::srcn::col_major);
+}
+
+// Capture buffer pointers, shapes, and padding/stride parameters for a later
+// run(). The buffers are stored as raw pointers (borrowed, not copied).
+void TransposeConvLayer::configure(uint8_t *inputData, const Shape inputShape, uint8_t *kernelData,
+                                   const Shape kernelShape, const uint32_t paddingType,
+                                   const uint32_t paddingLeft, const uint32_t paddingRight,
+                                   const uint32_t paddingTop, const uint32_t paddingBottom,
+                                   const uint32_t strideWidth, const uint32_t strideHeight,
+                                   uint8_t *outputData, const Shape outputShape)
+{
+  _inputData.u8 = inputData;
+  _inputShape = inputShape;
+  _inputType = inputShape.type; // dispatch key for run()
+  _kernelData.u8 = kernelData;
+  _kernelShape = kernelShape;
+  _paddingType = paddingType;
+  _paddingLeft = paddingLeft;
+  _paddingRight = paddingRight;
+  _paddingTop = paddingTop;
+  _paddingBottom = paddingBottom;
+  _strideWidth = strideWidth;
+  _strideHeight = strideHeight;
+  _outputData.u8 = outputData;
+  _outputShape = outputShape;
+}
+
+// Dispatch on the input data type. Only FLOAT32 is implemented.
+void TransposeConvLayer::run()
+{
+  if (_inputType == OperandType::FLOAT32)
+  {
+    convFloat32();
+  }
+  else
+  {
+    // QUANT8_ASYMM and every other type are not implemented yet. Throw for
+    // all of them: previously types other than QUANT8_ASYMM fell through and
+    // silently did nothing, which hid the missing implementation.
+    throw std::runtime_error("NYI");
+  }
+}
+
+} // namespace kernel
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/backend/srcn/kernel/TransposeConvLayer.h b/runtimes/neurun/backend/srcn/kernel/TransposeConvLayer.h
new file mode 100644
index 000000000..db9006c22
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/kernel/TransposeConvLayer.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_SRCN_KERNEL_TRANSPOSECONV_LAYER_H__
+#define __NEURUN_BACKEND_SRCN_KERNEL_TRANSPOSECONV_LAYER_H__
+
+#include <exec/IFunction.h>
+
+#include "OperationUtils.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+namespace kernel
+{
+
+/**
+ * @brief IFunction that runs a 2-D transposed convolution (deconvolution)
+ *        through the nnfw::srcn kernels; only FLOAT32 input is implemented
+ */
+class TransposeConvLayer : public ::neurun::exec::IFunction
+{
+public:
+  TransposeConvLayer();
+
+public:
+  /// Run the float32 deconvolution with the parameters set by configure()
+  void convFloat32();
+  /// Capture tensor buffers/shapes and padding/stride parameters.
+  /// The buffers are borrowed (stored as raw pointers), not copied.
+  void configure(uint8_t *inputData, const Shape inputShape, uint8_t *kernelData,
+                 const Shape kernelShape, const uint32_t paddingType, const uint32_t paddingLeft,
+                 const uint32_t paddingRight, const uint32_t paddingTop,
+                 const uint32_t paddingBottom, const uint32_t strideW, const uint32_t strideH,
+                 uint8_t *outputData, const Shape outputShape);
+
+  void run();
+  void runSync()
+  {
+    // this abstract method is used just for profiling and called for
+    // backend::acl_common::AclFunction
+    run();
+  }
+
+private:
+  DataPtr _inputData;
+  DataPtr _kernelData;
+  DataPtr _outputData;
+
+  Shape _inputShape;
+  Shape _kernelShape;
+  Shape _outputShape;
+
+  uint32_t _paddingType;
+  uint32_t _paddingLeft;
+  uint32_t _paddingTop;
+  uint32_t _paddingRight;
+  uint32_t _paddingBottom;
+
+  uint32_t _strideWidth;
+  uint32_t _strideHeight;
+
+  // Data type of the input tensor; selects the kernel in run()
+  OperandType _inputType;
+};
+
+} // namespace kernel
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_SRCN_KERNEL_TRANSPOSECONV_LAYER_H__
diff --git a/runtimes/neurun/backend/srcn/operand/Tensor.cc b/runtimes/neurun/backend/srcn/operand/Tensor.cc
new file mode 100644
index 000000000..ef5f67512
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/operand/Tensor.cc
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Tensor.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+namespace operand
+{
+
+// Byte offset of the element at the given coordinates: row-major flattening
+// of the coordinates over the tensor's dimensions, scaled by the element size
+// of the data type.
+size_t Tensor::calcOffset(const neurun::util::Coordinates &coords) const
+{
+  size_t rank = num_dimensions();
+  size_t offset = 0;
+  for (size_t i = 0; i < rank; ++i)
+  {
+    offset = offset * dimension(i) + coords[i];
+  }
+  offset *= sizeOfDataType(data_type());
+  return offset;
+}
+
+} // namespace operand
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/backend/srcn/operand/Tensor.h b/runtimes/neurun/backend/srcn/operand/Tensor.h
new file mode 100644
index 000000000..762f73837
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/operand/Tensor.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_SRCN_OPERAND_TENSOR_H__
+#define __NEURUN_BACKEND_SRCN_OPERAND_TENSOR_H__
+
+#include <backend/operand/ITensor.h>
+#include "model/OperandInfo.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+namespace operand
+{
+
+/**
+ * @brief Host-memory tensor for the srcn backend
+ *
+ * Holds the operand metadata (shape / type info) by value and a non-owning
+ * pointer to the backing buffer, which is assigned later via setBuffer()
+ * (e.g. by the backend's memory manager). The Tensor never allocates or
+ * frees the buffer itself.
+ */
+class Tensor : public ::neurun::backend::operand::ITensor
+{
+public:
+  Tensor() = delete;
+
+public:
+  // explicit: building a Tensor from an OperandInfo is a deliberate act;
+  // do not allow silent implicit conversion at call sites.
+  explicit Tensor(const model::OperandInfo &info) : _info(info)
+  {
+    // DO NOTHING
+  }
+
+public:
+  // Attach the externally-owned backing storage; ownership stays with the caller.
+  void setBuffer(uint8_t *buffer) { _buffer = buffer; }
+  ::neurun::model::DataType data_type() const { return _info.typeInfo().type(); }
+
+public:
+  uint8_t *buffer() const override { return _buffer; }
+  /**
+   * @brief Get dimension by index
+   *
+   * @param index Index to get dimension
+   * @return size_t Dimension at index
+   * @note N : dimension(0)
+   *       H : dimension(1)
+   *       W : dimension(2)
+   *       C : dimension(3)
+   */
+  size_t dimension(size_t index) const override { return _info.shape().dim(index); }
+  size_t num_dimensions() const override { return _info.shape().rank(); }
+  size_t total_size() const override { return _info.total_size(); }
+  // Byte offset of the element at `coords`, assuming dense row-major layout
+  // (defined in Tensor.cc).
+  size_t calcOffset(const neurun::util::Coordinates &coords) const override;
+  // This backend always reports NHWC, matching the dimension() note above.
+  model::Layout layout() const override { return model::Layout::NHWC; }
+  // Dense storage only: no per-row/per-plane padding bytes.
+  bool has_padding() const override { return false; }
+
+private:
+  model::OperandInfo _info;
+  uint8_t *_buffer = nullptr; // non-owning; set via setBuffer()
+};
+
+} // namespace operand
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_SRCN_OPERAND_TENSOR_H__