path: root/runtime/neurun/backend/srcn/KernelGenerator.cc
Diffstat (limited to 'runtime/neurun/backend/srcn/KernelGenerator.cc')
-rw-r--r--  runtime/neurun/backend/srcn/KernelGenerator.cc  275
1 file changed, 0 insertions, 275 deletions
diff --git a/runtime/neurun/backend/srcn/KernelGenerator.cc b/runtime/neurun/backend/srcn/KernelGenerator.cc
deleted file mode 100644
index c096f9230..000000000
--- a/runtime/neurun/backend/srcn/KernelGenerator.cc
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "KernelGenerator.h"
-
-#include <stdexcept>
-
-#include "cpp14/memory.h"
-#include "util/Padding.h"
-#include "kernel/ConvolutionLayer.h"
-#include "kernel/DepthwiseConvolutionLayer.h"
-#include "kernel/InstanceNormLayer.h"
-#include "kernel/TransposeConvLayer.h"
-#include "kernel/AddLayer.h"
-
-#include <backend/Backend.h>
-#include <backend/IConfig.h>
-
-#include "util/logging.h"
-
-#include "util/Utils.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace srcn
-{
-
-KernelGenerator::KernelGenerator(const ir::Operands &operand_ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder,
- const std::shared_ptr<custom::IKernelBuilder> &kb)
- : _ctx(operand_ctx), _tensor_builder(tensor_builder), _kernel_builder(kb),
- _current_subg_layout(ir::Layout::UNKNOWN)
-{
- // DO NOTHING
-}
-
-void KernelGenerator::visit(const ir::OpSequence &op_seq)
-{
- _current_subg_layout = op_seq.getLayout();
- for (const auto &e : op_seq.operations())
- {
- const auto &node = *(e.node);
- _tensor_builder->preVisit(node);
- node.accept(*this);
- _tensor_builder->postVisit(node);
- }
-}
-
-void KernelGenerator::visit(const ir::operation::Conv2D &node)
-{
- using ir::operation::Conv2D;
-
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(Conv2D::Input::INPUT)};
- const auto ker_index{node.getInputs().at(Conv2D::Input::KERNEL)};
- const auto bias_index{node.getInputs().at(Conv2D::Input::BIAS)};
-
- const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(_current_subg_layout);
- const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(_current_subg_layout);
- // Kernel format is [depth_out, kernel_height, kernel_width, depth_in] if NHWC and [depth_out,
- // depth_in, kernel_height, kernel_width] if NCHW.
- const auto &ker_shape = _ctx.at(ker_index).shape().asFeature(_current_subg_layout);
- const auto ker_height = ker_shape.H;
- const auto ker_width = ker_shape.W;
- const auto stride = node.param().stride;
- const auto padding = neurun::util::calculatePadding(node.param().padding, ifm_shape, ofm_shape,
- stride, ker_width, ker_height);
- const int has_padding = padding.left + padding.right + padding.top + padding.bottom;
-
- auto ofm_alloc = _tensor_builder->at(ofm_index);
- auto ifm_alloc = _tensor_builder->at(ifm_index);
- auto ker_alloc = _tensor_builder->at(ker_index);
- auto bias_alloc = _tensor_builder->at(bias_index);
- const auto backend_layout = ifm_alloc->layout();
- assert(backend_layout == ofm_alloc->layout());
-
- const auto ofm_backend_descr = ::neurun::backend::srcn::kernel::getTensorDescriptor(
- _ctx.at(ofm_index), _current_subg_layout, backend_layout);
- const auto ifm_backend_descr = ::neurun::backend::srcn::kernel::getTensorDescriptor(
- _ctx.at(ifm_index), _current_subg_layout, backend_layout);
- const auto ker_backend_descr = ::neurun::backend::srcn::kernel::getTensorDescriptor(
- _ctx.at(ker_index), _current_subg_layout, backend_layout);
- const auto bias_backend_descr = ::neurun::backend::srcn::kernel::getTensorDescriptor(
- _ctx.at(bias_index), _current_subg_layout, backend_layout);
-
- auto fn = nnfw::cpp14::make_unique<::neurun::backend::srcn::kernel::ConvolutionLayer>();
-
- // TODO Support activation
- fn->configure(ifm_alloc->buffer(), ifm_backend_descr, ker_alloc->buffer(), ker_backend_descr,
- bias_alloc->buffer(), bias_backend_descr, has_padding, padding.left, padding.right,
- padding.top, padding.bottom, stride.horizontal, stride.vertical,
- /*activation,*/ ofm_alloc->buffer(), ofm_backend_descr, backend_layout);
-
- _execution_builder->append(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::InstanceNorm &node)
-{
- using ir::operation::InstanceNorm;
-
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(InstanceNorm::Input::INPUT)};
- const auto gamma_index{node.getInputs().at(InstanceNorm::Input::GAMMA)};
- const auto beta_index{node.getInputs().at(InstanceNorm::Input::BETA)};
-
- const auto epsilon = node.param().epsilon;
- const auto activation = node.param().activation;
-
- auto ofm_alloc = _tensor_builder->at(ofm_index);
- auto ifm_alloc = _tensor_builder->at(ifm_index);
- auto gamma_alloc = _tensor_builder->at(gamma_index);
- auto beta_alloc = _tensor_builder->at(beta_index);
-
- const auto backend_layout = ofm_alloc->layout();
-
- const auto ofm_backend_descr = ::neurun::backend::srcn::kernel::getTensorDescriptor(
- _ctx.at(ofm_index), _current_subg_layout, backend_layout);
- const auto ifm_backend_descr = ::neurun::backend::srcn::kernel::getTensorDescriptor(
- _ctx.at(ifm_index), _current_subg_layout, backend_layout);
- const auto gamma_backend_descr = ::neurun::backend::srcn::kernel::getTensorDescriptor(
- _ctx.at(gamma_index), _current_subg_layout, backend_layout);
- const auto beta_backend_descr = ::neurun::backend::srcn::kernel::getTensorDescriptor(
- _ctx.at(beta_index), _current_subg_layout, backend_layout);
-
- auto fn = nnfw::cpp14::make_unique<::neurun::backend::srcn::kernel::InstanceNormLayer>();
-
- fn->configure(ifm_alloc->buffer(), ifm_backend_descr, gamma_alloc->buffer(), gamma_backend_descr,
- beta_alloc->buffer(), beta_backend_descr, ofm_alloc->buffer(), ofm_backend_descr,
- epsilon, activation, backend_layout);
-
- _execution_builder->append(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::DepthwiseConv2D &node)
-{
- using ir::operation::DepthwiseConv2D;
-
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(DepthwiseConv2D::Input::INPUT)};
- const auto ker_index{node.getInputs().at(DepthwiseConv2D::Input::KERNEL)};
- const auto bias_index{node.getInputs().at(DepthwiseConv2D::Input::BIAS)};
-
- const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(_current_subg_layout);
- const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(_current_subg_layout);
- // Kernel format is [depth_out, kernel_height, kernel_width, depth_in] if NHWC and [depth_out,
- // depth_in, kernel_height, kernel_width] if NCHW.
- const auto &ker_shape = _ctx.at(ker_index).shape().asFeature(_current_subg_layout);
- const auto ker_height = ker_shape.H;
- const auto ker_width = ker_shape.W;
- const auto stride = node.param().stride;
- const auto padding = neurun::util::calculatePadding(node.param().padding, ifm_shape, ofm_shape,
- stride, ker_width, ker_height);
- const int padding_type = padding.left + padding.right + padding.top + padding.bottom;
-
- auto ofm_alloc = _tensor_builder->at(ofm_index);
- auto ifm_alloc = _tensor_builder->at(ifm_index);
- auto ker_alloc = _tensor_builder->at(ker_index);
- auto bias_alloc = _tensor_builder->at(bias_index);
- const auto backend_layout = ifm_alloc->layout();
- assert(backend_layout == ofm_alloc->layout());
-
- const auto ofm_backend_descr = ::neurun::backend::srcn::kernel::getTensorDescriptor(
- _ctx.at(ofm_index), _current_subg_layout, backend_layout);
- const auto ifm_backend_descr = ::neurun::backend::srcn::kernel::getTensorDescriptor(
- _ctx.at(ifm_index), _current_subg_layout, backend_layout);
- const auto ker_backend_descr = ::neurun::backend::srcn::kernel::getTensorDescriptor(
- _ctx.at(ker_index), _current_subg_layout, backend_layout);
- const auto bias_backend_descr = ::neurun::backend::srcn::kernel::getTensorDescriptor(
- _ctx.at(bias_index), _current_subg_layout, backend_layout);
-
- auto fn = nnfw::cpp14::make_unique<::neurun::backend::srcn::kernel::DepthwiseConvolutionLayer>();
-
- // TODO Support activation
- fn->configure(ifm_alloc->buffer(), ifm_backend_descr, ker_alloc->buffer(), ker_backend_descr,
- bias_alloc->buffer(), bias_backend_descr, padding_type, padding.left, padding.right,
- padding.top, padding.bottom, stride.horizontal, stride.vertical,
- /*activation,*/ ofm_alloc->buffer(), ofm_backend_descr, backend_layout);
-
- _execution_builder->append(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::TransposeConv &node)
-{
- using ir::operation::TransposeConv;
-
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(TransposeConv::Input::INPUT)};
- const auto ker_index{node.getInputs().at(TransposeConv::Input::KERNEL)};
- const auto output_shape_index{node.getInputs().at(TransposeConv::Input::OUTPUT_SHAPE)};
-
- const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(_current_subg_layout);
- const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(_current_subg_layout);
- // Kernel format is [depth_out, kernel_height, kernel_width, depth_in] if NHWC and [depth_out,
- // depth_in, kernel_height, kernel_width] if NCHW.
- const auto &ker_shape = _ctx.at(ker_index).shape().asFeature(_current_subg_layout);
- const auto ker_height = ker_shape.H;
- const auto ker_width = ker_shape.W;
- const auto stride = node.param().stride;
- const int padding_type = (node.param().padding.type == ir::PaddingType::SAME);
- const auto padding = neurun::util::calculatePadding(node.param().padding, ofm_shape, ifm_shape,
- stride, ker_width, ker_height);
-
- auto ofm_alloc = _tensor_builder->at(ofm_index);
- auto ifm_alloc = _tensor_builder->at(ifm_index);
- auto ker_alloc = _tensor_builder->at(ker_index);
- const auto backend_layout = ofm_alloc->layout();
- assert(backend_layout == ifm_alloc->layout());
-
- const auto ofm_backend_descr = ::neurun::backend::srcn::kernel::getTensorDescriptor(
- _ctx.at(ofm_index), _current_subg_layout, backend_layout);
- const auto ifm_backend_descr = ::neurun::backend::srcn::kernel::getTensorDescriptor(
- _ctx.at(ifm_index), _current_subg_layout, backend_layout);
- const auto ker_backend_descr = ::neurun::backend::srcn::kernel::getTensorDescriptor(
- _ctx.at(ker_index), _current_subg_layout, backend_layout);
-
- auto fn = nnfw::cpp14::make_unique<::neurun::backend::srcn::kernel::TransposeConvLayer>();
-
- fn->configure(ifm_alloc->buffer(), ifm_backend_descr, ker_alloc->buffer(), ker_backend_descr,
- padding_type, padding.left, padding.right, padding.top, padding.bottom,
- stride.horizontal, stride.vertical, ofm_alloc->buffer(), ofm_backend_descr,
- backend_layout);
-
- _execution_builder->append(std::move(fn));
-}
-
-void KernelGenerator::visit(const ir::operation::Add &node)
-{
- using ir::operation::Add;
-
- const auto ofm_index{node.getOutputs().at(0)};
- const auto lhs_index{node.getInputs().at(Add::Input::LHS)};
- const auto rhs_index{node.getInputs().at(Add::Input::RHS)};
-
- const auto activation = node.param().activation;
-
- auto ofm_alloc = _tensor_builder->at(ofm_index).get();
- auto lhs_alloc = _tensor_builder->at(lhs_index).get();
- auto rhs_alloc = _tensor_builder->at(rhs_index).get();
-
- const auto backend_layout = ofm_alloc->layout();
- assert(backend_layout == lhs_alloc->layout() && backend_layout == rhs_alloc->layout());
-
- const auto ofm_backend_descr = ::neurun::backend::srcn::kernel::getTensorDescriptor(
- _ctx.at(ofm_index), _current_subg_layout, backend_layout);
- const auto lhs_backend_descr = ::neurun::backend::srcn::kernel::getTensorDescriptor(
- _ctx.at(lhs_index), _current_subg_layout, backend_layout);
- const auto rhs_backend_descr = ::neurun::backend::srcn::kernel::getTensorDescriptor(
- _ctx.at(rhs_index), _current_subg_layout, backend_layout);
-
- auto fn = nnfw::cpp14::make_unique<::neurun::backend::srcn::kernel::AddLayer>();
-
- fn->configure(lhs_alloc->buffer(), lhs_backend_descr, rhs_alloc->buffer(), rhs_backend_descr,
- activation, ofm_alloc->buffer(), ofm_backend_descr, backend_layout);
-
- _execution_builder->append(std::move(fn));
-}
-
-} // namespace srcn
-} // namespace backend
-} // namespace neurun