author    Pavel Iliutchenko / AI Tools Lab / SRR / Engineer / Samsung Electronics <p.iliutchenk@samsung.com>  2019-08-06 19:48:45 +0300
committer Alexander Efimov / AI Tools Lab / Samsung Electronics <a.efimov@samsung.com>  2019-08-06 19:48:45 +0300
commit d4dc7c4898e5aadd762cd30b0b0c55ede98e2daf (patch)
tree   fa268130fde692846953a008ebdd06258588aefc /compiler/mir-onnx-importer
parent 94012f41df74ef1fff4bb06d318980018837469d (diff)
[mir_onnx] Add NodeConverters for all supported operations from OpCreator (#6291)

* Added OpConverters
* Register all converters
* Remove OpCreator

Signed-off-by: Pavel Iliutchenko <p.iliutchenk@samsung.com>
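For context: this change replaces the monolithic ONNXOpCreator with one NodeConverter subclass per ONNX operation, each registered by op name in a central registry. A minimal sketch of the interface the new Op/*.h headers implement, assuming the base class in ONNXNodeConverterRegistry.h (not shown in this diff) is declared roughly as:

    // Sketch only: the real declaration lives in ONNXNodeConverterRegistry.h.
    class NodeConverter
    {
    public:
      virtual ~NodeConverter() = default;
      // Convert a single ONNX node into MIR operations and return its outputs.
      virtual std::vector<mir::Operation::Output *>
      convert(const onnx::NodeProto &onnx_node,
              const std::vector<mir::Operation::Output *> &inputs,
              mir::Graph *graph) const = 0;
    };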
Diffstat (limited to 'compiler/mir-onnx-importer')
-rw-r--r-- compiler/mir-onnx-importer/CMakeLists.txt | 52
-rw-r--r-- compiler/mir-onnx-importer/ONNXOpCreator.cpp | 505
-rw-r--r-- compiler/mir-onnx-importer/ONNXOpCreator.h | 119
-rw-r--r-- compiler/mir-onnx-importer/ONNXOpRegistration.h | 52
-rw-r--r-- compiler/mir-onnx-importer/Op/Add.cpp | 36
-rw-r--r-- compiler/mir-onnx-importer/Op/Add.h | 30
-rw-r--r-- compiler/mir-onnx-importer/Op/AveragePool.cpp | 49
-rw-r--r-- compiler/mir-onnx-importer/Op/AveragePool.h | 30
-rw-r--r-- compiler/mir-onnx-importer/Op/BatchNormalization.cpp | 73
-rw-r--r-- compiler/mir-onnx-importer/Op/BatchNormalization.h | 30
-rw-r--r-- compiler/mir-onnx-importer/Op/Concat.cpp | 40
-rw-r--r-- compiler/mir-onnx-importer/Op/Concat.h | 30
-rw-r--r-- compiler/mir-onnx-importer/Op/Constant.cpp | 45
-rw-r--r-- compiler/mir-onnx-importer/Op/Constant.h | 30
-rw-r--r-- compiler/mir-onnx-importer/Op/Conv.cpp | 83
-rw-r--r-- compiler/mir-onnx-importer/Op/Conv.h | 30
-rw-r--r-- compiler/mir-onnx-importer/Op/Dropout.cpp | 39
-rw-r--r-- compiler/mir-onnx-importer/Op/Dropout.h | 30
-rw-r--r-- compiler/mir-onnx-importer/Op/Gather.cpp | 39
-rw-r--r-- compiler/mir-onnx-importer/Op/Gather.h | 30
-rw-r--r-- compiler/mir-onnx-importer/Op/Gemm.cpp | 101
-rw-r--r-- compiler/mir-onnx-importer/Op/Gemm.h | 30
-rw-r--r-- compiler/mir-onnx-importer/Op/GivenTensorFill.cpp | 47
-rw-r--r-- compiler/mir-onnx-importer/Op/GivenTensorFill.h | 30
-rw-r--r-- compiler/mir-onnx-importer/Op/GlobalAveragePool.cpp | 49
-rw-r--r-- compiler/mir-onnx-importer/Op/GlobalAveragePool.h | 30
-rw-r--r-- compiler/mir-onnx-importer/Op/Max.cpp | 36
-rw-r--r-- compiler/mir-onnx-importer/Op/Max.h | 30
-rw-r--r-- compiler/mir-onnx-importer/Op/MaxPool.cpp | 49
-rw-r--r-- compiler/mir-onnx-importer/Op/MaxPool.h | 30
-rw-r--r-- compiler/mir-onnx-importer/Op/Mul.cpp | 36
-rw-r--r-- compiler/mir-onnx-importer/Op/Mul.h | 30
-rw-r--r-- compiler/mir-onnx-importer/Op/Pad.cpp | 56
-rw-r--r-- compiler/mir-onnx-importer/Op/Pad.h | 30
-rw-r--r-- compiler/mir-onnx-importer/Op/Relu.cpp | 36
-rw-r--r-- compiler/mir-onnx-importer/Op/Relu.h | 30
-rw-r--r-- compiler/mir-onnx-importer/Op/Reshape.cpp | 67
-rw-r--r-- compiler/mir-onnx-importer/Op/Reshape.h | 30
-rw-r--r-- compiler/mir-onnx-importer/Op/Scale.cpp | 43
-rw-r--r-- compiler/mir-onnx-importer/Op/Scale.h | 30
-rw-r--r-- compiler/mir-onnx-importer/Op/Shape.cpp | 46
-rw-r--r-- compiler/mir-onnx-importer/Op/Shape.h | 30
-rw-r--r-- compiler/mir-onnx-importer/Op/Sigmoid.cpp | 36
-rw-r--r-- compiler/mir-onnx-importer/Op/Sigmoid.h | 30
-rw-r--r-- compiler/mir-onnx-importer/Op/Softmax.cpp | 39
-rw-r--r-- compiler/mir-onnx-importer/Op/Softmax.h | 30
-rw-r--r-- compiler/mir-onnx-importer/Op/Sum.cpp | 36
-rw-r--r-- compiler/mir-onnx-importer/Op/Sum.h | 30
-rw-r--r-- compiler/mir-onnx-importer/Op/Unsqueeze.cpp | 55
-rw-r--r-- compiler/mir-onnx-importer/Op/Unsqueeze.h | 30
-rw-r--r-- compiler/mir-onnx-importer/Op/Upsample.cpp | 62
-rw-r--r-- compiler/mir-onnx-importer/Op/Upsample.h | 30
52 files changed, 2018 insertions(+), 628 deletions(-)
diff --git a/compiler/mir-onnx-importer/CMakeLists.txt b/compiler/mir-onnx-importer/CMakeLists.txt
index 1f2d03e26..4cc01d0ba 100644
--- a/compiler/mir-onnx-importer/CMakeLists.txt
+++ b/compiler/mir-onnx-importer/CMakeLists.txt
@@ -10,9 +10,55 @@ set(MIR_ONNX_IMPORTER_SOURCES
ONNXImporterImpl.cpp
ONNXImporterImpl.h
ONNXNodeConverterRegistry.h
- ONNXOpCreator.cpp
- ONNXOpCreator.h
- ONNXOpRegistration.h)
+ ONNXOpRegistration.h
+ Op/Add.cpp
+ Op/Add.h
+ Op/AveragePool.cpp
+ Op/AveragePool.h
+ Op/BatchNormalization.cpp
+ Op/BatchNormalization.h
+ Op/Concat.cpp
+ Op/Concat.h
+ Op/Constant.cpp
+ Op/Constant.h
+ Op/Conv.cpp
+ Op/Conv.h
+ Op/Dropout.cpp
+ Op/Dropout.h
+ Op/Gather.cpp
+ Op/Gather.h
+ Op/Gemm.cpp
+ Op/Gemm.h
+ Op/GivenTensorFill.cpp
+ Op/GivenTensorFill.h
+ Op/GlobalAveragePool.cpp
+ Op/GlobalAveragePool.h
+ Op/Max.cpp
+ Op/Max.h
+ Op/MaxPool.cpp
+ Op/MaxPool.h
+ Op/Mul.cpp
+ Op/Mul.h
+ Op/Pad.cpp
+ Op/Pad.h
+ Op/Relu.cpp
+ Op/Relu.h
+ Op/Reshape.cpp
+ Op/Reshape.h
+ Op/Scale.cpp
+ Op/Scale.h
+ Op/Shape.cpp
+ Op/Shape.h
+ Op/Sigmoid.cpp
+ Op/Sigmoid.h
+ Op/Softmax.cpp
+ Op/Softmax.h
+ Op/Sum.cpp
+ Op/Sum.h
+ Op/Unsqueeze.cpp
+ Op/Unsqueeze.h
+ Op/Upsample.cpp
+ Op/Upsample.h)
add_library(mir_onnx_importer STATIC ${MIR_ONNX_IMPORTER_SOURCES})
set_target_properties(mir_onnx_importer PROPERTIES POSITION_INDEPENDENT_CODE ON)
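With this layout, supporting a new operation touches three places: add Op/Foo.cpp and Op/Foo.h to MIR_ONNX_IMPORTER_SOURCES above, include the header in ONNXOpRegistration.h, and register the converter there. Foo and FooNodeConverter are hypothetical names used only for illustration:

    // In ONNXOpRegistration.h, inside registerSupportedOps():
    registry.registerConverter("Foo", stdex::make_unique<FooNodeConverter>());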
diff --git a/compiler/mir-onnx-importer/ONNXOpCreator.cpp b/compiler/mir-onnx-importer/ONNXOpCreator.cpp
deleted file mode 100644
index b1f6046c6..000000000
--- a/compiler/mir-onnx-importer/ONNXOpCreator.cpp
+++ /dev/null
@@ -1,505 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ONNXOpCreator.h"
-#include "ONNXHelpers.h"
-#include "ONNXImporterImpl.h"
-
-#include "mir/ops/BatchNormOp.h"
-#include "mir/ops/BiasAddOp.h"
-#include "mir/ops/CappedReluOp.h"
-#include "mir/ops/ConcatOp.h"
-#include "mir/ops/ConstantOp.h"
-#include "mir/ops/Conv2DOp.h"
-#include "mir/ops/DepthwiseConv2DOp.h"
-#include "mir/ops/DropoutOp.h"
-#include "mir/ops/FullyConnectedOp.h"
-#include "mir/ops/GatherOp.h"
-#include "mir/ops/GemmOp.h"
-#include "mir/ops/InputOp.h"
-#include "mir/ops/PadOp.h"
-#include "mir/ops/PoolOp.h"
-#include "mir/ops/ReluOp.h"
-#include "mir/ops/ReshapeOp.h"
-#include "mir/ops/ResizeOp.h"
-#include "mir/ops/ScaleOp.h"
-#include "mir/ops/SigmoidOp.h"
-#include "mir/ops/SoftmaxOp.h"
-#include "mir/ops/TransposeOp.h"
-#include "mir/ops/ElementwiseOp.h"
-#include "mir/Index.h"
-#include "mir/Graph.h"
-#include "mir/Scalar.h"
-#include "mir/ShapeRange.h"
-#include "mir/Tensor.h"
-#include "mir/TensorUtil.h"
-
-#include <cmath>
-#include <iostream>
-#include <set>
-
-namespace mir_onnx
-{
-
-using namespace mir;
-
-std::vector<mir::Operation::Output *>
-ONNXOpCreator::convertConv2D(const std::vector<mir::Operation::Output *> &inputs,
- const onnx::NodeProto &onnx_node)
-{
- assert(inputs.size() >= 2);
-
- KernelStridesPadding cdata;
- getKernelStridesPadding(onnx_node, cdata);
- // FIXME: It can be non-constant value.
- auto *in_weights = dynamic_cast<mir::ops::ConstantOp *>(inputs[1]->getNode());
- assert(in_weights && "Weights could be a constant tensor only");
- const auto &in_weights_tensor = in_weights->getValue();
- // We should transpose ONNX MC(IO)HW to HWOI
- auto kernel_tensor = transposeTensor<2, 3, 1, 0>(in_weights_tensor);
- auto in_group_size = kernel_tensor.getShape().dim(2);
- auto out_channels = kernel_tensor.getShape().dim(3);
- bool found;
- int num_groups;
- std::tie(found, num_groups) = getIntAttribute(onnx_node, "group");
- if (!found)
- num_groups = 1;
- bool is_depthwise = (num_groups != 1) && (in_group_size == 1) && (out_channels == num_groups);
-
- mir::Operation *result;
- auto transposed_input = convertONNXToMIR(_graph, inputs[0]);
- if (is_depthwise)
- {
- // TODO handle properly kernel with layer multiplier
- auto transposed_tensor = mir::transposeTensor<0, 1, 3, 2>(kernel_tensor);
- auto kernel = createOp<ops::ConstantOp>(_graph, transposed_tensor)->getOutput(0);
- result = createOp<ops::DepthwiseConv2DOp>(_graph, transposed_input, kernel, cdata.strides_shape,
- cdata.padding_before, cdata.padding_after);
- }
- else
- {
- // first we need to convert kernel of grouped convolution to appropriate ordinary kernel
- if (num_groups != 1)
- kernel_tensor = fixGroupedKernel(num_groups, kernel_tensor);
- kernel_tensor = transposeTensor<3, 0, 1, 2>(kernel_tensor);
- auto kernel = createOp<ops::ConstantOp>(_graph, kernel_tensor)->getOutput(0);
- result = createOp<ops::Conv2DOp>(_graph, transposed_input, kernel, cdata.strides_shape,
- cdata.padding_before, cdata.padding_after);
- }
-
- if (inputs.size() > 2)
- result = createOp<ops::BiasAddOp>(_graph, result->getOutput(0), inputs[2]);
-
- return {convertMIRToONNX(_graph, result->getOutput(0))};
-}
-
-std::vector<mir::Operation::Output *>
-ONNXOpCreator::convertConcat(const std::vector<mir::Operation::Output *> &inputs,
- const onnx::NodeProto &onnx_node)
-{
- bool found;
- int axis;
- std::tie(found, axis) = getIntAttribute(onnx_node);
- if (!found)
- throw std::runtime_error("Concat must have 'axis' attribute");
- auto result = createOp<ops::ConcatOp>(_graph, inputs, axis);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-ONNXOpCreator::convertGather(const std::vector<mir::Operation::Output *> &inputs,
- const onnx::NodeProto &onnx_node)
-{
- bool found;
- int value;
- std::tie(found, value) = getIntAttribute(onnx_node, "axis");
- int axis = found ? value : 0;
- auto result = createOp<ops::GatherOp>(_graph, inputs[0], inputs[1], axis);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-ONNXOpCreator::convertPad(const std::vector<mir::Operation::Output *> &inputs,
- const onnx::NodeProto &onnx_node)
-{
- bool found;
- float value;
- std::tie(found, value) = getFloatAttribute(onnx_node, "value");
- assert(found);
- auto padsAtt = findAttribute(onnx_node, "pads");
- assert(padsAtt);
- auto modeAtt = findAttribute(onnx_node, "mode");
- assert(modeAtt);
- auto mode = modeAtt->s();
- const mir::Scalar scalar(reinterpret_cast<const char *>(&value), DTYPE::FLOAT32, sizeof(float));
- assert(padsAtt->ints_size() > 0);
- int cnt = padsAtt->ints_size() / 2;
- assert(cnt % 2 == 0);
- int last = padsAtt->ints_size() - 1;
- std::vector<std::pair<int32_t, int32_t>> vec(cnt);
- auto *data = padsAtt->ints().data();
- for (int i = 0; i < cnt; i++)
- {
- auto pair = std::make_pair(data[i], data[last - i]);
- vec[i] = pair;
- }
- auto result = createOp<ops::PadOp>(_graph, inputs[0], inputs[0]->getShape().rank(), vec, scalar);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-ONNXOpCreator::convertPool(const std::vector<mir::Operation::Output *> &inputs,
- mir::ops::PoolOp::PoolingType pool_type,
- const onnx::NodeProto &onnx_node)
-{
- ops::PoolOp::BorderType border_type;
-
- KernelStridesPadding cdata;
- // Transpose ONNX NCHW to MIR NHWC
- auto t_input = convertONNXToMIR(_graph, inputs[0]);
-
- switch (pool_type)
- {
- case mir::ops::PoolOp::PoolingType::AVG:
- border_type = ops::PoolOp::BorderType::ZEROFILLED;
- pool_type = ops::PoolOp::PoolingType::AVG;
- getKernelStridesPadding(onnx_node, cdata);
- break;
- case mir::ops::PoolOp::PoolingType::MAX:
- border_type = ops::PoolOp::BorderType::EMPTY;
- pool_type = ops::PoolOp::PoolingType::MAX;
- getKernelStridesPadding(onnx_node, cdata);
- break;
- default:
- assert(false);
- }
- auto result =
- createOp<ops::PoolOp>(_graph, t_input, pool_type, cdata.kernel_shape, cdata.strides_shape,
- cdata.padding_before, cdata.padding_after, border_type);
- return {convertMIRToONNX(_graph, result->getOutput(0))};
-}
-
-std::vector<mir::Operation::Output *>
-ONNXOpCreator::convertSoftmax(const std::vector<mir::Operation::Output *> &inputs,
- const onnx::NodeProto &onnx_node)
-{
- int axis;
- bool found;
- std::tie(found, axis) = getIntAttribute(onnx_node);
- axis = found ? axis : 1;
- auto result = createOp<ops::SoftmaxOp>(_graph, inputs[0], axis);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-ONNXOpCreator::convertReshape(const std::vector<mir::Operation::Output *> &inputs)
-{
- // The original shape
- const auto &in_shape = inputs[0]->getShape();
-
- // Input tensor describing the new shape
- // TODO: could it be not a constant?
- auto *op = dynamic_cast<mir::ops::ConstantOp *>(inputs[1]->getNode());
- assert(op && "We support constants only");
- auto shape_tensor = op->getValue();
- Shape shape_tensor_shape = (shape_tensor).getShape();
- assert(shape_tensor_shape.rank() == 1);
- // The rank of the new shape
- auto cnt = shape_tensor_shape.numElements();
- // The vector to build the new shape from
- std::vector<int32_t> shape_vector(cnt);
- ShapeRange out_range(shape_tensor_shape);
- Tensor<int64_t> tensor_accessor(shape_tensor);
-
- int i = 0;
- for (auto idx : out_range)
- {
- if (tensor_accessor.at(idx) == 0)
- shape_vector[i] = in_shape.dim(i);
- else if (tensor_accessor.at(idx) == -1)
- shape_vector[i] = Shape::autoDim;
- else
- shape_vector[i] = tensor_accessor.at(idx);
- i++;
- }
- auto out_shape = Shape(shape_vector);
- auto result = createOp<ops::ReshapeOp>(_graph, inputs[0], out_shape);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-ONNXOpCreator::convertUnsqueeze(const std::vector<mir::Operation::Output *> &inputs,
- const onnx::NodeProto &onnx_node)
-{
- auto *axes = findAttribute(onnx_node, "axes");
- assert(axes && axes->ints_size());
- const Shape &input_shape = inputs[0]->getShape();
- const int out_rank = input_shape.rank() + axes->ints_size();
- Shape out_shape(out_rank);
- auto ints_iterator = axes->ints().begin();
- int j = 0;
- for (int i = 0; i < out_rank; i++)
- {
- if (ints_iterator < axes->ints().end() && i == *ints_iterator)
- {
- out_shape.dim(i) = 1;
- ints_iterator++;
- }
- else
- {
- out_shape.dim(i) = input_shape.dim(j);
- j++;
- }
- }
- auto result = createOp<ops::ReshapeOp>(_graph, inputs[0], out_shape);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-ONNXOpCreator::convertRelu(const std::vector<mir::Operation::Output *> &inputs)
-{
- assert(inputs.size() == 1);
- auto result = createOp<ops::ReluOp>(_graph, inputs[0]);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-ONNXOpCreator::convertSigmoid(const std::vector<mir::Operation::Output *> &inputs)
-{
- assert(inputs.size() == 1);
- auto result = createOp<ops::SigmoidOp>(_graph, inputs[0]);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-ONNXOpCreator::convertElementwise(const std::vector<mir::Operation::Output *> &inputs,
- mir::ops::ElementwiseOp::OpType op_type)
-{
- auto result = createOp<ops::ElementwiseOp>(_graph, inputs, op_type);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-ONNXOpCreator::convertUpsample(const std::vector<mir::Operation::Output *> &inputs,
- const onnx::NodeProto &onnx_node)
-{
- bool success;
- std::string mode;
- std::tie(success, mode) = getStringAttribute(onnx_node, "mode");
- if (!success)
- mode = "nearest";
- assert(mode == "nearest" && "Unsupported upscale mode!");
-
- // relies on attributes being lifted to constants (ONNX optimization pass)
- assert(inputs.size() > 1);
- auto *scales = dynamic_cast<mir::ops::ConstantOp *>(inputs[1]->getNode());
- assert(scales && "Weights could be a constant tensor only");
- auto scales_tensor = Tensor<float>(scales->getValue());
- int rank = inputs[0]->getShape().rank();
- assert(scales_tensor.getShape().numElements() == rank &&
- "The number of elements of 'scales' should be the same as the rank of input 'X'");
- assert(rank == 4 && "Only rank 4 is supported");
- std::vector<float> scales_vector(4);
- const int onnx2mir[] = {0, 3, 1, 2};
- assert(scales_tensor.getShape().rank() == 1 && "Scales are a 1d tensor");
- for (int i = 0; i < scales_tensor.getShape().numElements(); i++)
- scales_vector[onnx2mir[i]] = scales_tensor.atOffset(i);
- return {convertMIRToONNX(
- _graph, createOp<ops::ResizeOp>(_graph, convertONNXToMIR(_graph, inputs[0]),
- ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
- ->getOutput(0))};
-}
-
-std::vector<mir::Operation::Output *>
-ONNXOpCreator::convertBatchNorm(const std::vector<mir::Operation::Output *> &inputs,
- const onnx::NodeProto &onnx_node, InputTensors &input_tensors)
-{
- // overall_res = (X - mean) / sqrt(var + epsilon) * scale + bias
- bool found;
- float value;
- std::tie(found, value) = getFloatAttribute(onnx_node, "epsilon");
- float epsilon = found ? value : 1e-05f;
-
- // TODO: it's better to do it via inputs
- const auto &scale_tensor = input_tensors.at(inputs[1]->getNode()->getName());
- const auto &bias_tensor = input_tensors.at(inputs[2]->getNode()->getName());
- const auto &mean_tensor = input_tensors.at(inputs[3]->getNode()->getName());
- const auto &var_tensor = input_tensors.at(inputs[4]->getNode()->getName());
-
- // res1 = X - mean
- Tensor<float> bias_data(mean_tensor);
- for (auto &idx : ShapeRange(bias_data.getShape()))
- bias_data.at(idx) *= -1;
-
- auto data = convertONNXToMIR(_graph, inputs[0]);
- auto mean = createOp<ops::ConstantOp>(_graph, mean_tensor)->getOutput(0);
- auto result = createOp<ops::BiasAddOp>(_graph, data, mean);
-
- // res2 = res1 * scale / (var + epsilon)
- Tensor<float> multiplier(scale_tensor);
- Tensor<float> var_accessor(var_tensor);
- for (auto &idx : ShapeRange(scale_tensor.getShape()))
- multiplier.at(idx) /= std::sqrt(var_accessor.at(idx) + epsilon);
- auto scale = createOp<ops::ConstantOp>(_graph, scale_tensor)->getOutput(0);
- result = createOp<ops::ScaleOp>(_graph, result->getOutput(0), scale);
-
- // overall_res = res2 + bias
- auto bias = createOp<ops::ConstantOp>(_graph, bias_tensor)->getOutput(0);
- result = createOp<ops::BiasAddOp>(_graph, result->getOutput(0), bias);
-
- return {convertMIRToONNX(_graph, result->getOutput(0))};
-}
-
-std::vector<mir::Operation::Output *>
-ONNXOpCreator::convertDropout(const std::vector<mir::Operation::Output *> &inputs,
- const onnx::NodeProto &onnx_node)
-{
- bool found;
- float value;
- std::tie(found, value) = getFloatAttribute(onnx_node, "ratio");
- float ratio = found ? value : 1.0;
- auto result = createOp<ops::SoftmaxOp>(_graph, inputs[0], ratio);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-ONNXOpCreator::convertScale(const std::vector<mir::Operation::Output *> &inputs,
- const onnx::NodeProto &onnx_node)
-{
- bool found;
- float value;
- std::tie(found, value) = getFloatAttribute(onnx_node, "scale");
- float scale_val = found ? value : 1.0;
- const auto &shape = inputs[0]->getShape();
- auto scale_tensor = createScalarTensor(scale_val, shape);
- auto scale = createOp<ops::ConstantOp>(_graph, scale_tensor)->getOutput(0);
- auto result = createOp<ops::ScaleOp>(_graph, inputs[0], scale);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-ONNXOpCreator::convertShape(const std::vector<mir::Operation::Output *> &inputs)
-{
- const auto &input_shape = inputs[0]->getShape();
- int size = input_shape.rank();
- Shape output_shape{size};
- std::vector<float> data(static_cast<std::size_t>(size));
- for (int i = 0; i < size; i++)
- {
- data[i] = input_shape.dim(i);
- }
- TensorVariant tensor(DTYPE::FLOAT32, output_shape, data.data());
- auto result = createOp<ops::ConstantOp>(_graph, tensor);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-ONNXOpCreator::convertGivenTensorFill(const onnx::NodeProto &onnx_node, InputTensors &input_tensors)
-{
- auto values_att = findAttribute(onnx_node, "values");
- auto shape_att = findAttribute(onnx_node, "shape");
- assert(values_att && shape_att);
- assert(values_att->floats_size() > 0 && shape_att->ints_size() > 0);
- Shape shape(shape_att->ints_size());
- for (int i = 0; i < shape_att->ints_size(); i++)
- shape.dim(i) = shape_att->ints(i);
- TensorVariant tensor(DTYPE::FLOAT32, shape, values_att->floats().data());
- input_tensors.insert(std::make_pair(onnx_node.output(0), tensor));
- auto result = createOp<ops::ConstantOp>(_graph, tensor);
- return {result->getOutput(0)};
-}
-
-std::vector<mir::Operation::Output *>
-ONNXOpCreator::convertConstant(const onnx::NodeProto &onnx_node, InputTensors &input_tensors)
-{
- assert((onnx_node.attribute_size() == 1) &&
- (onnx_node.attribute(0).type() == onnx::AttributeProto_AttributeType_TENSOR) &&
- (onnx_node.attribute(0).tensors_size() == 0));
- assert(onnx_node.attribute(0).name() == "value");
- auto name = onnx_node.output(0);
- auto &onnx_tensor = onnx_node.attribute(0).t();
- auto mir_tensor = createTensor(&onnx_tensor);
- input_tensors.insert(std::make_pair(name, mir_tensor));
- auto op = _graph->create<mir::ops::ConstantOp>(name, mir_tensor)->getOutput(0);
- return {op};
-}
-
-std::vector<mir::Operation::Output *>
-ONNXOpCreator::convertGemm(const std::vector<mir::Operation::Output *> &inputs,
- const onnx::NodeProto &onnx_node)
-{
- bool found;
- int ivalue;
- float fvalue;
-
- // Compute Y = alpha * A' * B' + beta * C, where input tensor A has shape (M, K) or (K, M),
- // input tensor B has shape (K, N) or (N, K),
- // input tensor C is broadcastable to shape (M, N), and output tensor Y has shape (M, N).
- // A will be transposed before doing the computation if attribute transA is non-zero,
- // same for B and transB. This operator supports unidirectional broadcasting
- // (tensor C should be unidirectional broadcastable to tensor A * B).
-
- std::tie(found, ivalue) = getIntAttribute(onnx_node, "transA");
- bool trans_a = found ? static_cast<bool>(ivalue) : false;
- std::tie(found, ivalue) = getIntAttribute(onnx_node, "transB");
- bool trans_b = found ? static_cast<bool>(ivalue) : false;
- std::tie(found, fvalue) = getFloatAttribute(onnx_node, "alpha");
- float alpha_val = found ? fvalue : 1.0f;
- std::tie(found, fvalue) = getFloatAttribute(onnx_node, "beta");
- float beta_val = found ? fvalue : 1.0f;
-
- // 1. Prepare input matrix A
- // Flatten the shape by dim(0)
- const auto &in_shape = inputs[0]->getShape();
- mir::Shape shape0{in_shape.dim(0), in_shape.numElements() / in_shape.dim(0)};
- auto input_a = createOp<ops::ReshapeOp>(_graph, inputs[0], shape0)->getOutput(0);
- if (trans_a)
- input_a =
- createOp<ops::TransposeOp>(_graph, input_a, std::vector<std::size_t>{1, 0})->getOutput(0);
- if (alpha_val != 1.0)
- {
- auto alpha_tensor = createScalarTensor(alpha_val, input_a->getShape());
- auto alpha = createOp<ops::ConstantOp>(_graph, alpha_tensor)->getOutput(0);
- input_a = createOp<ops::ScaleOp>(_graph, input_a, alpha)->getOutput(0);
- }
-
- // 2. Prepare input matrix B
- //
- auto input_b = inputs[1];
- if (trans_b)
- input_b =
- createOp<ops::TransposeOp>(_graph, input_b, std::vector<std::size_t>{1, 0})->getOutput(0);
- // Number of cols in tensor A must be equal to number of rows in tensor B
- assert(input_a->getShape().dim(1) == input_b->getShape().dim(0));
- Shape mult_a_b{input_a->getShape().dim(0), input_b->getShape().dim(1)};
-
- // 3. Prepare input matrix C
- //
- auto input_c = inputs[2];
- auto beta_tensor = createScalarTensor(beta_val, input_c->getShape());
- if ((mult_a_b.rank() == 2) && (input_c->getShape().rank() == 1))
- {
- beta_tensor = TensorVariant(beta_tensor, mult_a_b);
- }
- auto beta = createOp<ops::ConstantOp>(_graph, beta_tensor)->getOutput(0);
- std::vector<mir::Operation::Output *> mul_inputs = {beta, input_c};
- auto c_mult = createOp<ops::ElementwiseOp>(_graph, mul_inputs, ops::ElementwiseOp::OpType::mul)
- ->getOutput(0);
- assert(c_mult->getShape() == mult_a_b);
- auto result = createOp<ops::GemmOp>(_graph, input_a, input_b, c_mult);
- return {result->getOutput(0)};
-}
-} // namespace mir_onnx
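A worked example of the Reshape semantics implemented above (and carried over into the new Op/Reshape.cpp): given an input of shape (2, 3, 4) and a constant shape tensor [0, -1], the 0 copies the corresponding input dimension (2) and the -1 becomes Shape::autoDim, so the output shape is inferred as (2, 12).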
diff --git a/compiler/mir-onnx-importer/ONNXOpCreator.h b/compiler/mir-onnx-importer/ONNXOpCreator.h
deleted file mode 100644
index d6caa99b3..000000000
--- a/compiler/mir-onnx-importer/ONNXOpCreator.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef _MIR_ONNX_OP_CREATOR_H
-#define _MIR_ONNX_OP_CREATOR_H
-
-#include "onnx/onnx.pb.h"
-
-#include "mir/ops/CommonProps.h"
-#include "mir/ops/ElementwiseOp.h"
-#include "mir/ops/PoolOp.h"
-#include "mir/Graph.h"
-#include "mir/Shape.h"
-#include "mir/TensorVariant.h"
-
-#include <set>
-#include <map>
-#include <vector>
-#include <memory>
-
-namespace mir_onnx
-{
-
-class ONNXOpCreator
-{
-public:
- using InputTensors = std::map<std::string, mir::TensorVariant>;
-
- ONNXOpCreator() = default;
-
- void setMirGraph(mir::Graph *g) { _graph = g; };
-
- std::vector<mir::Operation::Output *>
- convertConv2D(const std::vector<mir::Operation::Output *> &inputs,
- const onnx::NodeProto &onnx_node);
-
- std::vector<mir::Operation::Output *>
- convertConcat(const std::vector<mir::Operation::Output *> &inputs,
- const onnx::NodeProto &onnx_node);
-
- std::vector<mir::Operation::Output *> convertGivenTensorFill(const onnx::NodeProto &onnx_node,
- InputTensors &input_tensors);
-
- std::vector<mir::Operation::Output *> convertConstant(const onnx::NodeProto &onnx_node,
- InputTensors &input_tensors);
-
- std::vector<mir::Operation::Output *>
- convertPool(const std::vector<mir::Operation::Output *> &inputs,
- mir::ops::PoolOp::PoolingType pool_type, const onnx::NodeProto &onnx_node);
-
- std::vector<mir::Operation::Output *>
- convertPad(const std::vector<mir::Operation::Output *> &inputs, const onnx::NodeProto &onnx_node);
-
- std::vector<mir::Operation::Output *>
- convertSoftmax(const std::vector<mir::Operation::Output *> &inputs,
- const onnx::NodeProto &onnx_node);
-
- std::vector<mir::Operation::Output *>
- convertReshape(const std::vector<mir::Operation::Output *> &inputs);
-
- std::vector<mir::Operation::Output *>
- convertRelu(const std::vector<mir::Operation::Output *> &inputs);
-
- std::vector<mir::Operation::Output *>
- convertSigmoid(const std::vector<mir::Operation::Output *> &inputs);
-
- std::vector<mir::Operation::Output *>
- convertUnsqueeze(const std::vector<mir::Operation::Output *> &inputs,
- const onnx::NodeProto &onnx_node);
-
- std::vector<mir::Operation::Output *>
- convertUpsample(const std::vector<mir::Operation::Output *> &inputs,
- const onnx::NodeProto &onnx_node);
-
- std::vector<mir::Operation::Output *>
- convertElementwise(const std::vector<mir::Operation::Output *> &inputs,
- mir::ops::ElementwiseOp::OpType op_type);
-
- std::vector<mir::Operation::Output *>
- convertScale(const std::vector<mir::Operation::Output *> &inputs,
- const onnx::NodeProto &onnx_node);
-
- std::vector<mir::Operation::Output *>
- convertShape(const std::vector<mir::Operation::Output *> &inputs);
-
- std::vector<mir::Operation::Output *>
- convertBatchNorm(const std::vector<mir::Operation::Output *> &inputs,
- const onnx::NodeProto &onnx_node, InputTensors &input_tensors);
-
- std::vector<mir::Operation::Output *>
- convertDropout(const std::vector<mir::Operation::Output *> &inputs,
- const onnx::NodeProto &onnx_node);
-
- std::vector<mir::Operation::Output *>
- convertGather(const std::vector<mir::Operation::Output *> &inputs,
- const onnx::NodeProto &onnx_node);
-
- std::vector<mir::Operation::Output *>
- convertGemm(const std::vector<mir::Operation::Output *> &inputs,
- const onnx::NodeProto &onnx_node);
-
-private:
- mir::Graph *_graph = nullptr;
-};
-} // namespace mir_onnx
-#endif // _MIR_ONNX_OP_CREATOR_H
diff --git a/compiler/mir-onnx-importer/ONNXOpRegistration.h b/compiler/mir-onnx-importer/ONNXOpRegistration.h
index 33ae8412c..1028478db 100644
--- a/compiler/mir-onnx-importer/ONNXOpRegistration.h
+++ b/compiler/mir-onnx-importer/ONNXOpRegistration.h
@@ -16,13 +16,63 @@
#include "ONNXNodeConverterRegistry.h"
+#include "Op/Add.h"
+#include "Op/AveragePool.h"
+#include "Op/BatchNormalization.h"
+#include "Op/Concat.h"
+#include "Op/Constant.h"
+#include "Op/Conv.h"
+#include "Op/Dropout.h"
+#include "Op/Gather.h"
+#include "Op/Gemm.h"
+#include "Op/GivenTensorFill.h"
+#include "Op/GlobalAveragePool.h"
+#include "Op/Max.h"
+#include "Op/MaxPool.h"
+#include "Op/Mul.h"
+#include "Op/Pad.h"
+#include "Op/Relu.h"
+#include "Op/Reshape.h"
+#include "Op/Scale.h"
+#include "Op/Shape.h"
+#include "Op/Sigmoid.h"
+#include "Op/Softmax.h"
+#include "Op/Sum.h"
+#include "Op/Unsqueeze.h"
+#include "Op/Upsample.h"
+
namespace mir_onnx
{
inline void registerSupportedOps()
{
auto &registry = NodeConverterRegistry::getInstance();
- // registry.registerConverter("Add", stdex::make_unique<AddNodeConverter>());
+ registry.registerConverter("Add", stdex::make_unique<AddNodeConverter>());
+ registry.registerConverter("AveragePool", stdex::make_unique<AveragePoolNodeConverter>());
+ registry.registerConverter("BatchNormalization",
+ stdex::make_unique<BatchNormalizationNodeConverter>());
+ registry.registerConverter("Concat", stdex::make_unique<ConcatNodeConverter>());
+ registry.registerConverter("Constant", stdex::make_unique<ConstantNodeConverter>());
+ registry.registerConverter("Conv", stdex::make_unique<ConvNodeConverter>());
+ registry.registerConverter("Dropout", stdex::make_unique<DropoutNodeConverter>());
+ registry.registerConverter("Gather", stdex::make_unique<GatherNodeConverter>());
+ registry.registerConverter("Gemm", stdex::make_unique<GemmNodeConverter>());
+ registry.registerConverter("GivenTensorFill", stdex::make_unique<GivenTensorFillNodeConverter>());
+ registry.registerConverter("GlobalAveragePool",
+ stdex::make_unique<GlobalAveragePoolNodeConverter>());
+ registry.registerConverter("Max", stdex::make_unique<MaxNodeConverter>());
+ registry.registerConverter("MaxPool", stdex::make_unique<MaxPoolNodeConverter>());
+ registry.registerConverter("Mul", stdex::make_unique<MulNodeConverter>());
+ registry.registerConverter("Pad", stdex::make_unique<PadNodeConverter>());
+ registry.registerConverter("Relu", stdex::make_unique<ReluNodeConverter>());
+ registry.registerConverter("Reshape", stdex::make_unique<ReshapeNodeConverter>());
+ registry.registerConverter("Scale", stdex::make_unique<ScaleNodeConverter>());
+ registry.registerConverter("Shape", stdex::make_unique<ShapeNodeConverter>());
+ registry.registerConverter("Sigmoid", stdex::make_unique<SigmoidNodeConverter>());
+ registry.registerConverter("Softmax", stdex::make_unique<SoftmaxNodeConverter>());
+ registry.registerConverter("Sum", stdex::make_unique<SumNodeConverter>());
+ registry.registerConverter("Unsqueeze", stdex::make_unique<UnsqueezeNodeConverter>());
+ registry.registerConverter("Upsample", stdex::make_unique<UpsampleNodeConverter>());
}
} // namespace mir_onnx
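Once registered, the importer can dispatch on NodeProto::op_type(). A sketch of the lookup, assuming the registry exposes a getter (the exact accessor in ONNXNodeConverterRegistry.h is not shown in this diff; "lookup" is a hypothetical name):

    const auto &registry = NodeConverterRegistry::getInstance();
    const NodeConverter *converter = registry.lookup(onnx_node.op_type());
    if (converter == nullptr)
      throw std::runtime_error("Unsupported operation: " + onnx_node.op_type());
    auto outputs = converter->convert(onnx_node, inputs, graph);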
diff --git a/compiler/mir-onnx-importer/Op/Add.cpp b/compiler/mir-onnx-importer/Op/Add.cpp
new file mode 100644
index 000000000..ed413d8ee
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Add.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Add.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/ElementwiseOp.h"
+
+namespace mir_onnx
+{
+
+std::vector<mir::Operation::Output *>
+AddNodeConverter::convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const
+{
+ auto result =
+ createOp<mir::ops::ElementwiseOp>(graph, inputs, mir::ops::ElementwiseOp::OpType::add);
+ return {result->getOutput(0)};
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Add.h b/compiler/mir-onnx-importer/Op/Add.h
new file mode 100644
index 000000000..67fb12542
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Add.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+class AddNodeConverter : public NodeConverter
+{
+public:
+ std::vector<mir::Operation::Output *> convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const override;
+};
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/AveragePool.cpp b/compiler/mir-onnx-importer/Op/AveragePool.cpp
new file mode 100644
index 000000000..9aac33e27
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/AveragePool.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AveragePool.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/PoolOp.h"
+
+namespace mir_onnx
+{
+
+std::vector<mir::Operation::Output *>
+AveragePoolNodeConverter::convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const
+{
+ // TODO Set some asserts
+ mir::ops::PoolOp::BorderType border_type;
+ mir::ops::PoolOp::PoolingType pool_type;
+
+ KernelStridesPadding cdata;
+ // Transpose ONNX NCHW to MIR NHWC
+ auto t_input = convertONNXToMIR(graph, inputs[0]);
+
+ border_type = mir::ops::PoolOp::BorderType::ZEROFILLED;
+ pool_type = mir::ops::PoolOp::PoolingType::AVG;
+ getKernelStridesPadding(onnx_node, cdata);
+
+ auto result =
+ createOp<mir::ops::PoolOp>(graph, t_input, pool_type, cdata.kernel_shape, cdata.strides_shape,
+ cdata.padding_before, cdata.padding_after, border_type);
+ return {convertMIRToONNX(graph, result->getOutput(0))};
+}
+
+} // namespace mir_onnx
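The convertONNXToMIR/convertMIRToONNX helpers bracket every layout-sensitive converter (pooling, convolution, batch normalization). Presumably they insert transposes between ONNX's NCHW layout and MIR's NHWC layout; a sketch under that assumption, since ONNXHelpers is not part of this diff:

    // Assumed shims, NCHW (ONNX) <-> NHWC (MIR) via TransposeOp:
    mir::Operation::Output *convertONNXToMIR(mir::Graph *graph, mir::Operation::Output *input)
    {
      return createOp<mir::ops::TransposeOp>(graph, input, std::vector<std::size_t>{0, 2, 3, 1})
          ->getOutput(0);
    }

    mir::Operation::Output *convertMIRToONNX(mir::Graph *graph, mir::Operation::Output *input)
    {
      return createOp<mir::ops::TransposeOp>(graph, input, std::vector<std::size_t>{0, 3, 1, 2})
          ->getOutput(0);
    }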
diff --git a/compiler/mir-onnx-importer/Op/AveragePool.h b/compiler/mir-onnx-importer/Op/AveragePool.h
new file mode 100644
index 000000000..f282281de
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/AveragePool.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+class AveragePoolNodeConverter : public NodeConverter
+{
+public:
+ std::vector<mir::Operation::Output *> convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const override;
+};
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/BatchNormalization.cpp b/compiler/mir-onnx-importer/Op/BatchNormalization.cpp
new file mode 100644
index 000000000..5fb587b81
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/BatchNormalization.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "BatchNormalization.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ShapeRange.h"
+#include "mir/Tensor.h"
+
+#include "mir/ops/BiasAddOp.h"
+#include "mir/ops/ConstantOp.h"
+#include "mir/ops/ScaleOp.h"
+
+#include <cmath>
+
+namespace mir_onnx
+{
+
+std::vector<mir::Operation::Output *>
+BatchNormalizationNodeConverter::convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const
+{
+ // overall_res = (X - mean) / sqrt(var + epsilon) * scale + bias
+ bool found;
+ float value;
+ std::tie(found, value) = getFloatAttribute(onnx_node, "epsilon");
+ float epsilon = found ? value : 1e-05f;
+
+ const auto &scale_tensor = dynamic_cast<mir::ops::ConstantOp *>(inputs[1]->getNode())->getValue();
+ const auto &bias_tensor = dynamic_cast<mir::ops::ConstantOp *>(inputs[2]->getNode())->getValue();
+ const auto &mean_tensor = dynamic_cast<mir::ops::ConstantOp *>(inputs[3]->getNode())->getValue();
+ const auto &var_tensor = dynamic_cast<mir::ops::ConstantOp *>(inputs[4]->getNode())->getValue();
+
+ // res1 = X - mean
+ mir::Tensor<float> bias_data(mean_tensor);
+ for (auto &idx : mir::ShapeRange(bias_data.getShape()))
+ bias_data.at(idx) *= -1;
+
+ auto data = convertONNXToMIR(graph, inputs[0]);
+ auto mean = createOp<mir::ops::ConstantOp>(graph, mean_tensor)->getOutput(0);
+ auto result = createOp<mir::ops::BiasAddOp>(graph, data, mean);
+
+ // res2 = res1 * scale / (var + epsilon)
+ mir::Tensor<float> multiplier(scale_tensor);
+ mir::Tensor<float> var_accessor(var_tensor);
+ for (auto &idx : mir::ShapeRange(scale_tensor.getShape()))
+ multiplier.at(idx) /= std::sqrt(var_accessor.at(idx) + epsilon);
+ auto scale = createOp<mir::ops::ConstantOp>(graph, scale_tensor)->getOutput(0);
+ result = createOp<mir::ops::ScaleOp>(graph, result->getOutput(0), scale);
+
+ // overall_res = res2 + bias
+ auto bias = createOp<mir::ops::ConstantOp>(graph, bias_tensor)->getOutput(0);
+ result = createOp<mir::ops::BiasAddOp>(graph, result->getOutput(0), bias);
+
+ return {convertMIRToONNX(graph, result->getOutput(0))};
+}
+
+} // namespace mir_onnx
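The three MIR ops above implement the batch-norm formula by folding the learned statistics into constants at import time:

    (X - mean) / sqrt(var + epsilon) * scale + bias
      = BiasAddOp( ScaleOp( BiasAddOp(X, -mean), scale / sqrt(var + epsilon) ), bias )

Note that mir::Tensor<float> acts as an accessor over the underlying TensorVariant: the loop negating bias_data rewrites mean_tensor in place before it is wrapped in a ConstantOp, and the multiplier loop likewise rewrites scale_tensor into scale / sqrt(var + epsilon). No runtime normalization op is emitted.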
diff --git a/compiler/mir-onnx-importer/Op/BatchNormalization.h b/compiler/mir-onnx-importer/Op/BatchNormalization.h
new file mode 100644
index 000000000..79eb8e35e
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/BatchNormalization.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+class BatchNormalizationNodeConverter : public NodeConverter
+{
+public:
+ std::vector<mir::Operation::Output *> convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const override;
+};
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Concat.cpp b/compiler/mir-onnx-importer/Op/Concat.cpp
new file mode 100644
index 000000000..cfeb854fc
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Concat.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Concat.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/ConcatOp.h"
+
+namespace mir_onnx
+{
+
+std::vector<mir::Operation::Output *>
+ConcatNodeConverter::convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const
+{
+ bool found;
+ int axis;
+ std::tie(found, axis) = getIntAttribute(onnx_node);
+ if (!found)
+ throw std::runtime_error("Concat must have 'axis' attribute");
+ auto result = createOp<mir::ops::ConcatOp>(graph, inputs, axis);
+ return {result->getOutput(0)};
+}
+
+} // namespace mir_onnx
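Note that getIntAttribute is called here without an attribute name; presumably the helper defaults to "axis" (its declaration lives in ONNXHelpers.h, which is not part of this diff). The deleted convertConcat and convertSoftmax rely on the same default.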
diff --git a/compiler/mir-onnx-importer/Op/Concat.h b/compiler/mir-onnx-importer/Op/Concat.h
new file mode 100644
index 000000000..06af0b2e5
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Concat.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+class ConcatNodeConverter : public NodeConverter
+{
+public:
+ std::vector<mir::Operation::Output *> convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const override;
+};
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Constant.cpp b/compiler/mir-onnx-importer/Op/Constant.cpp
new file mode 100644
index 000000000..0a1484b3f
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Constant.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Constant.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/TensorVariant.h"
+#include "mir/ops/ConstantOp.h"
+
+namespace mir_onnx
+{
+
+std::vector<mir::Operation::Output *>
+ConstantNodeConverter::convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const
+{
+ assert((onnx_node.attribute_size() == 1) &&
+ (onnx_node.attribute(0).type() == ::onnx::AttributeProto_AttributeType_TENSOR) &&
+ (onnx_node.attribute(0).tensors_size() == 0));
+ assert(onnx_node.attribute(0).name() == "value");
+ auto name = onnx_node.output(0);
+ auto &onnx_tensor = onnx_node.attribute(0).t();
+ auto mir_tensor = createTensor(&onnx_tensor);
+ // TODO check right removing input_tensors
+ // input_tensors.insert(std::make_pair(name, mir_tensor));
+ auto op = graph->create<mir::ops::ConstantOp>(name, mir_tensor);
+ return {op->getOutput(0)};
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Constant.h b/compiler/mir-onnx-importer/Op/Constant.h
new file mode 100644
index 000000000..92aa6e623
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Constant.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+class ConstantNodeConverter : public NodeConverter
+{
+public:
+ std::vector<mir::Operation::Output *> convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const override;
+};
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Conv.cpp b/compiler/mir-onnx-importer/Op/Conv.cpp
new file mode 100644
index 000000000..879018fb1
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Conv.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Conv.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/TensorUtil.h"
+
+#include "mir/ops/BiasAddOp.h"
+#include "mir/ops/ConstantOp.h"
+#include "mir/ops/Conv2DOp.h"
+#include "mir/ops/DepthwiseConv2DOp.h"
+
+namespace mir_onnx
+{
+
+std::vector<mir::Operation::Output *>
+ConvNodeConverter::convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const
+{
+ assert(inputs.size() >= 2);
+
+ KernelStridesPadding cdata;
+ getKernelStridesPadding(onnx_node, cdata);
+ // FIXME: It can be non-constant value.
+ auto *in_weights = dynamic_cast<mir::ops::ConstantOp *>(inputs[1]->getNode());
+ assert(in_weights && "Weights could be a constant tensor only");
+ const auto &in_weights_tensor = in_weights->getValue();
+ // We should transpose ONNX MC(IO)HW to HWOI
+ auto kernel_tensor = mir::transposeTensor<2, 3, 1, 0>(in_weights_tensor);
+ auto in_group_size = kernel_tensor.getShape().dim(2);
+ auto out_channels = kernel_tensor.getShape().dim(3);
+ bool found;
+ int num_groups;
+ std::tie(found, num_groups) = getIntAttribute(onnx_node, "group");
+ if (!found)
+ num_groups = 1;
+ bool is_depthwise = (num_groups != 1) && (in_group_size == 1) && (out_channels == num_groups);
+
+ mir::Operation *result;
+ auto transposed_input = convertONNXToMIR(graph, inputs[0]);
+ if (is_depthwise)
+ {
+ // TODO handle properly kernel with layer multiplier
+ auto transposed_tensor = mir::transposeTensor<0, 1, 3, 2>(kernel_tensor);
+ auto kernel = createOp<mir::ops::ConstantOp>(graph, transposed_tensor)->getOutput(0);
+ result =
+ createOp<mir::ops::DepthwiseConv2DOp>(graph, transposed_input, kernel, cdata.strides_shape,
+ cdata.padding_before, cdata.padding_after);
+ }
+ else
+ {
+ // first we need to convert kernel of grouped convolution to appropriate ordinary kernel
+ if (num_groups != 1)
+ kernel_tensor = fixGroupedKernel(num_groups, kernel_tensor);
+ kernel_tensor = mir::transposeTensor<3, 0, 1, 2>(kernel_tensor);
+ auto kernel = createOp<mir::ops::ConstantOp>(graph, kernel_tensor)->getOutput(0);
+ result = createOp<mir::ops::Conv2DOp>(graph, transposed_input, kernel, cdata.strides_shape,
+ cdata.padding_before, cdata.padding_after);
+ }
+
+ if (inputs.size() > 2)
+ result = createOp<mir::ops::BiasAddOp>(graph, result->getOutput(0), inputs[2]);
+
+ return {convertMIRToONNX(graph, result->getOutput(0))};
+}
+
+} // namespace mir_onnx
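The depthwise test reads off the transposed kernel shape: after transposeTensor<2, 3, 1, 0>, dim(2) holds the per-group input channel count and dim(3) the output channel count. As a worked example under the ONNX weight layout (M x C/group x kH x kW): a MobileNet-style layer with group = 32 and weights of shape (32, 1, 3, 3) transposes to (3, 3, 1, 32), so in_group_size == 1 and out_channels == 32 == num_groups, and the importer emits DepthwiseConv2DOp; any other grouped kernel is first expanded into an ordinary kernel by fixGroupedKernel.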
diff --git a/compiler/mir-onnx-importer/Op/Conv.h b/compiler/mir-onnx-importer/Op/Conv.h
new file mode 100644
index 000000000..5849e65a4
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Conv.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+class ConvNodeConverter : public NodeConverter
+{
+public:
+ std::vector<mir::Operation::Output *> convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const override;
+};
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Dropout.cpp b/compiler/mir-onnx-importer/Op/Dropout.cpp
new file mode 100644
index 000000000..e30923e14
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Dropout.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Dropout.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/DropoutOp.h"
+
+namespace mir_onnx
+{
+
+std::vector<mir::Operation::Output *>
+DropoutNodeConverter::convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const
+{
+ bool found;
+ float value;
+ std::tie(found, value) = getFloatAttribute(onnx_node, "ratio");
+ float ratio = found ? value : 0.5; // default 0.5
+ auto result = createOp<mir::ops::DropoutOp>(graph, inputs[0], ratio);
+ return {result->getOutput(0)};
+}
+
+} // namespace mir_onnx
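A behavioral fix worth noting relative to the deleted ONNXOpCreator::convertDropout above: the old code mistakenly built a SoftmaxOp and defaulted the ratio to 1.0, whereas this converter builds a DropoutOp and defaults to 0.5, matching the ONNX Dropout specification.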
diff --git a/compiler/mir-onnx-importer/Op/Dropout.h b/compiler/mir-onnx-importer/Op/Dropout.h
new file mode 100644
index 000000000..f57fa4975
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Dropout.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+class DropoutNodeConverter : public NodeConverter
+{
+public:
+ std::vector<mir::Operation::Output *> convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const override;
+};
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Gather.cpp b/compiler/mir-onnx-importer/Op/Gather.cpp
new file mode 100644
index 000000000..7e82b6a45
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Gather.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Gather.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/GatherOp.h"
+
+namespace mir_onnx
+{
+
+std::vector<mir::Operation::Output *>
+GatherNodeConverter::convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const
+{
+ bool found;
+ int value;
+ std::tie(found, value) = getIntAttribute(onnx_node, "axis");
+ int axis = found ? value : 0;
+ auto result = createOp<mir::ops::GatherOp>(graph, inputs[0], inputs[1], axis);
+ return {result->getOutput(0)};
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Gather.h b/compiler/mir-onnx-importer/Op/Gather.h
new file mode 100644
index 000000000..64c770ac9
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Gather.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+class GatherNodeConverter : public NodeConverter
+{
+public:
+ std::vector<mir::Operation::Output *> convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const override;
+};
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Gemm.cpp b/compiler/mir-onnx-importer/Op/Gemm.cpp
new file mode 100644
index 000000000..d1b6adf1a
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Gemm.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Gemm.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/TensorVariant.h"
+
+#include "mir/ops/ConstantOp.h"
+#include "mir/ops/ElementwiseOp.h"
+#include "mir/ops/GemmOp.h"
+#include "mir/ops/ReshapeOp.h"
+#include "mir/ops/ScaleOp.h"
+#include "mir/ops/TransposeOp.h"
+
+namespace mir_onnx
+{
+
+std::vector<mir::Operation::Output *>
+GemmNodeConverter::convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const
+{
+ bool found;
+ int ivalue;
+ float fvalue;
+
+ // Compute Y = alpha * A' * B' + beta * C, where input tensor A has shape (M, K) or (K, M),
+ // input tensor B has shape (K, N) or (N, K),
+ // input tensor C is broadcastable to shape (M, N), and output tensor Y has shape (M, N).
+ // A will be transposed before doing the computation if attribute transA is non-zero,
+ // same for B and transB. This operator supports unidirectional broadcasting
+ // (tensor C should be unidirectionally broadcastable to tensor A * B).
+
+ std::tie(found, ivalue) = getIntAttribute(onnx_node, "transA");
+ bool trans_a = found ? static_cast<bool>(ivalue) : false;
+ std::tie(found, ivalue) = getIntAttribute(onnx_node, "transB");
+ bool trans_b = found ? static_cast<bool>(ivalue) : false;
+ std::tie(found, fvalue) = getFloatAttribute(onnx_node, "alpha");
+ float alpha_val = found ? fvalue : 1.0f;
+ std::tie(found, fvalue) = getFloatAttribute(onnx_node, "beta");
+ float beta_val = found ? fvalue : 1.0f;
+
+ // 1. Prepare input matrix A
+ // Flatten the input into a 2-D matrix: dim(0) rows, the remaining elements as columns
+ const auto &in_shape = inputs[0]->getShape();
+ mir::Shape shape0{in_shape.dim(0), in_shape.numElements() / in_shape.dim(0)};
+ auto input_a = createOp<mir::ops::ReshapeOp>(graph, inputs[0], shape0)->getOutput(0);
+ if (trans_a)
+ input_a = createOp<mir::ops::TransposeOp>(graph, input_a, std::vector<std::size_t>{1, 0})
+ ->getOutput(0);
+ if (alpha_val != 1.0)
+ {
+ auto alpha_tensor = createScalarTensor(alpha_val, input_a->getShape());
+ auto alpha = createOp<mir::ops::ConstantOp>(graph, alpha_tensor)->getOutput(0);
+ input_a = createOp<mir::ops::ScaleOp>(graph, input_a, alpha)->getOutput(0);
+ }
+
+ // 2. Prepare input matrix B
+ //
+ auto input_b = inputs[1];
+ if (trans_b)
+ input_b = createOp<mir::ops::TransposeOp>(graph, input_b, std::vector<std::size_t>{1, 0})
+ ->getOutput(0);
+ // Number of cols in tensor A must be equal to number of rows in tensor B
+ assert(input_a->getShape().dim(1) == input_b->getShape().dim(0));
+ mir::Shape mult_a_b{input_a->getShape().dim(0), input_b->getShape().dim(1)};
+
+ // 3. Prepare input matrix C
+ //
+ auto input_c = inputs[2];
+ auto beta_tensor = createScalarTensor(beta_val, input_c->getShape());
+ if ((mult_a_b.rank() == 2) && (input_c->getShape().rank() == 1))
+ {
+ beta_tensor = mir::TensorVariant(beta_tensor, mult_a_b);
+ }
+ auto beta = createOp<mir::ops::ConstantOp>(graph, beta_tensor)->getOutput(0);
+ std::vector<mir::Operation::Output *> mul_inputs = {beta, input_c};
+ auto c_mult =
+ createOp<mir::ops::ElementwiseOp>(graph, mul_inputs, mir::ops::ElementwiseOp::OpType::mul)
+ ->getOutput(0);
+ assert(c_mult->getShape() == mult_a_b);
+ auto result = createOp<mir::ops::GemmOp>(graph, input_a, input_b, c_mult);
+ return {result->getOutput(0)};
+}
+
+} // namespace mir_onnx
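
A concrete trace of the shape handling above, assuming A of shape (2, 3), B of shape (3, 4), C of shape (4), and default attributes (alpha = beta = 1, transA = transB = 0):

    // 1. A is reshaped to (2, 3) (already 2-D); no ScaleOp since alpha == 1.
    // 2. B stays (3, 4); cols(A) == rows(B) == 3, so mult_a_b = (2, 4).
    // 3. C has rank 1 while mult_a_b has rank 2, so the beta tensor is
    //    broadcast to (2, 4) before the elementwise multiply, and GemmOp
    //    receives operands of shapes (2, 3), (3, 4) and (2, 4).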
diff --git a/compiler/mir-onnx-importer/Op/Gemm.h b/compiler/mir-onnx-importer/Op/Gemm.h
new file mode 100644
index 000000000..461ebfd3b
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Gemm.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+class GemmNodeConverter : public NodeConverter
+{
+public:
+ std::vector<mir::Operation::Output *> convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const override;
+};
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/GivenTensorFill.cpp b/compiler/mir-onnx-importer/Op/GivenTensorFill.cpp
new file mode 100644
index 000000000..c3febc1b8
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/GivenTensorFill.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GivenTensorFill.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/TensorVariant.h"
+
+#include "mir/ops/ConstantOp.h"
+
+namespace mir_onnx
+{
+
+std::vector<mir::Operation::Output *>
+GivenTensorFillNodeConverter::convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const
+{
+ auto values_att = findAttribute(onnx_node, "values");
+ auto shape_att = findAttribute(onnx_node, "shape");
+ assert(values_att && shape_att);
+ assert(values_att->floats_size() > 0 && shape_att->ints_size() > 0);
+ mir::Shape shape(shape_att->ints_size());
+ for (int i = 0; i < shape_att->ints_size(); i++)
+ shape.dim(i) = shape_att->ints(i);
+ mir::TensorVariant tensor(mir::DTYPE::FLOAT32, shape, values_att->floats().data());
+ // TODO Check whether dropping the input_tensors registration here is correct
+ // input_tensors.insert(std::make_pair(onnx_node.output(0), tensor));
+ auto result = createOp<mir::ops::ConstantOp>(graph, tensor);
+ return {result->getOutput(0)};
+}
+
+} // namespace mir_onnx
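
As a small worked example of the conversion above, a GivenTensorFill node carrying shape = [2, 2] and values = [1.0, 2.0, 3.0, 4.0] amounts to:

    mir::Shape shape{2, 2};                       // from the "shape" ints
    const float values[] = {1.f, 2.f, 3.f, 4.f};  // from the "values" floats
    mir::TensorVariant tensor(mir::DTYPE::FLOAT32, shape, values);
    // i.e. a constant 2x2 tensor [[1, 2], [3, 4]], assuming row-major layout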
diff --git a/compiler/mir-onnx-importer/Op/GivenTensorFill.h b/compiler/mir-onnx-importer/Op/GivenTensorFill.h
new file mode 100644
index 000000000..806a4b6ed
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/GivenTensorFill.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+class GivenTensorFillNodeConverter : public NodeConverter
+{
+public:
+ std::vector<mir::Operation::Output *> convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const override;
+};
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/GlobalAveragePool.cpp b/compiler/mir-onnx-importer/Op/GlobalAveragePool.cpp
new file mode 100644
index 000000000..d4f973643
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/GlobalAveragePool.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GlobalAveragePool.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/PoolOp.h"
+
+namespace mir_onnx
+{
+
+std::vector<mir::Operation::Output *>
+GlobalAveragePoolNodeConverter::convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const
+{
+ mir::ops::PoolOp::BorderType border_type = mir::ops::PoolOp::BorderType::ZEROFILLED;
+ mir::ops::PoolOp::PoolingType pool_type = mir::ops::PoolOp::PoolingType::AVG;
+
+ KernelStridesPadding cdata;
+ // Transpose ONNX NCHW to MIR NHWC
+ auto t_input = convertONNXToMIR(graph, inputs[0]);
+
+ // GlobalAveragePool is equivalent to AveragePool with kernel size equal
+ // to the spatial dimension of input tensor
+ cdata.kernel_shape = {t_input->getShape().dim(1), t_input->getShape().dim(2)};
+ cdata.strides_shape = {1, 1};
+
+ auto result =
+ createOp<mir::ops::PoolOp>(graph, t_input, pool_type, cdata.kernel_shape, cdata.strides_shape,
+ cdata.padding_before, cdata.padding_after, border_type);
+ return {convertMIRToONNX(graph, result->getOutput(0))};
+}
+
+} // namespace mir_onnx
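
To make the equivalence concrete: for an ONNX input of shape (N, C, H, W) = (1, 8, 7, 7), convertONNXToMIR transposes it to NHWC (1, 7, 7, 8), so dim(1) and dim(2) are the spatial dimensions and the converter effectively builds:

    // cdata.kernel_shape  = {7, 7}  -> one window spanning the whole plane
    // cdata.strides_shape = {1, 1}  -> a single window position
    // output: (1, 1, 1, 8) in NHWC, transposed back to NCHW as (1, 8, 1, 1)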
diff --git a/compiler/mir-onnx-importer/Op/GlobalAveragePool.h b/compiler/mir-onnx-importer/Op/GlobalAveragePool.h
new file mode 100644
index 000000000..48cfa8e79
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/GlobalAveragePool.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+class GlobalAveragePoolNodeConverter : public NodeConverter
+{
+public:
+ std::vector<mir::Operation::Output *> convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const override;
+};
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Max.cpp b/compiler/mir-onnx-importer/Op/Max.cpp
new file mode 100644
index 000000000..b32ca4343
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Max.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Max.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/ElementwiseOp.h"
+
+namespace mir_onnx
+{
+
+std::vector<mir::Operation::Output *>
+MaxNodeConverter::convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const
+{
+ auto result =
+ createOp<mir::ops::ElementwiseOp>(graph, inputs, mir::ops::ElementwiseOp::OpType::max);
+ return {result->getOutput(0)};
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Max.h b/compiler/mir-onnx-importer/Op/Max.h
new file mode 100644
index 000000000..80797b63d
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Max.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+class MaxNodeConverter : public NodeConverter
+{
+public:
+ std::vector<mir::Operation::Output *> convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const override;
+};
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/MaxPool.cpp b/compiler/mir-onnx-importer/Op/MaxPool.cpp
new file mode 100644
index 000000000..4d415e848
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/MaxPool.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MaxPool.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/PoolOp.h"
+
+namespace mir_onnx
+{
+
+std::vector<mir::Operation::Output *>
+MaxPoolNodeConverter::convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const
+{
+ // TODO Add asserts for the expected inputs and attributes
+ mir::ops::PoolOp::BorderType border_type;
+ mir::ops::PoolOp::PoolingType pool_type;
+
+ KernelStridesPadding cdata;
+ // Transpose ONNX NCHW to MIR NHWC
+ auto t_input = convertONNXToMIR(graph, inputs[0]);
+
+ border_type = mir::ops::PoolOp::BorderType::EMPTY;
+ pool_type = mir::ops::PoolOp::PoolingType::MAX;
+ getKernelStridesPadding(onnx_node, cdata);
+
+ auto result =
+ createOp<mir::ops::PoolOp>(graph, t_input, pool_type, cdata.kernel_shape, cdata.strides_shape,
+ cdata.padding_before, cdata.padding_after, border_type);
+ return {convertMIRToONNX(graph, result->getOutput(0))};
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/MaxPool.h b/compiler/mir-onnx-importer/Op/MaxPool.h
new file mode 100644
index 000000000..cf7058bfd
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/MaxPool.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+class MaxPoolNodeConverter : public NodeConverter
+{
+public:
+ std::vector<mir::Operation::Output *> convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const override;
+};
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Mul.cpp b/compiler/mir-onnx-importer/Op/Mul.cpp
new file mode 100644
index 000000000..e09523325
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Mul.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Mul.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/ElementwiseOp.h"
+
+namespace mir_onnx
+{
+
+std::vector<mir::Operation::Output *>
+MulNodeConverter::convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const
+{
+ auto result =
+ createOp<mir::ops::ElementwiseOp>(graph, inputs, mir::ops::ElementwiseOp::OpType::mul);
+ return {result->getOutput(0)};
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Mul.h b/compiler/mir-onnx-importer/Op/Mul.h
new file mode 100644
index 000000000..a25cf23e9
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Mul.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+class MulNodeConverter : public NodeConverter
+{
+public:
+ std::vector<mir::Operation::Output *> convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const override;
+};
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Pad.cpp b/compiler/mir-onnx-importer/Op/Pad.cpp
new file mode 100644
index 000000000..c3d3a6860
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Pad.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Pad.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/PadOp.h"
+
+namespace mir_onnx
+{
+
+std::vector<mir::Operation::Output *>
+PadNodeConverter::convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const
+{
+ bool found;
+ float value;
+ std::tie(found, value) = getFloatAttribute(onnx_node, "value");
+ if (!found)
+ value = 0.0f; // ONNX defines 0 as the default fill value
+ auto padsAtt = findAttribute(onnx_node, "pads");
+ assert(padsAtt);
+ auto modeAtt = findAttribute(onnx_node, "mode");
+ // "mode" is optional; ONNX defines "constant" as the default
+ std::string mode = modeAtt ? modeAtt->s() : "constant";
+ assert(mode == "constant" && "Only the 'constant' pad mode is supported");
+ const mir::Scalar scalar(reinterpret_cast<const char *>(&value), mir::DTYPE::FLOAT32,
+ sizeof(float));
+ assert(padsAtt->ints_size() > 0);
+ int axis_size = padsAtt->ints_size() / 2;
+ std::vector<std::pair<int32_t, int32_t>> vec(axis_size);
+ auto *data = padsAtt->ints().data();
+ for (int i = 0; i < axis_size; i++)
+ {
+ auto pair = std::make_pair(data[i], data[axis_size + i]);
+ vec[i] = pair;
+ }
+ auto result =
+ createOp<mir::ops::PadOp>(graph, inputs[0], inputs[0]->getShape().rank(), vec, scalar);
+ return {result->getOutput(0)};
+}
+
+} // namespace mir_onnx
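
The "pads" attribute uses the ONNX layout [x1_begin, x2_begin, ..., x1_end, x2_end], which is why the loop above pairs entry i with entry axis_size + i. For a rank-2 input with pads = [0, 1, 0, 2]:

    // data = {0, 1, 0, 2}, axis_size = 2
    // vec[0] = {data[0], data[2]} = {0, 0}  -> axis 0 is not padded
    // vec[1] = {data[1], data[3]} = {1, 2}  -> 1 before, 2 after on axis 1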
diff --git a/compiler/mir-onnx-importer/Op/Pad.h b/compiler/mir-onnx-importer/Op/Pad.h
new file mode 100644
index 000000000..a2801afce
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Pad.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+class PadNodeConverter : public NodeConverter
+{
+public:
+ std::vector<mir::Operation::Output *> convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const override;
+};
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Relu.cpp b/compiler/mir-onnx-importer/Op/Relu.cpp
new file mode 100644
index 000000000..500e449c0
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Relu.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Relu.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/ReluOp.h"
+
+namespace mir_onnx
+{
+
+std::vector<mir::Operation::Output *>
+ReluNodeConverter::convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const
+{
+ assert(inputs.size() == 1);
+ auto result = createOp<mir::ops::ReluOp>(graph, inputs[0]);
+ return {result->getOutput(0)};
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Relu.h b/compiler/mir-onnx-importer/Op/Relu.h
new file mode 100644
index 000000000..4b2ee9e87
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Relu.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+class ReluNodeConverter : public NodeConverter
+{
+public:
+ std::vector<mir::Operation::Output *> convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const override;
+};
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Reshape.cpp b/compiler/mir-onnx-importer/Op/Reshape.cpp
new file mode 100644
index 000000000..f764b3ed6
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Reshape.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Reshape.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/Tensor.h"
+
+#include "mir/ops/ConstantOp.h"
+#include "mir/ops/ReshapeOp.h"
+
+namespace mir_onnx
+{
+
+std::vector<mir::Operation::Output *>
+ReshapeNodeConverter::convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const
+{
+ // The original shape
+ const auto &in_shape = inputs[0]->getShape();
+
+ // Input tensor describing the new shape
+ // TODO: can the shape input be non-constant?
+ auto *op = dynamic_cast<mir::ops::ConstantOp *>(inputs[1]->getNode());
+ assert(op && "We support constants only");
+ auto shape_tensor = op->getValue();
+ mir::Shape shape_tensor_shape = shape_tensor.getShape();
+ assert(shape_tensor_shape.rank() == 1);
+ // The rank of the new shape
+ auto cnt = shape_tensor_shape.numElements();
+ // The vector to build the new shape from
+ std::vector<int32_t> shape_vector(cnt);
+ mir::ShapeRange out_range(shape_tensor_shape);
+ mir::Tensor<int64_t> tensor_accessor(shape_tensor);
+
+ int i = 0;
+ for (auto idx : out_range)
+ {
+ if (tensor_accessor.at(idx) == 0)
+ shape_vector[i] = in_shape.dim(i);
+ else if (tensor_accessor.at(idx) == -1)
+ shape_vector[i] = mir::Shape::autoDim;
+ else
+ shape_vector[i] = tensor_accessor.at(idx);
+ i++;
+ }
+ auto out_shape = mir::Shape(shape_vector);
+ auto result = createOp<mir::ops::ReshapeOp>(graph, inputs[0], out_shape);
+ return {result->getOutput(0)};
+}
+
+} // namespace mir_onnx
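
The 0 and -1 handling mirrors the ONNX Reshape rules: 0 copies the input dimension at the same position, and a single -1 (mapped to mir::Shape::autoDim) is inferred from the element count. For in_shape = (2, 3, 4) and a shape tensor holding [0, -1]:

    // shape_vector[0] = in_shape.dim(0) = 2   (0 -> copy the input dim)
    // shape_vector[1] = mir::Shape::autoDim   (-1 -> inferred later as 12)
    // out_shape = (2, 12), since 2 * 12 == in_shape.numElements() == 24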
diff --git a/compiler/mir-onnx-importer/Op/Reshape.h b/compiler/mir-onnx-importer/Op/Reshape.h
new file mode 100644
index 000000000..c8558d8d9
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Reshape.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+class ReshapeNodeConverter : public NodeConverter
+{
+public:
+ std::vector<mir::Operation::Output *> convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const override;
+};
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Scale.cpp b/compiler/mir-onnx-importer/Op/Scale.cpp
new file mode 100644
index 000000000..f888a530f
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Scale.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Scale.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/ConstantOp.h"
+#include "mir/ops/ScaleOp.h"
+
+namespace mir_onnx
+{
+
+std::vector<mir::Operation::Output *>
+ScaleNodeConverter::convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const
+{
+ bool found;
+ float value;
+ std::tie(found, value) = getFloatAttribute(onnx_node, "scale");
+ float scale_val = found ? value : 1.0f;
+ const auto &shape = inputs[0]->getShape();
+ auto scale_tensor = createScalarTensor(scale_val, shape);
+ auto scale = createOp<mir::ops::ConstantOp>(graph, scale_tensor)->getOutput(0);
+ auto result = createOp<mir::ops::ScaleOp>(graph, inputs[0], scale);
+ return {result->getOutput(0)};
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Scale.h b/compiler/mir-onnx-importer/Op/Scale.h
new file mode 100644
index 000000000..55c2e3a09
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Scale.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+class ScaleNodeConverter : public NodeConverter
+{
+public:
+ std::vector<mir::Operation::Output *> convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const override;
+};
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Shape.cpp b/compiler/mir-onnx-importer/Op/Shape.cpp
new file mode 100644
index 000000000..7344d4511
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Shape.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Shape.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/TensorVariant.h"
+
+#include "mir/ops/ConstantOp.h"
+
+namespace mir_onnx
+{
+
+std::vector<mir::Operation::Output *>
+ShapeNodeConverter::convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const
+{
+ const auto &input_shape = inputs[0]->getShape();
+ int size = input_shape.rank();
+ mir::Shape output_shape{size};
+ std::vector<float> data(static_cast<std::size_t>(size));
+ for (int i = 0; i < size; i++)
+ {
+ data[i] = input_shape.dim(i);
+ }
+ mir::TensorVariant tensor(mir::DTYPE::FLOAT32, output_shape, data.data());
+ auto result = createOp<mir::ops::ConstantOp>(graph, tensor);
+ return {result->getOutput(0)};
+}
+
+} // namespace mir_onnx
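
For example, an input of shape (2, 3, 4) yields a rank-1 constant of shape {3} holding [2, 3, 4]. Note that the dimensions are emitted as FLOAT32, although the ONNX specification defines the Shape output as int64; presumably the downstream MIR consumers accept the float-encoded shape.

    // input_shape = (2, 3, 4) -> rank 3
    // result: ConstantOp with shape {3} and data {2.f, 3.f, 4.f}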
diff --git a/compiler/mir-onnx-importer/Op/Shape.h b/compiler/mir-onnx-importer/Op/Shape.h
new file mode 100644
index 000000000..52ab97ff1
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Shape.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+class ShapeNodeConverter : public NodeConverter
+{
+public:
+ std::vector<mir::Operation::Output *> convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const override;
+};
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Sigmoid.cpp b/compiler/mir-onnx-importer/Op/Sigmoid.cpp
new file mode 100644
index 000000000..e537b07d6
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Sigmoid.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Sigmoid.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/SigmoidOp.h"
+
+namespace mir_onnx
+{
+
+std::vector<mir::Operation::Output *>
+SigmoidNodeConverter::convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const
+{
+ assert(inputs.size() == 1);
+ auto result = createOp<mir::ops::SigmoidOp>(graph, inputs[0]);
+ return {result->getOutput(0)};
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Sigmoid.h b/compiler/mir-onnx-importer/Op/Sigmoid.h
new file mode 100644
index 000000000..b738c232b
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Sigmoid.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+class SigmoidNodeConverter : public NodeConverter
+{
+public:
+ std::vector<mir::Operation::Output *> convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const override;
+};
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Softmax.cpp b/compiler/mir-onnx-importer/Op/Softmax.cpp
new file mode 100644
index 000000000..7fc338de3
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Softmax.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Softmax.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/SoftmaxOp.h"
+
+namespace mir_onnx
+{
+
+std::vector<mir::Operation::Output *>
+SoftmaxNodeConverter::convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const
+{
+ int axis;
+ bool found;
+ std::tie(found, axis) = getIntAttribute(onnx_node, "axis");
+ axis = found ? axis : 1; // ONNX defines the default softmax axis as 1
+ auto result = createOp<mir::ops::SoftmaxOp>(graph, inputs[0], axis);
+ return {result->getOutput(0)};
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Softmax.h b/compiler/mir-onnx-importer/Op/Softmax.h
new file mode 100644
index 000000000..4600ee740
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Softmax.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+class SoftmaxNodeConverter : public NodeConverter
+{
+public:
+ std::vector<mir::Operation::Output *> convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const override;
+};
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Sum.cpp b/compiler/mir-onnx-importer/Op/Sum.cpp
new file mode 100644
index 000000000..d2fa94c59
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Sum.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Sum.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/ElementwiseOp.h"
+
+namespace mir_onnx
+{
+
+std::vector<mir::Operation::Output *>
+SumNodeConverter::convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const
+{
+ auto result =
+ createOp<mir::ops::ElementwiseOp>(graph, inputs, mir::ops::ElementwiseOp::OpType::add);
+ return {result->getOutput(0)};
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Sum.h b/compiler/mir-onnx-importer/Op/Sum.h
new file mode 100644
index 000000000..a9c64b815
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Sum.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+class SumNodeConverter : public NodeConverter
+{
+public:
+ std::vector<mir::Operation::Output *> convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const override;
+};
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Unsqueeze.cpp b/compiler/mir-onnx-importer/Op/Unsqueeze.cpp
new file mode 100644
index 000000000..9487b9052
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Unsqueeze.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Unsqueeze.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/ReshapeOp.h"
+
+namespace mir_onnx
+{
+
+std::vector<mir::Operation::Output *>
+UnsqueezeNodeConverter::convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const
+{
+ auto *axes = findAttribute(onnx_node, "axes");
+ assert(axes && axes->ints_size());
+ const mir::Shape &input_shape = inputs[0]->getShape();
+ const int out_rank = input_shape.rank() + axes->ints_size();
+ mir::Shape out_shape(out_rank);
+ auto ints_iterator = axes->ints().begin();
+ int j = 0;
+ for (int i = 0; i < out_rank; i++)
+ {
+ if (ints_iterator < axes->ints().end() && i == *ints_iterator)
+ {
+ out_shape.dim(i) = 1;
+ ints_iterator++;
+ }
+ else
+ {
+ out_shape.dim(i) = input_shape.dim(j);
+ j++;
+ }
+ }
+ auto result = createOp<mir::ops::ReshapeOp>(graph, inputs[0], out_shape);
+ return {result->getOutput(0)};
+}
+
+} // namespace mir_onnx
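
A short trace of the merge loop above, for input_shape = (3, 4) and axes = [0, 3] (the iterator walk assumes the axes are sorted in ascending order):

    // out_rank = 2 + 2 = 4
    // i = 0: hits axes[0] -> out_shape.dim(0) = 1
    // i = 1: no hit       -> out_shape.dim(1) = input_shape.dim(0) = 3
    // i = 2: no hit       -> out_shape.dim(2) = input_shape.dim(1) = 4
    // i = 3: hits axes[1] -> out_shape.dim(3) = 1
    // out_shape = (1, 3, 4, 1)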
diff --git a/compiler/mir-onnx-importer/Op/Unsqueeze.h b/compiler/mir-onnx-importer/Op/Unsqueeze.h
new file mode 100644
index 000000000..c9eae0b13
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Unsqueeze.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+class UnsqueezeNodeConverter : public NodeConverter
+{
+public:
+ std::vector<mir::Operation::Output *> convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const override;
+};
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Upsample.cpp b/compiler/mir-onnx-importer/Op/Upsample.cpp
new file mode 100644
index 000000000..4353c3ad5
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Upsample.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Upsample.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/Tensor.h"
+
+#include "mir/ops/ConstantOp.h"
+#include "mir/ops/ResizeOp.h"
+
+namespace mir_onnx
+{
+
+std::vector<mir::Operation::Output *>
+UpsampleNodeConverter::convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const
+{
+ bool success;
+ std::string mode;
+ std::tie(success, mode) = getStringAttribute(onnx_node, "mode");
+ if (!success)
+ mode = "nearest";
+ assert(mode == "nearest" && "Unsupported upsample mode!");
+
+ // relies on attributes being lifted to constants (ONNX optimization pass)
+ assert(inputs.size() > 1);
+ auto *scales = dynamic_cast<mir::ops::ConstantOp *>(inputs[1]->getNode());
+ assert(scales && "The 'scales' input must be a constant tensor");
+ auto scales_tensor = mir::Tensor<float>(scales->getValue());
+ int rank = inputs[0]->getShape().rank();
+ assert(scales_tensor.getShape().numElements() == rank &&
+ "The number of elements of 'scales' should be the same as the rank of input 'X'");
+ assert(rank == 4 && "Only rank 4 is supported");
+ std::vector<float> scales_vector(4);
+ const int onnx2mir[] = {0, 3, 1, 2};
+ assert(scales_tensor.getShape().rank() == 1 && "Scales must be a 1-D tensor");
+ for (int i = 0; i < scales_tensor.getShape().numElements(); i++)
+ scales_vector[onnx2mir[i]] = scales_tensor.atOffset(i);
+ return {convertMIRToONNX(
+ graph,
+ createOp<mir::ops::ResizeOp>(graph, convertONNXToMIR(graph, inputs[0]),
+ mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
+ ->getOutput(0))};
+}
+
+} // namespace mir_onnx
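
The onnx2mir table reorders the per-axis scales from ONNX NCHW order to MIR NHWC order before they reach ResizeOp. For scales = [1, 1, 2, 2], i.e. doubling both spatial dimensions:

    // ONNX order: N = 1, C = 1, H = 2, W = 2
    // onnx2mir = {0, 3, 1, 2} sends C to slot 3, H to slot 1, W to slot 2
    // scales_vector = {1, 2, 2, 1} -> NHWC: H and W doubled, N and C kept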
diff --git a/compiler/mir-onnx-importer/Op/Upsample.h b/compiler/mir-onnx-importer/Op/Upsample.h
new file mode 100644
index 000000000..9c2d2a54e
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Upsample.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+class UpsampleNodeConverter : public NodeConverter
+{
+public:
+ std::vector<mir::Operation::Output *> convert(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &inputs,
+ mir::Graph *graph) const override;
+};
+
+} // namespace mir_onnx