summaryrefslogtreecommitdiff
path: root/compiler/mir-onnx-importer
diff options
context:
space:
mode:
authorChunseok Lee <chunseok.lee@samsung.com>2020-04-23 14:45:49 +0900
committerChunseok Lee <chunseok.lee@samsung.com>2020-04-23 14:45:49 +0900
commite2ef8438a24f7c56a0744eb579a6e293ee2fbf8e (patch)
tree44a1a7951d168dd4370e13593ed03f4bc6d920c5 /compiler/mir-onnx-importer
parent302e6564a7a76109e1178207e44e45a58631c477 (diff)
downloadnnfw-e2ef8438a24f7c56a0744eb579a6e293ee2fbf8e.tar.gz
nnfw-e2ef8438a24f7c56a0744eb579a6e293ee2fbf8e.tar.bz2
nnfw-e2ef8438a24f7c56a0744eb579a6e293ee2fbf8e.zip
Imported Upstream version 1.4.0upstream/1.4.0submit/tizen/20200423.054851
Diffstat (limited to 'compiler/mir-onnx-importer')
-rw-r--r--compiler/mir-onnx-importer/AttributeHelpers.h105
-rw-r--r--compiler/mir-onnx-importer/CMakeLists.txt119
-rw-r--r--compiler/mir-onnx-importer/ConvPoolHelpers.cpp113
-rw-r--r--compiler/mir-onnx-importer/ConvPoolHelpers.h44
-rw-r--r--compiler/mir-onnx-importer/ONNXHelpers.cpp188
-rw-r--r--compiler/mir-onnx-importer/ONNXHelpers.h50
-rw-r--r--compiler/mir-onnx-importer/ONNXImporterImpl.cpp241
-rw-r--r--compiler/mir-onnx-importer/ONNXImporterImpl.h35
-rw-r--r--compiler/mir-onnx-importer/ONNXNodeConverterRegistry.cpp142
-rw-r--r--compiler/mir-onnx-importer/ONNXNodeConverterRegistry.h80
-rw-r--r--compiler/mir-onnx-importer/ONNXNodeConverterRegistry.test.cpp64
-rw-r--r--compiler/mir-onnx-importer/ONNXOpRegistration.h256
-rw-r--r--compiler/mir-onnx-importer/Op/Abs.cpp47
-rw-r--r--compiler/mir-onnx-importer/Op/Abs.h30
-rw-r--r--compiler/mir-onnx-importer/Op/Add.cpp53
-rw-r--r--compiler/mir-onnx-importer/Op/Add.h31
-rw-r--r--compiler/mir-onnx-importer/Op/AveragePool.cpp99
-rw-r--r--compiler/mir-onnx-importer/Op/AveragePool.h31
-rw-r--r--compiler/mir-onnx-importer/Op/BatchNormalization.cpp119
-rw-r--r--compiler/mir-onnx-importer/Op/BatchNormalization.h32
-rw-r--r--compiler/mir-onnx-importer/Op/Concat.cpp54
-rw-r--r--compiler/mir-onnx-importer/Op/Concat.h30
-rw-r--r--compiler/mir-onnx-importer/Op/Constant.cpp61
-rw-r--r--compiler/mir-onnx-importer/Op/Constant.h31
-rw-r--r--compiler/mir-onnx-importer/Op/Conv.cpp156
-rw-r--r--compiler/mir-onnx-importer/Op/Conv.h29
-rw-r--r--compiler/mir-onnx-importer/Op/ConvTranspose.cpp138
-rw-r--r--compiler/mir-onnx-importer/Op/ConvTranspose.h29
-rw-r--r--compiler/mir-onnx-importer/Op/Div.cpp38
-rw-r--r--compiler/mir-onnx-importer/Op/Div.h29
-rw-r--r--compiler/mir-onnx-importer/Op/Dropout.cpp54
-rw-r--r--compiler/mir-onnx-importer/Op/Dropout.h32
-rw-r--r--compiler/mir-onnx-importer/Op/Equal.cpp43
-rw-r--r--compiler/mir-onnx-importer/Op/Equal.h30
-rw-r--r--compiler/mir-onnx-importer/Op/Expand.cpp43
-rw-r--r--compiler/mir-onnx-importer/Op/Expand.h29
-rw-r--r--compiler/mir-onnx-importer/Op/Flatten.cpp58
-rw-r--r--compiler/mir-onnx-importer/Op/Flatten.h30
-rw-r--r--compiler/mir-onnx-importer/Op/Gather.cpp40
-rw-r--r--compiler/mir-onnx-importer/Op/Gather.h29
-rw-r--r--compiler/mir-onnx-importer/Op/Gemm.cpp120
-rw-r--r--compiler/mir-onnx-importer/Op/Gemm.h33
-rw-r--r--compiler/mir-onnx-importer/Op/GlobalAveragePool.cpp50
-rw-r--r--compiler/mir-onnx-importer/Op/GlobalAveragePool.h29
-rw-r--r--compiler/mir-onnx-importer/Op/Greater.cpp47
-rw-r--r--compiler/mir-onnx-importer/Op/Greater.h30
-rw-r--r--compiler/mir-onnx-importer/Op/Identity.cpp30
-rw-r--r--compiler/mir-onnx-importer/Op/Identity.h29
-rw-r--r--compiler/mir-onnx-importer/Op/Less.cpp47
-rw-r--r--compiler/mir-onnx-importer/Op/Less.h30
-rw-r--r--compiler/mir-onnx-importer/Op/MatMul.cpp50
-rw-r--r--compiler/mir-onnx-importer/Op/MatMul.h30
-rw-r--r--compiler/mir-onnx-importer/Op/Max.cpp54
-rw-r--r--compiler/mir-onnx-importer/Op/Max.h31
-rw-r--r--compiler/mir-onnx-importer/Op/MaxPool.cpp107
-rw-r--r--compiler/mir-onnx-importer/Op/MaxPool.h31
-rw-r--r--compiler/mir-onnx-importer/Op/Mul.cpp35
-rw-r--r--compiler/mir-onnx-importer/Op/Mul.h29
-rw-r--r--compiler/mir-onnx-importer/Op/Pad.cpp70
-rw-r--r--compiler/mir-onnx-importer/Op/Pad.h30
-rw-r--r--compiler/mir-onnx-importer/Op/Reciprocal.cpp53
-rw-r--r--compiler/mir-onnx-importer/Op/Reciprocal.h30
-rw-r--r--compiler/mir-onnx-importer/Op/ReduceMean.cpp60
-rw-r--r--compiler/mir-onnx-importer/Op/ReduceMean.h29
-rw-r--r--compiler/mir-onnx-importer/Op/Relu.cpp46
-rw-r--r--compiler/mir-onnx-importer/Op/Relu.h30
-rw-r--r--compiler/mir-onnx-importer/Op/Reshape.cpp97
-rw-r--r--compiler/mir-onnx-importer/Op/Reshape.h30
-rw-r--r--compiler/mir-onnx-importer/Op/Shape.cpp46
-rw-r--r--compiler/mir-onnx-importer/Op/Shape.h29
-rw-r--r--compiler/mir-onnx-importer/Op/Sigmoid.cpp46
-rw-r--r--compiler/mir-onnx-importer/Op/Sigmoid.h30
-rw-r--r--compiler/mir-onnx-importer/Op/Softmax.cpp40
-rw-r--r--compiler/mir-onnx-importer/Op/Softmax.h29
-rw-r--r--compiler/mir-onnx-importer/Op/Sqrt.cpp46
-rw-r--r--compiler/mir-onnx-importer/Op/Sqrt.h30
-rw-r--r--compiler/mir-onnx-importer/Op/Sub.cpp53
-rw-r--r--compiler/mir-onnx-importer/Op/Sub.h31
-rw-r--r--compiler/mir-onnx-importer/Op/Sum.cpp41
-rw-r--r--compiler/mir-onnx-importer/Op/Sum.h29
-rw-r--r--compiler/mir-onnx-importer/Op/Tanh.cpp46
-rw-r--r--compiler/mir-onnx-importer/Op/Tanh.h30
-rw-r--r--compiler/mir-onnx-importer/Op/Transpose.cpp57
-rw-r--r--compiler/mir-onnx-importer/Op/Transpose.h29
-rw-r--r--compiler/mir-onnx-importer/Op/Unsqueeze.cpp56
-rw-r--r--compiler/mir-onnx-importer/Op/Unsqueeze.h29
-rw-r--r--compiler/mir-onnx-importer/Op/Upsample.cpp124
-rw-r--r--compiler/mir-onnx-importer/Op/Upsample.h31
-rw-r--r--compiler/mir-onnx-importer/requires.cmake2
89 files changed, 5003 insertions, 0 deletions
diff --git a/compiler/mir-onnx-importer/AttributeHelpers.h b/compiler/mir-onnx-importer/AttributeHelpers.h
new file mode 100644
index 000000000..d5cc1501a
--- /dev/null
+++ b/compiler/mir-onnx-importer/AttributeHelpers.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_ATTRIBUTE_HELPERS_H
+#define MIR_ONNX_ATTRIBUTE_HELPERS_H
+
+#include "onnx/onnx.pb.h"
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace mir_onnx
+{
+
+template <typename T> T getAttributeValue(const onnx::AttributeProto &attribute) = delete;
+
+template <> inline float getAttributeValue(const onnx::AttributeProto &attribute)
+{
+ assert(attribute.type() == onnx::AttributeProto::FLOAT);
+ return attribute.f();
+}
+
+template <> inline std::int64_t getAttributeValue(const onnx::AttributeProto &attribute)
+{
+ assert(attribute.type() == onnx::AttributeProto::INT);
+ return attribute.i();
+}
+
+template <> inline std::string getAttributeValue(const onnx::AttributeProto &attribute)
+{
+ assert(attribute.type() == onnx::AttributeProto::STRING);
+ return attribute.s();
+}
+
+template <> inline onnx::TensorProto getAttributeValue(const onnx::AttributeProto &attribute)
+{
+ assert(attribute.type() == onnx::AttributeProto::TENSOR);
+ return attribute.t();
+}
+
+template <>
+inline std::vector<std::int32_t> getAttributeValue(const onnx::AttributeProto &attribute)
+{
+ assert(attribute.type() == onnx::AttributeProto::INTS);
+ // TODO Check that values fit.
+ return {attribute.ints().cbegin(), attribute.ints().cend()};
+}
+
+template <>
+inline std::vector<std::int64_t> getAttributeValue(const onnx::AttributeProto &attribute)
+{
+ assert(attribute.type() == onnx::AttributeProto::INTS);
+ return {attribute.ints().cbegin(), attribute.ints().cend()};
+}
+
+inline const onnx::AttributeProto *findAttribute(const onnx::NodeProto &node,
+ const std::string &name)
+{
+ const auto &attributes = node.attribute();
+ const auto it = std::find_if(
+ attributes.cbegin(), attributes.cend(),
+ [&name](const onnx::AttributeProto &attribute) { return attribute.name() == name; });
+ if (it == attributes.cend())
+ return nullptr;
+ return &*it;
+}
+
+template <typename T> T getAttributeValue(const onnx::NodeProto &node, const std::string &name)
+{
+ const auto *attribute = findAttribute(node, name);
+ if (attribute == nullptr)
+ throw std::runtime_error("Cannot find attribute '" + name + "' in node '" + node.name() + "'.");
+ return getAttributeValue<T>(*attribute);
+}
+
+template <typename T>
+T getAttributeValue(const onnx::NodeProto &node, const std::string &name, T default_value)
+{
+ const auto *attribute = findAttribute(node, name);
+ if (attribute == nullptr)
+ return std::move(default_value);
+ return getAttributeValue<T>(*attribute);
+}
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_ATTRIBUTE_HELPERS_H
diff --git a/compiler/mir-onnx-importer/CMakeLists.txt b/compiler/mir-onnx-importer/CMakeLists.txt
new file mode 100644
index 000000000..5f27bc041
--- /dev/null
+++ b/compiler/mir-onnx-importer/CMakeLists.txt
@@ -0,0 +1,119 @@
# The importer needs the ONNX proto sources and protobuf itself; quietly skip
# building this target when either prerequisite is unavailable.
nnas_find_package(ONNXSource EXACT 1.6.0 QUIET)
nnas_find_package(Protobuf QUIET)

if (NOT ONNXSource_FOUND)
  return()
endif ()

if (NOT Protobuf_FOUND)
  return()
endif ()

# Generate C++ bindings for onnx.proto into the build tree.
Protobuf_Generate(MIR_ONNX_PROTO
                  ${CMAKE_CURRENT_BINARY_DIR}/generated
                  ${ONNXSource_DIR}
                  onnx/onnx.proto)

# Static library wrapping the generated protobuf code; PIC so it can be linked
# into shared objects.
add_library(mir_onnx_proto STATIC ${MIR_ONNX_PROTO_SOURCES})
set_target_properties(mir_onnx_proto PROPERTIES POSITION_INDEPENDENT_CODE ON)
target_include_directories(mir_onnx_proto PUBLIC ${MIR_ONNX_PROTO_INCLUDE_DIRS})
target_link_libraries(mir_onnx_proto PUBLIC libprotobuf)

# All importer sources, including headers so IDEs pick them up.
set(MIR_ONNX_IMPORTER_SOURCES
    AttributeHelpers.h
    ConvPoolHelpers.cpp
    ConvPoolHelpers.h
    ONNXHelpers.cpp
    ONNXHelpers.h
    ONNXImporterImpl.cpp
    ONNXImporterImpl.h
    ONNXNodeConverterRegistry.h
    ONNXNodeConverterRegistry.cpp
    ONNXOpRegistration.h
    Op/Abs.cpp
    Op/Abs.h
    Op/Add.cpp
    Op/Add.h
    Op/AveragePool.cpp
    Op/AveragePool.h
    Op/BatchNormalization.cpp
    Op/BatchNormalization.h
    Op/Concat.cpp
    Op/Concat.h
    Op/Constant.cpp
    Op/Constant.h
    Op/Conv.cpp
    Op/Conv.h
    Op/ConvTranspose.cpp
    Op/ConvTranspose.h
    Op/Div.cpp
    Op/Div.h
    Op/Dropout.cpp
    Op/Dropout.h
    Op/Equal.cpp
    Op/Equal.h
    Op/Expand.cpp
    Op/Expand.h
    Op/Flatten.cpp
    Op/Flatten.h
    Op/Gather.cpp
    Op/Gather.h
    Op/Greater.cpp
    Op/Greater.h
    Op/Gemm.cpp
    Op/Gemm.h
    Op/Identity.cpp
    Op/Identity.h
    Op/Less.cpp
    Op/Less.h
    Op/MatMul.cpp
    Op/MatMul.h
    Op/GlobalAveragePool.cpp
    Op/GlobalAveragePool.h
    Op/Max.cpp
    Op/Max.h
    Op/MaxPool.cpp
    Op/MaxPool.h
    Op/Mul.cpp
    Op/Mul.h
    Op/Pad.cpp
    Op/Pad.h
    Op/Reciprocal.cpp
    Op/Reciprocal.h
    Op/ReduceMean.cpp
    Op/ReduceMean.h
    Op/Relu.cpp
    Op/Relu.h
    Op/Reshape.cpp
    Op/Reshape.h
    Op/Shape.cpp
    Op/Shape.h
    Op/Sigmoid.cpp
    Op/Sigmoid.h
    Op/Softmax.cpp
    Op/Softmax.h
    Op/Sqrt.cpp
    Op/Sqrt.h
    Op/Sub.cpp
    Op/Sub.h
    Op/Sum.cpp
    Op/Sum.h
    Op/Tanh.cpp
    Op/Tanh.h
    Op/Transpose.cpp
    Op/Transpose.h
    Op/Unsqueeze.cpp
    Op/Unsqueeze.h
    Op/Upsample.cpp
    Op/Upsample.h)

add_library(mir_onnx_importer STATIC ${MIR_ONNX_IMPORTER_SOURCES})
set_target_properties(mir_onnx_importer PROPERTIES POSITION_INDEPENDENT_CODE ON)
target_include_directories(mir_onnx_importer PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
# mir_onnx_proto and mir are part of the public link interface; stdex and the
# MIR interpreter (used for constant folding) are implementation details.
target_link_libraries(mir_onnx_importer PUBLIC mir_onnx_proto mir PRIVATE stdex mir_interpreter)

# Unit tests: every *.test.cpp in this directory.
nnas_find_package(GTest REQUIRED)

file(GLOB_RECURSE TEST_SOURCES "*.test.cpp")
GTest_AddTest(mir_onnx_importer_test ${TEST_SOURCES})
target_link_libraries(mir_onnx_importer_test mir_onnx_importer)
diff --git a/compiler/mir-onnx-importer/ConvPoolHelpers.cpp b/compiler/mir-onnx-importer/ConvPoolHelpers.cpp
new file mode 100644
index 000000000..d98e6deae
--- /dev/null
+++ b/compiler/mir-onnx-importer/ConvPoolHelpers.cpp
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
#include "ConvPoolHelpers.h"

#include <algorithm>
#include <cassert>
#include <stdexcept>
+
+namespace mir_onnx
+{
+
+void inferAutoPadding(const std::string &pad_type, const mir::Shape &input_shape,
+ const std::vector<std::int32_t> &dilations,
+ const std::vector<std::int32_t> &strides,
+ const std::vector<std::int32_t> &window_size,
+ std::vector<std::int32_t> &padding_before,
+ std::vector<std::int32_t> &padding_after)
+{
+ constexpr int num_spatial_dims = 2;
+
+ if (pad_type == "NOTSET")
+ {
+ // Do nothing.
+ }
+ else if (pad_type == "VALID")
+ {
+ padding_before.assign(num_spatial_dims, 0);
+ padding_after.assign(num_spatial_dims, 0);
+ }
+ else
+ {
+ padding_before.resize(num_spatial_dims);
+ padding_after.resize(num_spatial_dims);
+
+ assert(dilations.size() == num_spatial_dims);
+ assert(strides.size() == num_spatial_dims);
+ assert(window_size.size() == num_spatial_dims);
+
+ for (int i = 0; i < num_spatial_dims; ++i)
+ {
+ const std::int32_t eff_window_size = (window_size[i] - 1) * dilations[i] + 1;
+ // Assuming input has NCHW format.
+ const std::int32_t residual = input_shape.dim(2 + i) % strides[i];
+ const std::int32_t total_pad = std::max(
+ INT32_C(0), residual == 0 ? eff_window_size - strides[i] : eff_window_size - residual);
+ if (pad_type == "SAME_UPPER")
+ {
+ padding_before[i] = total_pad / 2;
+ padding_after[i] = (total_pad + 1) / 2;
+ }
+ else
+ {
+ assert(pad_type == "SAME_LOWER");
+ padding_before[i] = (total_pad + 1) / 2;
+ padding_after[i] = total_pad / 2;
+ }
+ }
+ }
+}
+
+std::vector<std::int32_t> fixPads(const mir::Shape &input_shape,
+ const std::vector<std::int32_t> &pads,
+ const std::vector<std::int32_t> &strides,
+ const std::vector<std::int32_t> &dilation,
+ const std::vector<std::int32_t> &kernel_shape)
+{
+ assert(pads.size() % 2 == 0);
+ int spatial_dimensions = pads.size() / 2;
+ std::vector<std::int32_t> fixed_pads(pads);
+ for (int i = 0; i < spatial_dimensions; ++i)
+ {
+ auto effective_window_dim = (kernel_shape[i] - 1) * dilation[i] + 1;
+ auto effective_input_dim = input_shape.dim(i + 2) + pads[i] + pads[i + spatial_dimensions];
+ // Computing number of "redundant" elements at the end of input dimension
+ // for example we have effective_input_dim == 8, effective_window)dim == 3 and stride == 2:
+ // [1][2][3][4][5][6][7][8] - input
+ // * * * . . . . - first kernel application
+ // . . * * * . . - second kernel application
+ // . . . . * * * - third kernel application
+ // element 8 is unused (remainder should be 1)
+ //
+ // glossary:
+ // i - effective input size
+ // w - effective window size
+ // s - stride
+ // n - number of kernel applications (3 in example)
+ //
+ // i = s * (n-1) + w + r
+ // r = i - w - s * (n-1)
+ // n - is the maximum number of windows we can fit into input, so this formula is equal to
+ // r = (i - w) % s
+ auto remainder = (effective_input_dim - effective_window_dim) % strides[i];
+
+ // remove redundant pad, but no more than there are padding
+ fixed_pads[i + spatial_dimensions] -= std::min(remainder, pads[i + spatial_dimensions]);
+ }
+ return fixed_pads;
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/ConvPoolHelpers.h b/compiler/mir-onnx-importer/ConvPoolHelpers.h
new file mode 100644
index 000000000..099392f4f
--- /dev/null
+++ b/compiler/mir-onnx-importer/ConvPoolHelpers.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
#ifndef MIR_ONNX_CONV_POOL_HELPERS_H
#define MIR_ONNX_CONV_POOL_HELPERS_H

#include "mir/Shape.h"

#include <cstdint>
#include <string>
#include <vector>

namespace mir_onnx
{

/// @brief Infer explicit spatial paddings from the ONNX "auto_pad" attribute
///        ("NOTSET", "VALID", "SAME_UPPER", "SAME_LOWER").
/// @param input_shape    Input shape; NCHW layout is assumed by the implementation.
/// @param padding_before Output parameter: padding at the start of each spatial dim.
/// @param padding_after  Output parameter: padding at the end of each spatial dim.
void inferAutoPadding(const std::string &pad_type, const mir::Shape &input_shape,
                      const std::vector<std::int32_t> &dilations,
                      const std::vector<std::int32_t> &strides,
                      const std::vector<std::int32_t> &window_size,
                      std::vector<std::int32_t> &padding_before,
                      std::vector<std::int32_t> &padding_after);

/// @brief Return a copy of @p pads with end padding the sliding window can
///        never cover trimmed away.
/// @param pads Per-axis begin paddings followed by per-axis end paddings.
std::vector<std::int32_t> fixPads(const mir::Shape &input_shape,
                                  const std::vector<std::int32_t> &pads,
                                  const std::vector<std::int32_t> &strides,
                                  const std::vector<std::int32_t> &dilation,
                                  const std::vector<std::int32_t> &kernel_shape);

} // namespace mir_onnx

#endif // MIR_ONNX_CONV_POOL_HELPERS_H
diff --git a/compiler/mir-onnx-importer/ONNXHelpers.cpp b/compiler/mir-onnx-importer/ONNXHelpers.cpp
new file mode 100644
index 000000000..f3a9d182d
--- /dev/null
+++ b/compiler/mir-onnx-importer/ONNXHelpers.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "MirInterpreter.h"
+#include "mir/ops/ConstantOp.h"
+
+#include "mir/ShapeRange.h"
+#include "mir/Tensor.h"
+#include "mir/TensorVariant.h"
+#include "mir/Index.h"
+
+namespace mir_onnx
+{
+
+const int64_t firstUnknownOpset = 13;
+
+template <typename T> static mir::Shape constantToShapeT(const mir::TensorVariant &t)
+{
+ const mir::Shape &t_shape = t.getShape();
+ mir::Tensor<T> input(t);
+ if (t_shape.rank() != 1)
+ throw std::runtime_error("only 1-d tensors supported as a shape input");
+
+ mir::Shape target_shape;
+ std::int32_t rank = t_shape.dim(0);
+ target_shape.resize(rank);
+ for (int i = 0; i < rank; ++i)
+ target_shape.dim(i) = static_cast<std::int32_t>(input.at(mir::Index{i}));
+ return target_shape;
+}
+
+mir::Shape constantToShape(const mir::ops::ConstantOp *op)
+{
+ const auto &t = op->getValue();
+ mir::DataType d_type = t.getElementType();
+
+ if (t.getType().isQuantized())
+ throw std::runtime_error("unsupported data type of shape operator");
+
+ switch (d_type)
+ {
+ case mir::DataType::FLOAT32:
+ return constantToShapeT<float>(t);
+ break;
+ case mir::DataType::FLOAT64:
+ return constantToShapeT<double>(t);
+ break;
+ case mir::DataType::INT32:
+ return constantToShapeT<int32_t>(t);
+ break;
+ case mir::DataType::INT64:
+ return constantToShapeT<int64_t>(t);
+ break;
+ case mir::DataType::UINT8:
+ return constantToShapeT<uint8_t>(t);
+ break;
+ default:
+ throw std::runtime_error{"Unknown datatype in constant"};
+ break;
+ }
+}
+
+mir::DataType onnxDataTypeToMirDataType(onnx::TensorProto::DataType type)
+{
+ switch (type)
+ {
+ case onnx::TensorProto_DataType_UINT8:
+ return mir::DataType::UINT8;
+ break;
+ case onnx::TensorProto_DataType_INT32:
+ return mir::DataType::INT32;
+ break;
+ case onnx::TensorProto_DataType_INT64:
+ return mir::DataType::INT64;
+ break;
+ case onnx::TensorProto_DataType_DOUBLE:
+ return mir::DataType::FLOAT64;
+ break;
+ case onnx::TensorProto_DataType_FLOAT:
+ return mir::DataType::FLOAT32;
+ break;
+ case onnx::TensorProto_DataType_UNDEFINED:
+ throw std::runtime_error{"Undefined input data type not supported"};
+ break;
+ default:
+ throw std::runtime_error{"Unsupported tensor element data type"};
+ }
+}
+
+mir::TensorVariant createTensor(const onnx::TensorProto *tensor)
+{
+ mir::DataType type;
+ const void *src_data;
+ mir::Shape shape(tensor->dims_size());
+ for (int i = 0; i < tensor->dims_size(); ++i)
+ {
+ shape.dim(i) = tensor->dims(i);
+ }
+
+ if (tensor->float_data_size() != 0)
+ {
+ assert(tensor->data_type() == onnx::TensorProto::FLOAT);
+ type = mir::DataType::FLOAT32;
+ src_data = tensor->float_data().data();
+ }
+ else if (tensor->double_data_size() != 0)
+ {
+ assert(tensor->data_type() == onnx::TensorProto::DOUBLE);
+ type = mir::DataType::FLOAT64;
+ src_data = tensor->double_data().data();
+ }
+ else if (tensor->int32_data_size() != 0)
+ {
+ assert(tensor->data_type() == onnx::TensorProto::INT32);
+ type = mir::DataType::INT32;
+ src_data = tensor->int32_data().data();
+ }
+ else if (tensor->int64_data_size() != 0)
+ {
+ assert(tensor->data_type() == onnx::TensorProto::INT64);
+ type = mir::DataType::INT64;
+ src_data = tensor->int64_data().data();
+ }
+ else if (tensor->has_raw_data())
+ {
+ type = onnxDataTypeToMirDataType((onnx::TensorProto_DataType)tensor->data_type());
+ src_data = tensor->raw_data().data();
+ }
+ else
+ {
+ throw std::runtime_error("Invalid data in Proto file, investigate");
+ }
+
+ return mir::TensorVariant({type, shape}, src_data);
+}
+
+mir::Operation *foldConstants(mir::Graph *graph, mir::Operation *op)
+{
+ if (op->getType() == mir::Operation::Type::constant ||
+ op->getType() == mir::Operation::Type::input || op->getType() == mir::Operation::Type::output)
+ {
+ // don't fold input, output and constant nodes
+ return op;
+ }
+
+ if (op->getNumOutputs() != 1)
+ {
+ // this operation either have more than 1 output or none at all
+ return op;
+ }
+
+ bool is_foldable =
+ std::all_of(op->getInputs().begin(), op->getInputs().end(), [](mir::Operation::Output *out) {
+ return out->getNode()->getType() == mir::Operation::Type::constant;
+ });
+
+ if (!is_foldable)
+ return op;
+
+ mir_interpreter::MIRInterpreter interpreter;
+ for (mir::Operation::Output *out : op->getInputs())
+ {
+ auto *constant = static_cast<mir::ops::ConstantOp *>(out->getNode());
+ interpreter.setTensor(out, constant->getValue());
+ }
+ op->accept(&interpreter);
+ const mir::TensorVariant &output = interpreter.getTensor(op->getOutput(0));
+
+ return graph->create<mir::ops::ConstantOp>(output);
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/ONNXHelpers.h b/compiler/mir-onnx-importer/ONNXHelpers.h
new file mode 100644
index 000000000..1367ab82a
--- /dev/null
+++ b/compiler/mir-onnx-importer/ONNXHelpers.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
#ifndef __MIR_ONNX_HELPERS_H__
#define __MIR_ONNX_HELPERS_H__

#include "mir/Graph.h"
#include "mir/ops/ConstantOp.h"
#include "mir/TensorVariant.h"
#include "mir/ops/TransposeOp.h"

#include "onnx/onnx.pb.h"

namespace mir_onnx
{

// Opset boundary constant defined in ONNXHelpers.cpp. Presumably versions at
// or above this value are not yet known to the importer — confirm against the
// converter registrations.
extern const int64_t firstUnknownOpset;

/// @brief Map an ONNX tensor element type to the corresponding MIR data type.
mir::DataType onnxDataTypeToMirDataType(onnx::TensorProto::DataType type);

/// @brief Convert a 1-d constant tensor into a mir::Shape.
mir::Shape constantToShape(const mir::ops::ConstantOp *op);

/// @brief Build a MIR tensor from an ONNX TensorProto (typed or raw storage).
mir::TensorVariant createTensor(const onnx::TensorProto *tensor);

/// @brief Constant-fold @p op when all of its inputs are constants; returns
///        the folded ConstantOp or @p op unchanged.
mir::Operation *foldConstants(mir::Graph *graph, mir::Operation *op);

/// @brief Create an operation in @p graph and immediately try to constant-fold it.
template <typename OpType, typename... Types>
mir::Operation *createOp(mir::Graph *graph, Types &&... args)
{
  auto op = graph->create<OpType>(std::forward<Types>(args)...);
  op = foldConstants(graph, op);
  return op;
}

} // namespace mir_onnx

#endif // __MIR_ONNX_HELPERS_H__
diff --git a/compiler/mir-onnx-importer/ONNXImporterImpl.cpp b/compiler/mir-onnx-importer/ONNXImporterImpl.cpp
new file mode 100644
index 000000000..c33104198
--- /dev/null
+++ b/compiler/mir-onnx-importer/ONNXImporterImpl.cpp
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXImporterImpl.h"
+#include "ONNXHelpers.h"
+#include "ONNXOpRegistration.h"
+#include "onnx/onnx.pb.h"
+
+#include "mir/Shape.h"
+#include "mir/TensorUtil.h"
+
+#include "mir/ops/ConstantOp.h"
+
#include <fcntl.h>

#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/text_format.h>

#include <cerrno>
#include <climits>
#include <cstring>
#include <functional>
#include <iostream>
#include <set>
#include <utility>

#include <stdex/Memory.h>
+
+namespace mir_onnx
+{
+
+namespace
+{
+
// Internal driver for ONNX -> MIR conversion; the public API below wraps it.
class ONNXImporterImpl final
{
public:
  ONNXImporterImpl();
  ~ONNXImporterImpl();
  /// @brief Load the model and convert it into a MIR Graph.
  std::unique_ptr<mir::Graph> importModelFromBinaryFile(const std::string &filename);
  std::unique_ptr<mir::Graph> importModelFromTextFile(const std::string &filename);

private:
  /// @brief Translate the loaded ModelProto into a mir::Graph.
  std::unique_ptr<mir::Graph> createIR();
  /// @brief Create constants for initializers and InputOps for true graph inputs.
  void createGraphInputs();
  /// @brief Throw if any node lacks a registered converter for its (op_type, opset).
  void collectUnsupportedOps();
  // Loaded ONNX model; owned here so the protobuf objects outlive conversion.
  std::unique_ptr<onnx::ModelProto> _model;
  // Maps ONNX value names to MIR operation outputs during conversion.
  std::unique_ptr<ConverterContext> _converterCtx;
  // Provides per-domain opset versions (getDomainOpsetVersion).
  std::unique_ptr<ModelContext> _modelCtx;
  // Graph under construction; released to the caller by createIR().
  std::unique_ptr<mir::Graph> _graph;
};
+
// Registering the supported converters in the constructor keeps the importer
// self-contained for callers.
ONNXImporterImpl::ONNXImporterImpl() { registerSupportedOps(); }

ONNXImporterImpl::~ONNXImporterImpl() = default;
+
/// @brief Parse a binary (serialized protobuf) ONNX model file into @p model.
/// @throws std::runtime_error when the file cannot be opened, parsed, or is
///         not fully consumed (wrong format).
void loadModelFromBinaryFile(const std::string &filename, onnx::ModelProto *model)
{
  GOOGLE_PROTOBUF_VERIFY_VERSION;

  int file_handle = open(filename.c_str(), O_RDONLY);

  // NOTE(review): std::strerror/errno and INT_MAX below rely on transitive
  // includes of <cstring>/<cerrno>/<climits> — verify they are included.
  if (file_handle == -1)
    throw std::runtime_error("Couldn't open file \"" + filename + "\": " + std::strerror(errno) +
                             ".");

  // The stream takes ownership of the descriptor and closes it on destruction.
  google::protobuf::io::FileInputStream file_stream(file_handle);
  file_stream.SetCloseOnDelete(true);

  // Raise protobuf's default message size limit so large models can parse.
  // NOTE(review): the two-argument SetTotalBytesLimit overload is deprecated
  // in newer protobuf releases — revisit when upgrading.
  google::protobuf::io::CodedInputStream coded_stream(&file_stream);
  coded_stream.SetTotalBytesLimit(INT_MAX, INT_MAX);

  if (!model->ParseFromCodedStream(&coded_stream))
    throw std::runtime_error("Couldn't parse file \"" + filename + "\".");

  // If the file has not been consumed entirely, assume that the file is in the wrong format.
  if (!coded_stream.ConsumedEntireMessage())
    throw std::runtime_error("File \"" + filename + "\" has not been consumed entirely.");
}
+
+void loadModelFromTextFile(const std::string &filename, onnx::ModelProto *model)
+{
+ GOOGLE_PROTOBUF_VERIFY_VERSION;
+
+ int file_handle = open(filename.c_str(), O_RDONLY);
+
+ if (file_handle == -1)
+ throw std::runtime_error("Couldn't open file \"" + filename + "\": " + std::strerror(errno) +
+ ".");
+
+ google::protobuf::io::FileInputStream file_stream(file_handle);
+ file_stream.SetCloseOnDelete(true);
+
+ if (!google::protobuf::TextFormat::Parse(&file_stream, model))
+ throw std::runtime_error("Couldn't parse file \"" + filename + "\".");
+}
+
std::unique_ptr<mir::Graph> ONNXImporterImpl::importModelFromBinaryFile(const std::string &filename)
{
  // Parse the serialized model, capture per-domain opset versions, validate
  // that every operator has a converter, then build the MIR graph.
  _model = stdex::make_unique<onnx::ModelProto>();
  loadModelFromBinaryFile(filename, _model.get());
  _modelCtx = stdex::make_unique<ModelContext>(_model.get());
  collectUnsupportedOps();
  return createIR();
}
+
std::unique_ptr<mir::Graph> ONNXImporterImpl::importModelFromTextFile(const std::string &filename)
{
  // Same pipeline as the binary path, but the model is parsed from
  // protobuf text format.
  _model = stdex::make_unique<onnx::ModelProto>();
  loadModelFromTextFile(filename, _model.get());
  _modelCtx = stdex::make_unique<ModelContext>(_model.get());
  collectUnsupportedOps();
  return createIR();
}
+
+void ONNXImporterImpl::collectUnsupportedOps()
+{
+ std::set<std::pair<std::string, int64_t>> problems_op_set;
+
+ for (int i = 0; i < _model->graph().node_size(); i++)
+ {
+ const auto &onnx_node = _model->graph().node(i);
+ assert(onnx_node.has_op_type());
+ const auto &op_type = onnx_node.op_type();
+ auto opset = _modelCtx->getDomainOpsetVersion(onnx_node.domain());
+
+ NodeConverterRegistry::ConverterFunc converter =
+ NodeConverterRegistry::getInstance().lookup(op_type, opset);
+
+ if (converter == nullptr)
+ problems_op_set.emplace(op_type, opset);
+ }
+ if (!problems_op_set.empty())
+ {
+ std::cerr << "The following operators are not supported:\n";
+ for (const auto &op : problems_op_set)
+ std::cerr << op.first << " opset " << op.second << std::endl;
+ throw std::runtime_error("Unsupported operators found");
+ }
+}
+
+void ONNXImporterImpl::createGraphInputs()
+{
+ const auto &graph = _model->graph();
+ const auto &initializer = graph.initializer();
+ const auto &value_info = graph.value_info();
+
+ // Create all initializer Tensors
+ for (const auto &tensor : initializer)
+ {
+ const auto mir_tensor = createTensor(&tensor);
+ auto *op = _graph->create<mir::ops::ConstantOp>(mir_tensor);
+ _converterCtx->setOutput(tensor.name(), op->getOutput(0));
+ }
+
+ for (const auto &input : graph.input())
+ {
+ assert(input.has_name());
+
+ if (_converterCtx->getOutput(input.name()) == nullptr)
+ {
+ const auto &onnx_input_shape = input.type().tensor_type().shape();
+ mir::Shape shape(onnx_input_shape.dim_size());
+ for (int i = 0; i < onnx_input_shape.dim_size(); i++)
+ {
+ assert(onnx_input_shape.dim(i).has_dim_value());
+ shape.dim(i) = static_cast<int32_t>(onnx_input_shape.dim(i).dim_value());
+ }
+
+ auto elem_type = onnxDataTypeToMirDataType(
+ (onnx::TensorProto_DataType)input.type().tensor_type().elem_type());
+ mir::TensorType type{elem_type, shape};
+ auto *op = _graph->create<mir::ops::InputOp>(type);
+ _converterCtx->setOutput(input.name(), op->getOutput(0));
+ }
+ }
+}
+
/// @brief Build the MIR graph from the loaded model and release it to the caller.
/// @throws std::runtime_error when a declared graph output name was never produced.
std::unique_ptr<mir::Graph> ONNXImporterImpl::createIR()
{
  _graph = stdex::make_unique<mir::Graph>();
  _converterCtx = stdex::make_unique<ConverterContext>(_graph.get());

  createGraphInputs();

  // Forming partially ordered computation graph
  // (ONNX guarantees nodes appear in topological order, so a single pass
  // sees every producer before its consumers — TODO confirm for this model set.)
  for (const auto &onnx_node : _model->graph().node())
  {
    assert(onnx_node.has_op_type());
    auto &op_type = onnx_node.op_type();
    auto opset = _modelCtx->getDomainOpsetVersion(onnx_node.domain());
    // Get converter
    // (collectUnsupportedOps already verified one exists, hence the assert.)
    NodeConverterRegistry::ConverterFunc converter =
        NodeConverterRegistry::getInstance().lookup(op_type, opset);
    assert(converter != nullptr);
    converter(onnx_node, _converterCtx.get());
  }
  // Set graph outputs
  const auto &outputs = _model->graph().output();
  for (const auto &output : outputs)
  {
    assert(output.has_name());
    auto mir_output = _converterCtx->getOutput(output.name());
    if (mir_output == nullptr)
      throw std::runtime_error("Bad output name!");

    _graph->create<mir::ops::OutputOp>(mir_output);
  }

  return std::move(_graph);
}
+
+} // namespace
+
/// @brief Public entry point: import a binary (serialized protobuf) ONNX model.
std::unique_ptr<mir::Graph> importModelFromBinaryFile(const std::string &filename)
{
  ONNXImporterImpl importer;
  return importer.importModelFromBinaryFile(filename);
}

/// @brief Public entry point: import a text-format ONNX model.
std::unique_ptr<mir::Graph> importModelFromTextFile(const std::string &filename)
{
  ONNXImporterImpl importer;
  return importer.importModelFromTextFile(filename);
}

// Legacy alias for the binary path, kept for existing callers
// (see the removal TODO in the header).
std::unique_ptr<mir::Graph> loadModel(const std::string &filename)
{
  return importModelFromBinaryFile(filename);
}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/ONNXImporterImpl.h b/compiler/mir-onnx-importer/ONNXImporterImpl.h
new file mode 100644
index 000000000..02a49b330
--- /dev/null
+++ b/compiler/mir-onnx-importer/ONNXImporterImpl.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _MIR_ONNX_IMPORTER_H
+#define _MIR_ONNX_IMPORTER_H
+
+#include "mir/Graph.h"
+
+#include <memory>
+#include <string>
+
+namespace mir_onnx
+{
+
+std::unique_ptr<mir::Graph> importModelFromBinaryFile(const std::string &filename);
+std::unique_ptr<mir::Graph> importModelFromTextFile(const std::string &filename);
+// TODO Remove after changing all uses.
+std::unique_ptr<mir::Graph> loadModel(const std::string &filename);
+
+} // namespace mir_onnx
+
+#endif // _MIR_ONNX_IMPORTER_H
diff --git a/compiler/mir-onnx-importer/ONNXNodeConverterRegistry.cpp b/compiler/mir-onnx-importer/ONNXNodeConverterRegistry.cpp
new file mode 100644
index 000000000..a11b18e89
--- /dev/null
+++ b/compiler/mir-onnx-importer/ONNXNodeConverterRegistry.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+
+#include <memory>
+
+namespace mir_onnx
+{
+
+void ModelContext::setDomainOpsetVersion(const std::string &domain, const int64_t opset_version)
+{
+ _domainToOpsetVersion.emplace(domain, opset_version);
+}
+
+int64_t ModelContext::getDomainOpsetVersion(const std::string &domain) const
+{
+ auto iter = _domainToOpsetVersion.find(domain);
+ if (iter == _domainToOpsetVersion.end())
+ throw std::runtime_error("Didn't have domain " + domain + "!");
+ return iter->second;
+}
+
+ModelContext::ModelContext(const onnx::ModelProto *model)
+{
+ if (model == nullptr)
+ {
+ throw std::runtime_error{"Model should be imported before importer prepare"};
+ }
+
+ if (model->ir_version() > onnx::IR_VERSION)
+ {
+ throw std::runtime_error("IR version " + std::to_string(model->ir_version()) +
+ " is not supported yet.");
+ }
+
+ // Set Opset Version for each domain
+ for (const auto &op_set : model->opset_import())
+ {
+ setDomainOpsetVersion(op_set.domain(), op_set.version());
+ }
+}
+
+// ConverterContext
+
+ConverterContext::ConverterContext(mir::Graph *graph) : _graph(graph) {}
+
+void ConverterContext::setOutput(const std::string &name, mir::Operation::Output *output)
+{
+ output->setName(name);
+ auto result = _tensorNameToOutput.emplace(name, output);
+ if (!result.second)
+ throw std::runtime_error("Name duplication: " + name);
+}
+
+mir::Operation::Output *ConverterContext::getOutput(const std::string &name) const
+{
+ auto iter = _tensorNameToOutput.find(name);
+ if (iter == _tensorNameToOutput.end())
+ return nullptr;
+ else
+ return iter->second;
+}
+
+std::vector<mir::Operation::Output *>
+ConverterContext::getNodeInputs(const onnx::NodeProto &onnx_node) const
+{
+ const auto &input_names = onnx_node.input();
+ std::vector<mir::Operation::Output *> outputs;
+
+ for (const auto &input_name : input_names)
+ {
+ if (!input_name.empty())
+ {
+ auto *mir_output = getOutput(input_name);
+ assert(mir_output != nullptr);
+ outputs.emplace_back(mir_output);
+ }
+ }
+ return outputs;
+}
+
+void ConverterContext::setNodeOutputs(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &outputs)
+{
+ assert(!outputs.empty());
+ for (std::size_t i = 0; i < outputs.size(); ++i)
+ {
+ setOutput(onnx_node.output(i), outputs[i]);
+ }
+}
+
+// NodeConverterRegistry
+
+NodeConverterRegistry::ConverterFunc NodeConverterRegistry::lookup(const std::string &optype,
+ int64_t opset) const
+{
+ auto it = _converter_map.find(optype);
+ if (it == _converter_map.end())
+ {
+ return nullptr;
+ }
+
+ const VersionMap &conv_map = it->second;
+
+ auto res = std::lower_bound(
+ conv_map.crbegin(), conv_map.crend(), opset,
+ [](const VersionMap::value_type &pair, int64_t opset) { return pair.first > opset; });
+
+ if (res == conv_map.crend())
+ {
+ return nullptr;
+ }
+ return res->second;
+}
+
+NodeConverterRegistry &NodeConverterRegistry::getInstance()
+{
+ static NodeConverterRegistry instance;
+ return instance;
+}
+
+void NodeConverterRegistry::registerConverter(const std::string &op_type, int64_t opset,
+ NodeConverterRegistry::ConverterFunc conv)
+{
+ _converter_map[op_type].emplace(opset, conv);
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/ONNXNodeConverterRegistry.h b/compiler/mir-onnx-importer/ONNXNodeConverterRegistry.h
new file mode 100644
index 000000000..ea712ad23
--- /dev/null
+++ b/compiler/mir-onnx-importer/ONNXNodeConverterRegistry.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ONNX_NODE_CONVERTER_REGISTRY_H__
+#define __ONNX_NODE_CONVERTER_REGISTRY_H__
+
+#include "onnx/onnx.pb.h"
+#include "mir/Graph.h"
+
+#include <map>
+#include <string>
+#include <vector>
+
+namespace mir_onnx
+{
+
+class ModelContext
+{
+public:
+ explicit ModelContext(const onnx::ModelProto *model);
+
+ void setDomainOpsetVersion(const std::string &domain, const int64_t opset_version);
+ int64_t getDomainOpsetVersion(const std::string &domain) const;
+
+private:
+ std::map<std::string, int64_t> _domainToOpsetVersion;
+};
+
+class ConverterContext
+{
+public:
+ explicit ConverterContext(mir::Graph *graph);
+ ~ConverterContext() = default;
+
+ void setOutput(const std::string &name, mir::Operation::Output *output);
+ mir::Operation::Output *getOutput(const std::string &name) const;
+ std::vector<mir::Operation::Output *> getNodeInputs(const onnx::NodeProto &onnx_node) const;
+ void setNodeOutputs(const onnx::NodeProto &onnx_node,
+ const std::vector<mir::Operation::Output *> &outputs);
+ mir::Graph *getGraph() const { return _graph; }
+
+private:
+ std::map<std::string, mir::Operation::Output *> _tensorNameToOutput;
+ mir::Graph *_graph;
+};
+
+class NodeConverterRegistry
+{
+public:
+ using ConverterFunc = void (*)(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+ NodeConverterRegistry() = default;
+
+ ConverterFunc lookup(const std::string &optype, int64_t opset) const;
+ void registerConverter(const std::string &op_type, int64_t opset, ConverterFunc conv);
+
+ static NodeConverterRegistry &getInstance();
+
+private:
+ using VersionMap = std::map<int64_t, ConverterFunc>;
+
+ std::unordered_map<std::string, VersionMap> _converter_map;
+};
+
+} // namespace mir_onnx
+
+#endif // __ONNX_NODE_CONVERTER_REGISTRY_H__
diff --git a/compiler/mir-onnx-importer/ONNXNodeConverterRegistry.test.cpp b/compiler/mir-onnx-importer/ONNXNodeConverterRegistry.test.cpp
new file mode 100644
index 000000000..dfc3e4216
--- /dev/null
+++ b/compiler/mir-onnx-importer/ONNXNodeConverterRegistry.test.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ONNXNodeConverterRegistry.h"
+#include "ONNXHelpers.h"
+
+#include "gtest/gtest.h"
+
+using namespace mir_onnx;
+
+void converterV1(const onnx::NodeProto &node, ConverterContext *ctx) {}
+void converterV3(const onnx::NodeProto &node, ConverterContext *ctx) {}
+void converterV7(const onnx::NodeProto &node, ConverterContext *ctx) {}
+
+class NodeConverterRegistryTest : public ::testing::Test
+{
+protected:
+ void SetUp() override
+ {
+ registry.registerConverter("dummy", 1, converterV1);
+ registry.registerConverter("dummy", 3, converterV3);
+ registry.registerConverter("dummy", 7, converterV7);
+ registry.registerConverter("dummy", firstUnknownOpset, nullptr);
+ }
+
+ NodeConverterRegistry registry;
+};
+
+TEST_F(NodeConverterRegistryTest, existing_lookup_works)
+{
+ auto res = registry.lookup("dummy", 1);
+ ASSERT_EQ(res, &converterV1);
+}
+
+TEST_F(NodeConverterRegistryTest, skipped_lookup_works)
+{
+ auto res = registry.lookup("dummy", 2);
+ ASSERT_EQ(res, &converterV1);
+}
+
+TEST_F(NodeConverterRegistryTest, first_unknown_version_works)
+{
+ auto res = registry.lookup("dummy", 14);
+ ASSERT_EQ(res, nullptr);
+}
+
+TEST_F(NodeConverterRegistryTest, lower_than_first_version)
+{
+ auto res = registry.lookup("dummy", 0);
+ ASSERT_EQ(res, nullptr);
+}
diff --git a/compiler/mir-onnx-importer/ONNXOpRegistration.h b/compiler/mir-onnx-importer/ONNXOpRegistration.h
new file mode 100644
index 000000000..e3001b000
--- /dev/null
+++ b/compiler/mir-onnx-importer/ONNXOpRegistration.h
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ONNX_OP_REGISTRATION_H__
+#define __ONNX_OP_REGISTRATION_H__
+
+#include "ONNXNodeConverterRegistry.h"
+
+#include "Op/Abs.h"
+#include "Op/Add.h"
+#include "Op/AveragePool.h"
+#include "Op/BatchNormalization.h"
+#include "Op/Concat.h"
+#include "Op/Constant.h"
+#include "Op/Conv.h"
+#include "Op/ConvTranspose.h"
+#include "Op/Div.h"
+#include "Op/Dropout.h"
+#include "Op/Equal.h"
+#include "Op/Expand.h"
+#include "Op/Flatten.h"
+#include "Op/Gather.h"
+#include "Op/Greater.h"
+#include "Op/Gemm.h"
+#include "Op/GlobalAveragePool.h"
+#include "Op/Identity.h"
+#include "Op/Less.h"
+#include "Op/MatMul.h"
+#include "Op/Max.h"
+#include "Op/MaxPool.h"
+#include "Op/Mul.h"
+#include "Op/Pad.h"
+#include "Op/Reciprocal.h"
+#include "Op/ReduceMean.h"
+#include "Op/Relu.h"
+#include "Op/Reshape.h"
+#include "Op/Shape.h"
+#include "Op/Sigmoid.h"
+#include "Op/Softmax.h"
+#include "Op/Sqrt.h"
+#include "Op/Sub.h"
+#include "Op/Sum.h"
+#include "Op/Tanh.h"
+#include "Op/Transpose.h"
+#include "Op/Unsqueeze.h"
+#include "Op/Upsample.h"
+
+namespace mir_onnx
+{
+
+inline void registerSupportedOps()
+{
+ auto &registry = NodeConverterRegistry::getInstance();
+
+#define REG_CONVERTER(name, version, function) registry.registerConverter(name, version, function)
+#define REG(name, version) REG_CONVERTER(#name, version, convert##name##V##version)
+#define UNSUPPORTED(name, version) REG_CONVERTER(#name, version, nullptr)
+
+ REG(Abs, 1);
+ REG(Abs, 6);
+ UNSUPPORTED(Abs, firstUnknownOpset);
+
+ REG(Add, 1);
+ REG(Add, 6);
+ REG(Add, 7);
+ UNSUPPORTED(Add, firstUnknownOpset);
+
+ REG(AveragePool, 1);
+ REG(AveragePool, 7);
+ REG(AveragePool, 10);
+ UNSUPPORTED(AveragePool, 11);
+ UNSUPPORTED(AveragePool, firstUnknownOpset);
+
+ REG(BatchNormalization, 1);
+ REG(BatchNormalization, 6);
+ REG(BatchNormalization, 7);
+ REG(BatchNormalization, 9);
+ UNSUPPORTED(BatchNormalization, firstUnknownOpset);
+
+ REG(Concat, 1);
+ REG(Concat, 4);
+ UNSUPPORTED(Concat, 11);
+ UNSUPPORTED(Concat, firstUnknownOpset);
+
+ REG(Constant, 1);
+ REG(Constant, 9);
+ REG(Constant, 11);
+ UNSUPPORTED(Constant, 12);
+ UNSUPPORTED(Constant, firstUnknownOpset);
+
+ REG(Conv, 1);
+ UNSUPPORTED(Conv, 11);
+ UNSUPPORTED(Conv, firstUnknownOpset);
+
+ REG(ConvTranspose, 1);
+ UNSUPPORTED(ConvTranspose, 11);
+ UNSUPPORTED(ConvTranspose, firstUnknownOpset);
+
+ UNSUPPORTED(Div, 1);
+ UNSUPPORTED(Div, 6);
+ REG(Div, 7);
+ UNSUPPORTED(Div, firstUnknownOpset);
+
+ REG(Dropout, 1);
+ REG(Dropout, 6);
+ REG(Dropout, 7);
+ REG(Dropout, 10);
+ UNSUPPORTED(Dropout, 12);
+ UNSUPPORTED(Dropout, firstUnknownOpset);
+
+ UNSUPPORTED(Equal, 1);
+ REG(Equal, 7);
+ REG(Equal, 11);
+ UNSUPPORTED(Equal, firstUnknownOpset);
+
+ REG(Expand, 8);
+ UNSUPPORTED(Expand, firstUnknownOpset);
+
+ REG(Flatten, 1);
+ REG(Flatten, 9);
+ UNSUPPORTED(Flatten, 11);
+ UNSUPPORTED(Flatten, firstUnknownOpset);
+
+ REG(Gather, 1);
+ UNSUPPORTED(Gather, 11);
+ UNSUPPORTED(Gather, firstUnknownOpset);
+
+ REG(Gemm, 1);
+ REG(Gemm, 6);
+ REG(Gemm, 7);
+ REG(Gemm, 9);
+ REG(Gemm, 11);
+ UNSUPPORTED(Gemm, firstUnknownOpset);
+
+ UNSUPPORTED(GlobalAveragePool, 1);
+ REG(GlobalAveragePool, 2);
+ UNSUPPORTED(GlobalAveragePool, firstUnknownOpset);
+
+ UNSUPPORTED(Greater, 1);
+ REG(Greater, 7);
+ REG(Greater, 9);
+ UNSUPPORTED(Greater, firstUnknownOpset);
+
+ REG(Identity, 1);
+ UNSUPPORTED(Identity, firstUnknownOpset);
+
+ UNSUPPORTED(Less, 1);
+ REG(Less, 7);
+ REG(Less, 9);
+ UNSUPPORTED(Less, firstUnknownOpset);
+
+ REG(MatMul, 1);
+ REG(MatMul, 9);
+ UNSUPPORTED(MatMul, firstUnknownOpset);
+
+ REG(Max, 1);
+ REG(Max, 6);
+ REG(Max, 8);
+ UNSUPPORTED(Max, firstUnknownOpset);
+
+ REG(MaxPool, 1);
+ REG(MaxPool, 8);
+ REG(MaxPool, 10);
+ UNSUPPORTED(MaxPool, 11);
+ UNSUPPORTED(MaxPool, 12);
+ UNSUPPORTED(MaxPool, firstUnknownOpset);
+
+ UNSUPPORTED(Mul, 1);
+ UNSUPPORTED(Mul, 6);
+ REG(Mul, 7);
+ UNSUPPORTED(Mul, firstUnknownOpset);
+
+ REG(Pad, 1);
+ REG(Pad, 2);
+ UNSUPPORTED(Pad, 11);
+ UNSUPPORTED(Pad, firstUnknownOpset);
+
+ REG(Reciprocal, 1);
+ REG(Reciprocal, 6);
+ UNSUPPORTED(Reciprocal, firstUnknownOpset);
+
+ REG(ReduceMean, 1);
+ UNSUPPORTED(ReduceMean, 11);
+ UNSUPPORTED(ReduceMean, firstUnknownOpset);
+
+ REG(Relu, 1);
+ REG(Relu, 6);
+ UNSUPPORTED(Relu, firstUnknownOpset);
+
+ REG(Reshape, 1);
+ REG(Reshape, 5);
+ UNSUPPORTED(Reshape, firstUnknownOpset);
+
+ REG(Shape, 1);
+ UNSUPPORTED(Shape, firstUnknownOpset);
+
+ REG(Sigmoid, 1);
+ REG(Sigmoid, 6);
+ UNSUPPORTED(Sigmoid, firstUnknownOpset);
+
+ REG(Softmax, 1);
+ // TODO SoftmaxV11 is mostly the same, needs a check though
+ UNSUPPORTED(Softmax, firstUnknownOpset);
+
+ REG(Sqrt, 1);
+ REG(Sqrt, 6);
+ UNSUPPORTED(Sqrt, firstUnknownOpset);
+
+ REG(Sub, 1);
+ REG(Sub, 6);
+ REG(Sub, 7);
+ UNSUPPORTED(Sub, firstUnknownOpset);
+
+ UNSUPPORTED(Sum, 1);
+ UNSUPPORTED(Sum, 6);
+ REG(Sum, 8);
+ UNSUPPORTED(Sum, firstUnknownOpset);
+
+ REG(Tanh, 1);
+ REG(Tanh, 6);
+ UNSUPPORTED(Tanh, firstUnknownOpset);
+
+ REG(Transpose, 1);
+ UNSUPPORTED(Transpose, firstUnknownOpset);
+
+ REG(Unsqueeze, 1);
+ UNSUPPORTED(Unsqueeze, 11);
+ UNSUPPORTED(Unsqueeze, firstUnknownOpset);
+
+ // Upsample-1 is not mentioned in onnx master and was considered experimental at the time
+ REG(Upsample, 1);
+ REG(Upsample, 7);
+ REG(Upsample, 9);
+ UNSUPPORTED(Upsample, firstUnknownOpset);
+
+#undef REG
+#undef REG_CONVERTER
+#undef UNSUPPORTED
+}
+
+} // namespace mir_onnx
+
+#endif // __ONNX_OP_REGISTRATION_H__
diff --git a/compiler/mir-onnx-importer/Op/Abs.cpp b/compiler/mir-onnx-importer/Op/Abs.cpp
new file mode 100644
index 000000000..350270cfd
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Abs.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Abs.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/ops/AbsOp.h"
+
+namespace mir_onnx
+{
+
+static void convertAbsGeneric(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+ mir::Graph *graph = context->getGraph();
+
+ auto result = createOp<mir::ops::AbsOp>(graph, inputs[0])->getOutput(0);
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+void convertAbsV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ convertAbsGeneric(onnx_node, context);
+}
+
+void convertAbsV6(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ convertAbsGeneric(onnx_node, context);
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Abs.h b/compiler/mir-onnx-importer/Op/Abs.h
new file mode 100644
index 000000000..06fcd5f3c
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Abs.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_ABS_H
+#define MIR_ONNX_OP_ABS_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertAbsV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertAbsV6(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_ABS_H
diff --git a/compiler/mir-onnx-importer/Op/Add.cpp b/compiler/mir-onnx-importer/Op/Add.cpp
new file mode 100644
index 000000000..8944b4e66
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Add.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Add.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/ops/AddOp.h"
+
+namespace mir_onnx
+{
+
+void convertAddV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ // consumed_inputs attribute not used
+ convertAddV6(onnx_node, context);
+}
+
+void convertAddV6(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ // broadcast attribute not used
+ const auto *axis = findAttribute(onnx_node, "axis");
+ if (axis != nullptr)
+ throw std::runtime_error("Not supported axis attribute in Add operation!");
+
+ convertAddV7(onnx_node, context);
+}
+
+void convertAddV7(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+ mir::Graph *graph = context->getGraph();
+
+ auto result = createOp<mir::ops::AddOp>(graph, inputs[0], inputs[1])->getOutput(0);
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Add.h b/compiler/mir-onnx-importer/Op/Add.h
new file mode 100644
index 000000000..a11aa6bb7
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Add.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_ADD_H
+#define MIR_ONNX_OP_ADD_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertAddV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertAddV6(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertAddV7(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_ADD_H
diff --git a/compiler/mir-onnx-importer/Op/AveragePool.cpp b/compiler/mir-onnx-importer/Op/AveragePool.cpp
new file mode 100644
index 000000000..503feffc8
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/AveragePool.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AveragePool.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+#include "ConvPoolHelpers.h"
+
+#include "mir/ops/AvgPool2DOp.h"
+
+namespace mir_onnx
+{
+
+void convertAveragePoolV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+ mir::Graph *graph = context->getGraph();
+
+ assert(inputs.size() == 1);
+ auto input = inputs[0];
+
+ const auto &input_shape = input->getShape();
+ if (input_shape.rank() != 4)
+ throw std::runtime_error("AveragePool: only 2-D input is supported.");
+
+ constexpr int num_spatial_dims = 2;
+
+ const auto strides =
+ getAttributeValue(onnx_node, "strides", std::vector<std::int32_t>(num_spatial_dims, 1));
+ if (strides.size() != num_spatial_dims)
+ throw std::runtime_error("AveragePool: attribute 'strides' has incorrect size.");
+
+ const auto kernel_shape = getAttributeValue<std::vector<std::int32_t>>(onnx_node, "kernel_shape");
+ if (kernel_shape.size() != num_spatial_dims)
+ throw std::runtime_error("AveragePool: attribute 'kernel_shape' has incorrect size.");
+
+ std::vector<std::int32_t> padding_before(num_spatial_dims, 0);
+ std::vector<std::int32_t> padding_after(num_spatial_dims, 0);
+ if (const auto *pads_attr = findAttribute(onnx_node, "pads"))
+ {
+ const auto pads = getAttributeValue<std::vector<std::int32_t>>(*pads_attr);
+ if (pads.size() != num_spatial_dims * 2)
+ throw std::runtime_error("AveragePool: attribute 'pads' has incorrect size.");
+ padding_before.assign(pads.cbegin(), std::next(pads.cbegin(), num_spatial_dims));
+ padding_after.assign(std::next(pads.cbegin(), num_spatial_dims), pads.cend());
+ }
+ else
+ {
+ const auto auto_pad = getAttributeValue<std::string>(onnx_node, "auto_pad", "NOTSET");
+ const std::vector<std::int32_t> dilations(num_spatial_dims, 1);
+ inferAutoPadding(auto_pad, input_shape, dilations, strides, kernel_shape, padding_before,
+ padding_after);
+ }
+
+ mir::AvgPool2DOpAttributes attributes;
+ attributes.window = kernel_shape;
+ attributes.strides = strides;
+ attributes.padding_before = padding_before;
+ attributes.padding_after = padding_after;
+ attributes.include_pad = false;
+ attributes.data_format = mir::DataFormat::NCHW;
+ auto result = createOp<mir::ops::AvgPool2DOp>(graph, input, attributes)->getOutput(0);
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+void convertAveragePoolV7(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ const auto count_include_pad = getAttributeValue<int64_t>(onnx_node, "count_include_pad", 0);
+ if (count_include_pad != 0)
+ throw std::runtime_error("Not supported count_include_pad attribute!");
+
+ convertAveragePoolV1(onnx_node, context);
+}
+
+void convertAveragePoolV10(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ const auto ceil_mode = getAttributeValue<int64_t>(onnx_node, "ceil_mode", 0);
+ if (ceil_mode != 0)
+ throw std::runtime_error("Not supported ceil_mode attribute!");
+
+ convertAveragePoolV7(onnx_node, context);
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/AveragePool.h b/compiler/mir-onnx-importer/Op/AveragePool.h
new file mode 100644
index 000000000..54e406daf
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/AveragePool.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_AVERAGE_POOL_H
+#define MIR_ONNX_OP_AVERAGE_POOL_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertAveragePoolV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertAveragePoolV7(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertAveragePoolV10(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_AVERAGE_POOL_H
diff --git a/compiler/mir-onnx-importer/Op/BatchNormalization.cpp b/compiler/mir-onnx-importer/Op/BatchNormalization.cpp
new file mode 100644
index 000000000..8a6d8cc51
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/BatchNormalization.cpp
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "BatchNormalization.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/ShapeRange.h"
+#include "mir/Tensor.h"
+
+#include "mir/ops/AddOp.h"
+#include "mir/ops/ConstantOp.h"
+#include "mir/ops/MulOp.h"
+#include "mir/ops/ReshapeOp.h"
+
+#include <cmath>
+
+namespace mir_onnx
+{
+
+void convertBatchNormalizationV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ // consumed_inputs attribute not used
+ convertBatchNormalizationV6(onnx_node, context);
+}
+
+void convertBatchNormalizationV6(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ const auto is_test = getAttributeValue<std::int64_t>(onnx_node, "is_test", 0);
+ if (is_test == 0)
+ throw std::runtime_error("Not supported is_test attribute!");
+
+ convertBatchNormalizationV7(onnx_node, context);
+}
+
+void convertBatchNormalizationV7(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ // spatial attribute used only for learning
+
+ convertBatchNormalizationV9(onnx_node, context);
+}
+
+void convertBatchNormalizationV9(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ // momentum attribute used only for learning
+
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+ mir::Graph *graph = context->getGraph();
+
+ assert(inputs.size() == 5);
+ auto input = inputs[0];
+ auto scale = inputs[1];
+ auto bias = inputs[2];
+ auto mean = inputs[3];
+ auto var = inputs[4];
+
+ // 1e-05f is the default epsilon.
+ const auto epsilon = getAttributeValue<float>(onnx_node, "epsilon", 1e-05f);
+
+ // Y = (X - mean) * scale / sqrt(var + epsilon) + bias =
+ // = (X + C1) * C2 + bias
+ // We need these to be constants since we are going to change them.
+ // TODO Implement the formula using ops and let the optimizer constant-fold them.
+ auto scale_op = dynamic_cast<mir::ops::ConstantOp *>(scale->getNode());
+ auto mean_op = dynamic_cast<mir::ops::ConstantOp *>(mean->getNode());
+ auto var_op = dynamic_cast<mir::ops::ConstantOp *>(var->getNode());
+
+ if (scale_op == nullptr || mean_op == nullptr || var_op == nullptr)
+ throw std::runtime_error(
+ "BatchNormalization: only constant 'scale', 'mean' and 'variance' inputs are supported.");
+
+ mir::Tensor<float> scale_accessor(scale_op->getValue());
+ mir::Tensor<float> mean_accessor(mean_op->getValue());
+ mir::Tensor<float> var_accessor(var_op->getValue());
+
+ // C1 = -mean
+ for (const auto &idx : mir::ShapeRange(mean_accessor.getShape()))
+ mean_accessor.at(idx) *= -1;
+
+ // C2 = scale / sqrt(var + epsilon)
+ for (const auto &idx : mir::ShapeRange(scale_accessor.getShape()))
+ scale_accessor.at(idx) /= std::sqrt(var_accessor.at(idx) + epsilon);
+
+ assert(mean_accessor.getShape().rank() == 1);
+ auto input_rank = input->getShape().rank();
+ if (input_rank < 2)
+ throw std::runtime_error("Inputs with shape rank < 2 are not supported for batchnorm");
+
+ mir::Shape new_shape(std::vector<std::int32_t>(input_rank, 1));
+
+ new_shape.dim(1) = mean_accessor.getShape().dim(0); // set channel dim
+
+ auto reshaped_mean = createOp<mir::ops::ReshapeOp>(graph, mean, new_shape)->getOutput(0);
+ auto reshaped_scale = createOp<mir::ops::ReshapeOp>(graph, scale, new_shape)->getOutput(0);
+ auto reshaped_bias = createOp<mir::ops::ReshapeOp>(graph, bias, new_shape)->getOutput(0);
+
+ // Y = (X + C1) * C2 + bias
+ auto result = createOp<mir::ops::AddOp>(graph, input, reshaped_mean)->getOutput(0);
+ result = createOp<mir::ops::MulOp>(graph, result, reshaped_scale)->getOutput(0);
+ result = createOp<mir::ops::AddOp>(graph, result, reshaped_bias)->getOutput(0);
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/BatchNormalization.h b/compiler/mir-onnx-importer/Op/BatchNormalization.h
new file mode 100644
index 000000000..7c2e37a9c
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/BatchNormalization.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_BATCH_NORMALIZATION_H
+#define MIR_ONNX_OP_BATCH_NORMALIZATION_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertBatchNormalizationV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertBatchNormalizationV6(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertBatchNormalizationV7(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertBatchNormalizationV9(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_BATCH_NORMALIZATION_H
diff --git a/compiler/mir-onnx-importer/Op/Concat.cpp b/compiler/mir-onnx-importer/Op/Concat.cpp
new file mode 100644
index 000000000..dbe752647
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Concat.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Concat.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/ops/ConcatOp.h"
+
+namespace mir_onnx
+{
+
+void convertConcatV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+ mir::Graph *graph = context->getGraph();
+
+ const auto axis = getAttributeValue<int64_t>(onnx_node, "axis", 1);
+
+ auto result = createOp<mir::ops::ConcatOp>(graph, inputs, axis)->getOutput(0);
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+void convertConcatV4(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+ mir::Graph *graph = context->getGraph();
+ // From version 4 axis attribute is required
+ auto attr = findAttribute(onnx_node, "axis");
+ if (!attr)
+ throw std::runtime_error("Attribute axis is required!");
+ int32_t axis = attr->i();
+
+ auto result = createOp<mir::ops::ConcatOp>(graph, inputs, axis)->getOutput(0);
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Concat.h b/compiler/mir-onnx-importer/Op/Concat.h
new file mode 100644
index 000000000..430a2d9e4
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Concat.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_CONCAT_H
+#define MIR_ONNX_OP_CONCAT_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertConcatV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertConcatV4(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_CONCAT_H
diff --git a/compiler/mir-onnx-importer/Op/Constant.cpp b/compiler/mir-onnx-importer/Op/Constant.cpp
new file mode 100644
index 000000000..710760ed3
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Constant.cpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Constant.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/TensorVariant.h"
+#include "mir/ops/ConstantOp.h"
+
+namespace mir_onnx
+{
+
+void convertConstantV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+ mir::Graph *graph = context->getGraph();
+
+ const auto onnx_tensor = getAttributeValue<onnx::TensorProto>(onnx_node, "value");
+ auto mir_tensor = createTensor(&onnx_tensor);
+
+ auto result = graph->create<mir::ops::ConstantOp>(mir_tensor)->getOutput(0);
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+void convertConstantV9(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  // Since version 9 the Constant operation supports other types contained in the tensor
+ convertConstantV1(onnx_node, context);
+}
+
+void convertConstantV11(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ const auto *value_attr = findAttribute(onnx_node, "value");
+ const auto *sparse_value_attr = findAttribute(onnx_node, "sparse_value");
+ if (value_attr == nullptr && sparse_value_attr == nullptr)
+ throw std::runtime_error("Not enough attributes in Constant operation!");
+
+ if (value_attr != nullptr)
+ return convertConstantV9(onnx_node, context);
+
+ if (sparse_value_attr != nullptr)
+ throw std::runtime_error("Not supported sparse_tensor in Constant operation!");
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Constant.h b/compiler/mir-onnx-importer/Op/Constant.h
new file mode 100644
index 000000000..2a4db0fb7
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Constant.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_CONSTANT_H
+#define MIR_ONNX_OP_CONSTANT_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertConstantV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertConstantV9(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertConstantV11(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_CONSTANT_H
diff --git a/compiler/mir-onnx-importer/Op/Conv.cpp b/compiler/mir-onnx-importer/Op/Conv.cpp
new file mode 100644
index 000000000..7dc6ce818
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Conv.cpp
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Conv.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+#include "ConvPoolHelpers.h"
+
+#include "mir/ops/AddOp.h"
+#include "mir/ops/Conv2DOp.h"
+#include "mir/ops/ReshapeOp.h"
+
+namespace mir_onnx
+{
+
+void convertConvV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+ mir::Graph *graph = context->getGraph();
+
+ assert(inputs.size() >= 2);
+ auto input = inputs[0];
+ auto kernel = inputs[1];
+
+ auto input_shape = input->getShape();
+ bool conv1d = false;
+ if (input_shape.rank() == 3)
+ {
+ input_shape = {input_shape.dim(0), input_shape.dim(1), input_shape.dim(2), 1};
+ auto reshaped_input = createOp<mir::ops::ReshapeOp>(graph, input, input_shape);
+ input = reshaped_input->getOutput(0);
+ conv1d = true;
+ }
+ else
+ {
+ if (input_shape.rank() != 4)
+      throw std::runtime_error{"Conv is unsupported for tensors with more than 4 dimensions"};
+ }
+
+ constexpr int num_spatial_dims = 2;
+
+ std::vector<int32_t> dilations(num_spatial_dims, 1);
+ if (const auto *dilations_attr = findAttribute(onnx_node, "dilations"))
+ {
+ dilations = getAttributeValue<std::vector<int32_t>>(*dilations_attr);
+ if (conv1d)
+ dilations.emplace_back(1);
+ }
+
+ if (dilations.size() != num_spatial_dims)
+ throw std::runtime_error("Conv: attribute 'dilations' has incorrect size.");
+ if (!std::all_of(dilations.cbegin(), dilations.cend(), [](std::int32_t x) { return x == 1; }))
+ throw std::runtime_error("Conv: attribute 'dilations' has unsupported value.");
+
+ std::vector<int32_t> strides(num_spatial_dims, 1);
+ if (const auto *strides_attr = findAttribute(onnx_node, "strides"))
+ {
+ strides = getAttributeValue<std::vector<int32_t>>(*strides_attr);
+ if (conv1d)
+ strides.emplace_back(1);
+ }
+
+ if (strides.size() != num_spatial_dims)
+ throw std::runtime_error("Conv: attribute 'strides' has incorrect size.");
+
+ // Assuming kernel has OIHW format.
+ if (conv1d)
+ {
+ auto kernel_shape = kernel->getShape();
+ assert(kernel_shape.rank() == 3);
+ kernel_shape = {kernel_shape.dim(0), kernel_shape.dim(1), kernel_shape.dim(2), 1};
+ auto reshaped_kernel = createOp<mir::ops::ReshapeOp>(graph, kernel, kernel_shape);
+ kernel = reshaped_kernel->getOutput(0);
+ }
+
+ std::vector<std::int32_t> kernel_shape{kernel->getShape().dim(2), kernel->getShape().dim(3)};
+ if (const auto *k_shape_attr = findAttribute(onnx_node, "kernel_shape"))
+ {
+ kernel_shape = getAttributeValue<std::vector<std::int32_t>>(*k_shape_attr);
+ if (conv1d)
+ kernel_shape.emplace_back(1);
+ }
+
+ if (kernel_shape.size() != num_spatial_dims)
+ throw std::runtime_error("Conv: attribute 'kernel_shape' has incorrect size.");
+
+ std::vector<std::int32_t> padding_before(num_spatial_dims, 0);
+ std::vector<std::int32_t> padding_after(num_spatial_dims, 0);
+ if (const auto *pads_attr = findAttribute(onnx_node, "pads"))
+ {
+ auto pads = getAttributeValue<std::vector<std::int32_t>>(*pads_attr);
+ if (conv1d)
+ {
+ pads.emplace_back(0);
+ pads.emplace_back(0);
+ }
+
+ if (pads.size() != num_spatial_dims * 2)
+ throw std::runtime_error("Conv: attribute 'pads' has incorrect size.");
+ const auto fixed_pads = fixPads(input_shape, pads, strides, dilations, kernel_shape);
+ padding_before.assign(fixed_pads.cbegin(), std::next(fixed_pads.cbegin(), num_spatial_dims));
+ padding_after.assign(std::next(fixed_pads.cbegin(), num_spatial_dims), fixed_pads.cend());
+ }
+ else
+ {
+ const auto auto_pad = getAttributeValue<std::string>(onnx_node, "auto_pad", "NOTSET");
+ inferAutoPadding(auto_pad, input_shape, dilations, strides, kernel_shape, padding_before,
+ padding_after);
+ }
+
+ const auto group = getAttributeValue<std::int64_t>(onnx_node, "group", 1);
+
+ mir::Conv2DOpAttributes attributes;
+ attributes.strides = strides;
+ attributes.padding_before = padding_before;
+ attributes.padding_after = padding_after;
+ attributes.num_groups = group;
+ attributes.data_format = mir::DataFormat::NCHW;
+
+ std::vector<std::size_t> perm{0, 2, 3, 1}; // OIHW -> OHWI
+ kernel = createOp<mir::ops::TransposeOp>(graph, kernel, perm)->getOutput(0);
+ auto result = createOp<mir::ops::Conv2DOp>(graph, input, kernel, attributes)->getOutput(0);
+
+ if (inputs.size() > 2)
+ {
+ auto bias = inputs[2];
+ bias = createOp<mir::ops::ReshapeOp>(graph, bias, mir::Shape{1, bias->getShape().dim(0), 1, 1})
+ ->getOutput(0);
+ result = createOp<mir::ops::AddOp>(graph, result, bias)->getOutput(0);
+ }
+
+ if (conv1d)
+ {
+ auto output_shape = result->getShape();
+ output_shape.resize(output_shape.rank() - 1);
+ result = createOp<mir::ops::ReshapeOp>(graph, result, output_shape)->getOutput(0);
+ }
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Conv.h b/compiler/mir-onnx-importer/Op/Conv.h
new file mode 100644
index 000000000..2af2b8959
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Conv.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_CONV_H
+#define MIR_ONNX_OP_CONV_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertConvV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_CONV_H
diff --git a/compiler/mir-onnx-importer/Op/ConvTranspose.cpp b/compiler/mir-onnx-importer/Op/ConvTranspose.cpp
new file mode 100644
index 000000000..3078a1959
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/ConvTranspose.cpp
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ConvTranspose.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+#include "ConvPoolHelpers.h"
+
+#include "mir/TensorUtil.h"
+#include "mir/ops/AddOp.h"
+#include "mir/ops/ConstantOp.h"
+#include "mir/ops/Deconv2DOp.h"
+#include "mir/ops/ReshapeOp.h"
+
+namespace mir_onnx
+{
+
+void convertConvTransposeV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+ mir::Graph *graph = context->getGraph();
+
+ assert(inputs.size() >= 2);
+ auto input = inputs[0];
+ auto kernel = inputs[1];
+
+ const auto group = getAttributeValue<std::int64_t>(onnx_node, "group", 1);
+ if (group != 1)
+ throw std::runtime_error("ConvTranspose: attribute 'group' has unsupported value.");
+
+ const auto &input_shape = input->getShape();
+ if (input_shape.rank() != 4)
+ throw std::runtime_error("ConvTranspose: only 2-D input is supported.");
+
+ constexpr int num_spatial_dims = 2;
+
+ const auto dilations =
+ getAttributeValue(onnx_node, "dilations", std::vector<std::int32_t>(num_spatial_dims, 1));
+ if (dilations.size() != num_spatial_dims)
+ throw std::runtime_error("ConvTranspose: attribute 'dilations' has incorrect size.");
+ if (!std::all_of(dilations.cbegin(), dilations.cend(), [](std::int32_t x) { return x == 1; }))
+ throw std::runtime_error("ConvTranspose: attribute 'dilations' has unsupported value.");
+
+ const auto strides =
+ getAttributeValue(onnx_node, "strides", std::vector<std::int32_t>(num_spatial_dims, 1));
+ if (strides.size() != num_spatial_dims)
+ throw std::runtime_error("ConvTranspose: attribute 'strides' has incorrect size.");
+
+ const auto output_padding = getAttributeValue(onnx_node, "output_padding",
+ std::vector<std::int32_t>(num_spatial_dims, 0));
+ if (output_padding.size() != num_spatial_dims)
+ throw std::runtime_error("ConvTranspose: attribute 'output_padding' has incorrect size.");
+ if (!std::all_of(output_padding.cbegin(), output_padding.cend(),
+ [](std::int32_t x) { return x == 0; }))
+ throw std::runtime_error("ConvTranspose: attribute 'output_padding' has unsupported value.");
+
+ // Assuming kernel has IOHW format.
+ assert(kernel->getShape().rank() == 4);
+ const auto kernel_size = getAttributeValue(
+ onnx_node, "kernel_shape",
+ std::vector<std::int32_t>{kernel->getShape().dim(2), kernel->getShape().dim(3)});
+ if (kernel_size.size() != num_spatial_dims)
+ throw std::runtime_error("ConvTranspose: attribute 'kernel_shape' has incorrect size.");
+
+ // ONNX IOHW -> MIR HWOI
+  std::vector<std::size_t> perm{2, 3, 1, 0}; // IOHW -> HWOI
+ kernel = createOp<mir::ops::TransposeOp>(graph, kernel, perm)->getOutput(0);
+
+ mir::Operation::Output *result;
+ if (const auto *output_shape_attr = findAttribute(onnx_node, "output_shape"))
+ {
+ const auto output_size = getAttributeValue<std::vector<std::int32_t>>(*output_shape_attr);
+ if (output_size.size() != num_spatial_dims)
+ throw std::runtime_error("ConvTranspose: attribute 'output_shape' has incorrect size.");
+ const mir::Shape output_shape{input_shape.dim(0), kernel->getShape().dim(2), output_size[0],
+ output_size[1]};
+ mir::Deconv2DOpAttributes attributes;
+ attributes.strides = strides;
+ attributes.data_format = mir::DataFormat::NCHW;
+ attributes.padding_type = mir::ops::PaddingType::SameUpper;
+ result = createOp<mir::ops::DeConv2DOp>(graph, input, kernel, attributes, output_shape)
+ ->getOutput(0);
+ }
+ else
+ {
+ // TODO This code was not tested.
+ throw std::runtime_error(
+ "ConvTranspose: absence of attribute 'output_shape' is not supported.");
+ std::vector<std::int32_t> padding_before(num_spatial_dims, 0);
+ std::vector<std::int32_t> padding_after(num_spatial_dims, 0);
+ if (const auto *pads_attr = findAttribute(onnx_node, "pads"))
+ {
+ const auto pads = getAttributeValue<std::vector<std::int32_t>>(*pads_attr);
+ if (pads.size() != num_spatial_dims * 2)
+ throw std::runtime_error("ConvTranspose: attribute 'pads' has incorrect size.");
+ padding_before.assign(pads.cbegin(), std::next(pads.cbegin(), num_spatial_dims));
+ padding_after.assign(std::next(pads.cbegin(), num_spatial_dims), pads.cend());
+ }
+ else
+ {
+ const auto auto_pad = getAttributeValue<std::string>(onnx_node, "auto_pad", "NOTSET");
+ inferAutoPadding(auto_pad, input_shape, dilations, strides, kernel_size, padding_before,
+ padding_after);
+ }
+ mir::Deconv2DOpAttributes attributes;
+ attributes.strides = strides;
+ attributes.padding_before = padding_before;
+ attributes.padding_after = padding_after;
+ attributes.data_format = mir::DataFormat::NCHW;
+ result = createOp<mir::ops::DeConv2DOp>(graph, input, kernel, attributes)->getOutput(0);
+ }
+
+ if (inputs.size() > 2)
+ {
+ auto bias = inputs[2];
+ bias = createOp<mir::ops::ReshapeOp>(graph, bias, mir::Shape{1, bias->getShape().dim(0), 1, 1})
+ ->getOutput(0);
+ result = createOp<mir::ops::AddOp>(graph, result, bias)->getOutput(0);
+ }
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/ConvTranspose.h b/compiler/mir-onnx-importer/Op/ConvTranspose.h
new file mode 100644
index 000000000..d203dc6c1
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/ConvTranspose.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_CONV_TRANSPOSE_H
+#define MIR_ONNX_OP_CONV_TRANSPOSE_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertConvTransposeV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_CONV_TRANSPOSE_H
diff --git a/compiler/mir-onnx-importer/Op/Div.cpp b/compiler/mir-onnx-importer/Op/Div.cpp
new file mode 100644
index 000000000..40620169a
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Div.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Div.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/ops/DivOp.h"
+
+namespace mir_onnx
+{
+
+void convertDivV7(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+
+ mir::Graph *graph = context->getGraph();
+
+ auto result = createOp<mir::ops::DivOp>(graph, inputs[0], inputs[1])->getOutput(0);
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Div.h b/compiler/mir-onnx-importer/Op/Div.h
new file mode 100644
index 000000000..cdc254fb8
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Div.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_DIV_H
+#define MIR_ONNX_OP_DIV_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertDivV7(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_DIV_H
diff --git a/compiler/mir-onnx-importer/Op/Dropout.cpp b/compiler/mir-onnx-importer/Op/Dropout.cpp
new file mode 100644
index 000000000..ef6972784
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Dropout.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Dropout.h"
+
+#include "AttributeHelpers.h"
+
+namespace mir_onnx
+{
+
+void convertDropoutV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ // consumed_inputs attribute not used
+ convertDropoutV6(onnx_node, context);
+}
+
+void convertDropoutV6(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ const auto is_test = getAttributeValue<std::int64_t>(onnx_node, "is_test", 0);
+ if (is_test == 0)
+ throw std::runtime_error("Not supported is_test attribute!");
+
+ convertDropoutV10(onnx_node, context);
+}
+
+void convertDropoutV7(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ convertDropoutV10(onnx_node, context);
+}
+
+void convertDropoutV10(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+
+ // ratio attribute not used
+
+ // This is a no-op in inference mode.
+ context->setNodeOutputs(onnx_node, {inputs[0]});
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Dropout.h b/compiler/mir-onnx-importer/Op/Dropout.h
new file mode 100644
index 000000000..9a90ac79b
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Dropout.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_DROPOUT_H
+#define MIR_ONNX_OP_DROPOUT_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertDropoutV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertDropoutV6(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertDropoutV7(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertDropoutV10(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_DROPOUT_H
diff --git a/compiler/mir-onnx-importer/Op/Equal.cpp b/compiler/mir-onnx-importer/Op/Equal.cpp
new file mode 100644
index 000000000..242389eb5
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Equal.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Equal.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/ops/EqualOp.h"
+
+namespace mir_onnx
+{
+
+void convertEqualV11(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+ mir::Graph *graph = context->getGraph();
+
+ auto result = createOp<mir::ops::EqualOp>(graph, inputs[0], inputs[1])->getOutput(0);
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+void convertEqualV7(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ // Other type constraints
+ convertEqualV11(onnx_node, context);
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Equal.h b/compiler/mir-onnx-importer/Op/Equal.h
new file mode 100644
index 000000000..0672cd661
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Equal.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_EQUAL_H
+#define MIR_ONNX_OP_EQUAL_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertEqualV11(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertEqualV7(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_EQUAL_H
diff --git a/compiler/mir-onnx-importer/Op/Expand.cpp b/compiler/mir-onnx-importer/Op/Expand.cpp
new file mode 100644
index 000000000..40002dfa9
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Expand.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Expand.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/BroadcastOp.h"
+
+namespace mir_onnx
+{
+
+void convertExpandV8(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+ mir::Graph *graph = context->getGraph();
+
+ if (inputs[1]->getNode()->getType() != mir::Operation::Type::constant)
+ {
+ throw std::runtime_error{"Expand with non-constant input shape is not supported"};
+ }
+
+ auto target_shape = constantToShape(static_cast<mir::ops::ConstantOp *>(inputs[1]->getNode()));
+
+ auto *result = createOp<mir::ops::BroadcastOp>(graph, inputs[0], target_shape)->getOutput(0);
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Expand.h b/compiler/mir-onnx-importer/Op/Expand.h
new file mode 100644
index 000000000..35f7af407
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Expand.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_EXPAND_H
+#define MIR_ONNX_OP_EXPAND_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertExpandV8(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_EXPAND_H
diff --git a/compiler/mir-onnx-importer/Op/Flatten.cpp b/compiler/mir-onnx-importer/Op/Flatten.cpp
new file mode 100644
index 000000000..dfad6ddbf
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Flatten.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Flatten.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/ops/ReshapeOp.h"
+
+namespace mir_onnx
+{
+
+void convertFlattenV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+ mir::Graph *graph = context->getGraph();
+
+ const auto axis = getAttributeValue<int64_t>(onnx_node, "axis", 1);
+ assert(inputs.size() == 1);
+ const auto &in_shape = inputs[0]->getShape();
+ assert(axis <= in_shape.rank()); // A tensor of rank >= axis
+ int32_t first_dim = 1, second_dim = 1;
+ int32_t dim = 0;
+
+ for (; dim < axis; dim++)
+ first_dim *= in_shape.dim(dim);
+
+ for (; dim < in_shape.rank(); dim++)
+ second_dim *= in_shape.dim(dim);
+
+ mir::Shape out_shape({first_dim, second_dim}); // Output 2D tensor
+
+ auto result = createOp<mir::ops::ReshapeOp>(graph, inputs[0], out_shape)->getOutput(0);
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+void convertFlattenV9(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  // V9 only relaxes the tensor type constraints; the conversion itself is identical to V1.
+ convertFlattenV1(onnx_node, context);
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Flatten.h b/compiler/mir-onnx-importer/Op/Flatten.h
new file mode 100644
index 000000000..174a8d906
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Flatten.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_FLATTEN_H
+#define MIR_ONNX_OP_FLATTEN_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertFlattenV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertFlattenV9(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_FLATTEN_H
diff --git a/compiler/mir-onnx-importer/Op/Gather.cpp b/compiler/mir-onnx-importer/Op/Gather.cpp
new file mode 100644
index 000000000..fa3746c67
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Gather.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Gather.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/ops/GatherOp.h"
+
+namespace mir_onnx
+{
+
+void convertGatherV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+ mir::Graph *graph = context->getGraph();
+
+ // 0 is the default axis number.
+ const auto axis = getAttributeValue<std::int64_t>(onnx_node, "axis", 0);
+
+ auto result = createOp<mir::ops::GatherOp>(graph, inputs[0], inputs[1], axis)->getOutput(0);
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Gather.h b/compiler/mir-onnx-importer/Op/Gather.h
new file mode 100644
index 000000000..c4308d2be
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Gather.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_GATHER_H
+#define MIR_ONNX_OP_GATHER_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertGatherV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_GATHER_H
diff --git a/compiler/mir-onnx-importer/Op/Gemm.cpp b/compiler/mir-onnx-importer/Op/Gemm.cpp
new file mode 100644
index 000000000..1e0759dda
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Gemm.cpp
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Gemm.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/TensorUtil.h"
+
+#include "mir/ops/AddOp.h"
+#include "mir/ops/ConstantOp.h"
+#include "mir/ops/FullyConnectedOp.h"
+#include "mir/ops/MulOp.h"
+#include "mir/ops/ReshapeOp.h"
+#include "mir/ops/TransposeOp.h"
+
+namespace mir_onnx
+{
+
+static void convertGemm(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+ mir::Graph *graph = context->getGraph();
+
+ assert(inputs.size() == 2 || inputs.size() == 3);
+
+ auto a = inputs[0];
+ auto b = inputs[1];
+ auto c = inputs.size() > 2 ? inputs[2] : nullptr;
+
+ // 1.0f is the default factor.
+ const auto alpha_val = getAttributeValue<float>(onnx_node, "alpha", 1.0f);
+ const auto beta_val = getAttributeValue<float>(onnx_node, "beta", 1.0f);
+
+ // 0 means that no transpose is needed. It is the default value.
+ const auto trans_a = getAttributeValue<std::int64_t>(onnx_node, "transA", 0);
+ const auto trans_b = getAttributeValue<std::int64_t>(onnx_node, "transB", 0);
+
+ // Transpose the A and B matrices as needed.
+ if (trans_a)
+ a = createOp<mir::ops::TransposeOp>(graph, a, std::vector<std::size_t>{1, 0})->getOutput(0);
+ if (trans_b)
+ b = createOp<mir::ops::TransposeOp>(graph, b, std::vector<std::size_t>{1, 0})->getOutput(0);
+
+ // Calculate A * B.
+ auto ab = createOp<mir::ops::FullyConnectedOp>(graph, a, b)->getOutput(0);
+
+ // Multiply A * B by the constant factor.
+ if (alpha_val != 1.0f)
+ {
+ mir::TensorVariant alpha_tensor({mir::DataType::FLOAT32, {}}, &alpha_val);
+ auto alpha = createOp<mir::ops::ConstantOp>(graph, alpha_tensor)->getOutput(0);
+ ab = createOp<mir::ops::MulOp>(graph, alpha, ab)->getOutput(0);
+ }
+
+  // If there is no third input, the node is a simple A*B multiplication.
+ if (!c)
+ {
+ context->setNodeOutputs(onnx_node, {ab});
+ return;
+ }
+
+ // Multiply C by the constant factor.
+ if (beta_val != 1.0f)
+ {
+ mir::TensorVariant beta_tensor({mir::DataType::FLOAT32, {}}, &beta_val);
+ auto beta = createOp<mir::ops::ConstantOp>(graph, beta_tensor)->getOutput(0);
+ c = createOp<mir::ops::MulOp>(graph, beta, c)->getOutput(0);
+ }
+
+ // Calculate the result: alpha * A * B + beta * C.
+ auto result = createOp<mir::ops::AddOp>(graph, ab, c)->getOutput(0);
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+void convertGemmV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ return convertGemm(onnx_node, context);
+}
+
+void convertGemmV6(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  // This version differs from V1 only in the description of input C (redundant "can be inplace" text removed).
+ return convertGemm(onnx_node, context);
+}
+
+void convertGemmV7(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  // This version differs from V6: removed "broadcast" attribute
+ return convertGemm(onnx_node, context);
+}
+
+void convertGemmV9(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ // This version differs from V7: added more supported types
+ return convertGemm(onnx_node, context);
+}
+
+void convertGemmV11(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  // This version differs from V9: input C became optional
+ return convertGemm(onnx_node, context);
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Gemm.h b/compiler/mir-onnx-importer/Op/Gemm.h
new file mode 100644
index 000000000..d87a36e7b
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Gemm.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_GEMM_H
+#define MIR_ONNX_OP_GEMM_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertGemmV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertGemmV6(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertGemmV7(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertGemmV9(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertGemmV11(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_GEMM_H
diff --git a/compiler/mir-onnx-importer/Op/GlobalAveragePool.cpp b/compiler/mir-onnx-importer/Op/GlobalAveragePool.cpp
new file mode 100644
index 000000000..379c8b596
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/GlobalAveragePool.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GlobalAveragePool.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/AvgPool2DOp.h"
+
+namespace mir_onnx
+{
+
+void convertGlobalAveragePoolV2(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+ mir::Graph *graph = context->getGraph();
+
+ assert(inputs.size() == 1);
+ auto input = inputs[0];
+
+ const auto &input_shape = input->getShape();
+ if (input_shape.rank() != 4)
+ throw std::runtime_error("GlobalAveragePool: only 2-D input is supported.");
+
+ // GlobalAveragePool is equivalent to AveragePool with kernel size equal
+ // to the spatial dimension of input tensor.
+ const std::vector<std::int32_t> window_size{input->getShape().dim(2), input->getShape().dim(3)};
+ mir::AvgPool2DOpAttributes attributes;
+ attributes.window = window_size;
+ attributes.data_format = mir::DataFormat::NCHW;
+
+ auto result = createOp<mir::ops::AvgPool2DOp>(graph, input, attributes)->getOutput(0);
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/GlobalAveragePool.h b/compiler/mir-onnx-importer/Op/GlobalAveragePool.h
new file mode 100644
index 000000000..b2fb9b8c9
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/GlobalAveragePool.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_GLOBAL_AVERAGE_POOL_H
+#define MIR_ONNX_OP_GLOBAL_AVERAGE_POOL_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertGlobalAveragePoolV2(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_GLOBAL_AVERAGE_POOL_H
diff --git a/compiler/mir-onnx-importer/Op/Greater.cpp b/compiler/mir-onnx-importer/Op/Greater.cpp
new file mode 100644
index 000000000..deaf96d4b
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Greater.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Greater.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/ops/GreaterOp.h"
+
+namespace mir_onnx
+{
+
+static void convertGreaterVGeneric(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+ mir::Graph *graph = context->getGraph();
+
+ auto result = createOp<mir::ops::GreaterOp>(graph, inputs[0], inputs[1])->getOutput(0);
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+void convertGreaterV7(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ convertGreaterVGeneric(onnx_node, context);
+}
+
+void convertGreaterV9(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ convertGreaterVGeneric(onnx_node, context);
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Greater.h b/compiler/mir-onnx-importer/Op/Greater.h
new file mode 100644
index 000000000..3b6a44f33
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Greater.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_GREATER_H
+#define MIR_ONNX_OP_GREATER_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertGreaterV7(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertGreaterV9(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_GREATER_H
diff --git a/compiler/mir-onnx-importer/Op/Identity.cpp b/compiler/mir-onnx-importer/Op/Identity.cpp
new file mode 100644
index 000000000..6db70ffcd
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Identity.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Identity.h"
+
+namespace mir_onnx
+{
+
+void convertIdentityV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ const auto inputs = context->getNodeInputs(onnx_node);
+ assert(inputs.size() == 1);
+
+ context->setNodeOutputs(onnx_node, {inputs[0]});
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Identity.h b/compiler/mir-onnx-importer/Op/Identity.h
new file mode 100644
index 000000000..ea63bab4a
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Identity.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_IDENTITY_H
+#define MIR_ONNX_OP_IDENTITY_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertIdentityV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_IDENTITY_H
diff --git a/compiler/mir-onnx-importer/Op/Less.cpp b/compiler/mir-onnx-importer/Op/Less.cpp
new file mode 100644
index 000000000..44f5d8cf4
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Less.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Less.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/ops/LessOp.h"
+
+namespace mir_onnx
+{
+
+static void convertLessGeneric(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+ mir::Graph *graph = context->getGraph();
+
+ auto result = createOp<mir::ops::LessOp>(graph, inputs[0], inputs[1])->getOutput(0);
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+void convertLessV7(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ convertLessGeneric(onnx_node, context);
+}
+
+void convertLessV9(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ convertLessGeneric(onnx_node, context);
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Less.h b/compiler/mir-onnx-importer/Op/Less.h
new file mode 100644
index 000000000..682c08725
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Less.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_LESS_H
+#define MIR_ONNX_OP_LESS_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertLessV7(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertLessV9(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_LESS_H
diff --git a/compiler/mir-onnx-importer/Op/MatMul.cpp b/compiler/mir-onnx-importer/Op/MatMul.cpp
new file mode 100644
index 000000000..6d8ea6b83
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/MatMul.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MatMul.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/FullyConnectedOp.h"
+
+namespace mir_onnx
+{
+
+void convertMatMulV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+ mir::Graph *graph = context->getGraph();
+
+ assert(inputs.size() == 2);
+ auto A = inputs[0];
+ auto B = inputs[1];
+  // MatMul multiplies N-dimensional matrices, whereas the
+  // FullyConnected layer supports only 2-dimensional matrices.
+ if (A->getShape().rank() != 2 || B->getShape().rank() != 2)
+ throw std::runtime_error("Supported only 2D matrix multiplying!");
+ // Calculate A * B.
+ auto result = createOp<mir::ops::FullyConnectedOp>(graph, A, B)->getOutput(0);
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+void convertMatMulV9(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  // V9 only relaxes the tensor type constraints; the conversion itself is identical to V1.
+ convertMatMulV1(onnx_node, context);
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/MatMul.h b/compiler/mir-onnx-importer/Op/MatMul.h
new file mode 100644
index 000000000..97e641ebb
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/MatMul.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_MATMUL_H
+#define MIR_ONNX_OP_MATMUL_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertMatMulV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertMatMulV9(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_MATMUL_H
diff --git a/compiler/mir-onnx-importer/Op/Max.cpp b/compiler/mir-onnx-importer/Op/Max.cpp
new file mode 100644
index 000000000..d4c7d1775
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Max.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Max.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/MaxOp.h"
+
+namespace mir_onnx
+{
+
+static void convertMaxGeneric(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+ if (inputs.size() != 2)
+ {
+ throw std::runtime_error{"Unsupported number of inputs for Max operator"};
+ }
+ mir::Graph *graph = context->getGraph();
+ auto result = createOp<mir::ops::MaxOp>(graph, inputs[0], inputs[1])->getOutput(0);
+
+ context->setNodeOutputs(onnx_node, {result});
+}
+
+void convertMaxV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ convertMaxGeneric(onnx_node, context);
+}
+
+void convertMaxV6(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ convertMaxGeneric(onnx_node, context);
+}
+
+void convertMaxV8(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+ convertMaxGeneric(onnx_node, context);
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Max.h b/compiler/mir-onnx-importer/Op/Max.h
new file mode 100644
index 000000000..1f2754b62
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Max.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_MAX_H
+#define MIR_ONNX_OP_MAX_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertMaxV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertMaxV6(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertMaxV8(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_MAX_H
diff --git a/compiler/mir-onnx-importer/Op/MaxPool.cpp b/compiler/mir-onnx-importer/Op/MaxPool.cpp
new file mode 100644
index 000000000..53e6e1556
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/MaxPool.cpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MaxPool.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+#include "ConvPoolHelpers.h"
+
+#include "mir/ops/MaxPool2DOp.h"
+
+namespace mir_onnx
+{
+
+// MaxPool-1: converts 2-D max pooling over a rank-4 (assumed NCHW) input.
+// Reads 'kernel_shape' (required), 'strides' (defaults to 1 per spatial axis),
+// and either explicit 'pads' or, when 'pads' is absent, 'auto_pad'.
+void convertMaxPoolV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+  mir::Graph *graph = context->getGraph();
+
+  assert(inputs.size() == 1);
+  auto input = inputs[0];
+
+  // Only two spatial dimensions are supported, i.e. a rank-4 tensor.
+  const auto &input_shape = input->getShape();
+  if (input_shape.rank() != 4)
+    throw std::runtime_error("MaxPool: only 2-D input is supported.");
+
+  constexpr int num_spatial_dims = 2;
+
+  // Default stride is 1 on each spatial axis.
+  const auto strides =
+      getAttributeValue(onnx_node, "strides", std::vector<std::int32_t>(num_spatial_dims, 1));
+  if (strides.size() != num_spatial_dims)
+    throw std::runtime_error("MaxPool: attribute 'strides' has incorrect size.");
+
+  const auto kernel_shape = getAttributeValue<std::vector<std::int32_t>>(onnx_node, "kernel_shape");
+  if (kernel_shape.size() != num_spatial_dims)
+    throw std::runtime_error("MaxPool: attribute 'kernel_shape' has incorrect size.");
+
+  std::vector<std::int32_t> padding_before;
+  std::vector<std::int32_t> padding_after;
+  if (const auto *pads_attr = findAttribute(onnx_node, "pads"))
+  {
+    // Explicit pads: first half are the 'before' amounts, second half the 'after' amounts.
+    const auto pads = getAttributeValue<std::vector<std::int32_t>>(*pads_attr);
+    if (pads.size() != num_spatial_dims * 2)
+      throw std::runtime_error("MaxPool: attribute 'pads' has incorrect size.");
+    padding_before.assign(pads.cbegin(), std::next(pads.cbegin(), num_spatial_dims));
+    padding_after.assign(std::next(pads.cbegin(), num_spatial_dims), pads.cend());
+  }
+  else
+  {
+    // No explicit pads: derive them from 'auto_pad' (default "NOTSET") with unit dilations.
+    const auto auto_pad = getAttributeValue<std::string>(onnx_node, "auto_pad", "NOTSET");
+    const std::vector<std::int32_t> dilations(num_spatial_dims, 1);
+    inferAutoPadding(auto_pad, input_shape, dilations, strides, kernel_shape, padding_before,
+                     padding_after);
+  }
+
+  mir::MaxPool2DOpAttributes attributes;
+  attributes.window = kernel_shape;
+  attributes.strides = strides;
+  attributes.padding_before = padding_before;
+  attributes.padding_after = padding_after;
+  attributes.data_format = mir::DataFormat::NCHW;
+  auto result = createOp<mir::ops::MaxPool2DOp>(graph, input, attributes)->getOutput(0);
+
+  context->setNodeOutputs(onnx_node, {result});
+}
+
+// MaxPool-8: only the default storage_order (0, row-major) is supported;
+// otherwise the conversion is identical to MaxPool-1.
+void convertMaxPoolV8(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  const auto storage_order = getAttributeValue<int64_t>(onnx_node, "storage_order", 0);
+  if (storage_order != 0)
+    throw std::runtime_error("Not supported storage order attribute!");
+
+  convertMaxPoolV1(onnx_node, context);
+}
+
+// MaxPool-10: rejects the new ceil_mode (only the default 0 is supported)
+// and non-unit dilations, then falls back to the MaxPool-8 conversion.
+void convertMaxPoolV10(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  const auto ceil_mode = getAttributeValue<int64_t>(onnx_node, "ceil_mode", 0);
+  if (ceil_mode != 0)
+    throw std::runtime_error("Not supported ceil_mode attribute!");
+
+  const auto *dilations = findAttribute(onnx_node, "dilations");
+  if (dilations != nullptr)
+  {
+    // check default (=1) dilations on each spatial axis
+    for (auto index = 0; index < dilations->ints_size(); index++)
+      if (dilations->ints(index) != 1)
+        throw std::runtime_error("Not supported dilations in MaxPool operation!");
+  }
+
+  convertMaxPoolV8(onnx_node, context);
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/MaxPool.h b/compiler/mir-onnx-importer/Op/MaxPool.h
new file mode 100644
index 000000000..85bd9cf1a
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/MaxPool.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_MAX_POOL_H
+#define MIR_ONNX_OP_MAX_POOL_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertMaxPoolV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertMaxPoolV8(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertMaxPoolV10(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_MAX_POOL_H
diff --git a/compiler/mir-onnx-importer/Op/Mul.cpp b/compiler/mir-onnx-importer/Op/Mul.cpp
new file mode 100644
index 000000000..dbfdd4950
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Mul.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Mul.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/MulOp.h"
+
+namespace mir_onnx
+{
+
+// Mul-7: element-wise multiplication of the node's two inputs.
+void convertMulV7(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  const auto node_inputs = context->getNodeInputs(onnx_node);
+  auto *graph = context->getGraph();
+
+  auto *product = createOp<mir::ops::MulOp>(graph, node_inputs[0], node_inputs[1])->getOutput(0);
+  context->setNodeOutputs(onnx_node, {product});
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Mul.h b/compiler/mir-onnx-importer/Op/Mul.h
new file mode 100644
index 000000000..58738c81d
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Mul.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_MUL_H
+#define MIR_ONNX_OP_MUL_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertMulV7(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_MUL_H
diff --git a/compiler/mir-onnx-importer/Op/Pad.cpp b/compiler/mir-onnx-importer/Op/Pad.cpp
new file mode 100644
index 000000000..504a32bb8
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Pad.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Pad.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/ops/PadOp.h"
+
+namespace mir_onnx
+{
+
+// Shared implementation for the Pad converters. 'pad_attr_name' selects the
+// attribute that carries the pad amounts (it was renamed between opsets).
+// Only "constant" mode is supported; 'value' (default 0.0f) fills padded cells.
+void convertPadAttrName(const std::string &pad_attr_name, const onnx::NodeProto &onnx_node,
+                        ConverterContext *context)
+{
+  std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+  mir::Graph *graph = context->getGraph();
+
+  assert(inputs.size() == 1);
+  auto input = inputs[0];
+
+  // 0.0f is the default value to be filled into padded cells.
+  const auto value = getAttributeValue<float>(onnx_node, "value", 0.0f);
+  const auto pads = getAttributeValue<std::vector<std::int64_t>>(onnx_node, pad_attr_name);
+  // "constant" is the default mode.
+  const auto mode = getAttributeValue<std::string>(onnx_node, "mode", "constant");
+  if (mode != "constant")
+    throw std::runtime_error("Not supported Pad mode attribute!");
+
+  const int num_dims = input->getShape().rank();
+  // Validate with an exception rather than assert so malformed models are
+  // rejected in release (NDEBUG) builds too, matching the other converters.
+  if (pads.size() != static_cast<std::size_t>(num_dims) * 2)
+    throw std::runtime_error("Pad: attribute '" + pad_attr_name + "' has incorrect size.");
+  mir::PadOpAttributes attributes(num_dims);
+  for (int i = 0; i < num_dims; i++)
+  {
+    // Attribute layout: [before_0, ..., before_{n-1}, after_0, ..., after_{n-1}].
+    attributes.padding_before[i] = pads[i];
+    attributes.padding_after[i] = pads[num_dims + i];
+  }
+
+  attributes.padding_value = value;
+
+  auto result = createOp<mir::ops::PadOp>(graph, input, attributes)->getOutput(0);
+
+  context->setNodeOutputs(onnx_node, {result});
+}
+
+// Pad-1: the pad amounts are carried by the legacy attribute name "paddings".
+void convertPadV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  convertPadAttrName("paddings", onnx_node, context);
+}
+
+// Pad-2: the pad amounts attribute was renamed to "pads".
+void convertPadV2(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  convertPadAttrName("pads", onnx_node, context);
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Pad.h b/compiler/mir-onnx-importer/Op/Pad.h
new file mode 100644
index 000000000..a0731ae4c
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Pad.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_PAD_H
+#define MIR_ONNX_OP_PAD_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertPadV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertPadV2(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_PAD_H
diff --git a/compiler/mir-onnx-importer/Op/Reciprocal.cpp b/compiler/mir-onnx-importer/Op/Reciprocal.cpp
new file mode 100644
index 000000000..b063d4b8c
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Reciprocal.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Reciprocal.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/ConstantOp.h"
+#include "mir/ops/DivOp.h"
+
+namespace mir_onnx
+{
+
+// Reciprocal(x) = 1 / x, implemented as a scalar constant 1.0 divided by the input.
+static void convertReciprocal(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+  mir::Graph *graph = context->getGraph();
+
+  assert(inputs.size() == 1);
+  auto input = inputs[0];
+
+  const float one_value = 1.0f;
+  mir::TensorVariant one_tensor({mir::DataType::FLOAT32, {}}, &one_value);
+  auto one = createOp<mir::ops::ConstantOp>(graph, one_tensor)->getOutput(0);
+  // Fixed operand order: the constant is the dividend and the input the divisor.
+  // The previous order computed input / 1, i.e. the identity, not the reciprocal.
+  auto result = createOp<mir::ops::DivOp>(graph, one, input)->getOutput(0);
+
+  context->setNodeOutputs(onnx_node, {result});
+}
+
+// Reciprocal-1: delegates to the shared converter; no version-specific handling.
+void convertReciprocalV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  convertReciprocal(onnx_node, context);
+}
+
+// Reciprocal-6: delegates to the shared converter; no version-specific handling.
+void convertReciprocalV6(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  convertReciprocal(onnx_node, context);
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Reciprocal.h b/compiler/mir-onnx-importer/Op/Reciprocal.h
new file mode 100644
index 000000000..747623ab5
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Reciprocal.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_RECIPROCAL_H
+#define MIR_ONNX_OP_RECIPROCAL_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertReciprocalV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertReciprocalV6(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_RECIPROCAL_H
diff --git a/compiler/mir-onnx-importer/Op/ReduceMean.cpp b/compiler/mir-onnx-importer/Op/ReduceMean.cpp
new file mode 100644
index 000000000..ec43bffb4
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/ReduceMean.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReduceMean.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/ops/ReduceMeanOp.h"
+
+#include <numeric>
+
+namespace mir_onnx
+{
+
+// ReduceMean-1: averages over the axes listed in the 'axes' attribute
+// (all axes when it is empty). keepdims == 1 (the default) keeps the
+// reduced dimensions in the output with size 1.
+void convertReduceMeanV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  const auto inputs = context->getNodeInputs(onnx_node);
+  assert(inputs.size() == 1);
+
+  const auto axes = getAttributeValue<std::vector<std::int64_t>>(onnx_node, "axes");
+  const auto keepdims = getAttributeValue<int64_t>(onnx_node, "keepdims", 1);
+
+  const auto rank = inputs[0]->getShape().rank();
+  std::vector<int32_t> reduce_dims;
+  if (axes.empty())
+  {
+    // Empty axes list: reduce over every dimension of the input.
+    for (int32_t dim = 0; dim < rank; ++dim)
+      reduce_dims.push_back(dim);
+  }
+  else
+  {
+    // Negative axis values count from the end of the shape.
+    for (const auto axis : axes)
+      reduce_dims.push_back(axis < 0 ? axis + rank : axis);
+  }
+
+  mir::Graph *graph = context->getGraph();
+  auto result = createOp<mir::ops::ReduceMeanOp>(graph, inputs[0], reduce_dims,
+                                                 static_cast<bool>(keepdims))
+                    ->getOutput(0);
+
+  context->setNodeOutputs(onnx_node, {result});
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/ReduceMean.h b/compiler/mir-onnx-importer/Op/ReduceMean.h
new file mode 100644
index 000000000..3553c96b5
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/ReduceMean.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_REDUCEMEAN_H
+#define MIR_ONNX_OP_REDUCEMEAN_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertReduceMeanV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_REDUCEMEAN_H
diff --git a/compiler/mir-onnx-importer/Op/Relu.cpp b/compiler/mir-onnx-importer/Op/Relu.cpp
new file mode 100644
index 000000000..72424e847
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Relu.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Relu.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/ReluOp.h"
+
+namespace mir_onnx
+{
+
+// Element-wise ReLU: one input, one output.
+static void convertRelu(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  const auto node_inputs = context->getNodeInputs(onnx_node);
+  assert(node_inputs.size() == 1);
+
+  auto *graph = context->getGraph();
+  auto *activated = createOp<mir::ops::ReluOp>(graph, node_inputs[0])->getOutput(0);
+  context->setNodeOutputs(onnx_node, {activated});
+}
+
+// Relu-1: delegates to the shared converter; no version-specific handling.
+void convertReluV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  convertRelu(onnx_node, context);
+}
+
+// Relu-6: delegates to the shared converter; no version-specific handling.
+void convertReluV6(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  convertRelu(onnx_node, context);
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Relu.h b/compiler/mir-onnx-importer/Op/Relu.h
new file mode 100644
index 000000000..7159f0add
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Relu.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_RELU_H
+#define MIR_ONNX_OP_RELU_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertReluV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertReluV6(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_RELU_H
diff --git a/compiler/mir-onnx-importer/Op/Reshape.cpp b/compiler/mir-onnx-importer/Op/Reshape.cpp
new file mode 100644
index 000000000..5cd4985e2
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Reshape.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Reshape.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/Tensor.h"
+#include "mir/ShapeRange.h"
+
+#include "mir/ops/ConstantOp.h"
+#include "mir/ops/ReshapeOp.h"
+
+namespace mir_onnx
+{
+
+// Reshape-1: the target shape comes from the 'shape' attribute.
+// A dimension value of 0 means "copy the corresponding input dimension".
+// When the attribute is absent or empty the input is passed through unchanged.
+void convertReshapeV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+  mir::Graph *graph = context->getGraph();
+  // consumed_inputs attribute not used
+  const auto *shape_attr = findAttribute(onnx_node, "shape");
+  if (shape_attr && shape_attr->ints_size() > 0)
+  {
+    mir::Shape in_shape = inputs[0]->getShape();
+    mir::Shape out_shape(shape_attr->ints_size());
+    for (int32_t index = 0; index < out_shape.rank(); index++)
+    {
+      const auto dim_value = shape_attr->ints(index);
+      // 0 keeps the input dimension at the same index.
+      if (dim_value == 0)
+        out_shape.dim(index) = in_shape.dim(index);
+      else
+        out_shape.dim(index) = dim_value;
+    }
+
+    auto result = createOp<mir::ops::ReshapeOp>(graph, inputs[0], out_shape)->getOutput(0);
+
+    context->setNodeOutputs(onnx_node, {result});
+  }
+  else // dimension value is unchanged
+  {
+    context->setNodeOutputs(onnx_node, {inputs[0]});
+  }
+}
+
+// Reshape-5: the target shape moved from an attribute to a second input tensor,
+// which must be a compile-time constant here. In the shape tensor, 0 copies the
+// corresponding input dimension and -1 marks the single inferred dimension.
+void convertReshapeV5(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+  mir::Graph *graph = context->getGraph();
+  // The original shape
+  const auto &in_shape = inputs[0]->getShape();
+
+  // Input tensor describing the new shape
+  auto *op = dynamic_cast<mir::ops::ConstantOp *>(inputs[1]->getNode());
+  assert(op && "We support only constant shape input");
+  auto shape_tensor = op->getValue();
+  mir::Shape shape_tensor_shape = (shape_tensor).getShape();
+  assert(shape_tensor_shape.rank() == 1);
+  // The rank of the new shape
+  auto cnt = shape_tensor_shape.numElements();
+  // The vector to build the new shape from
+  std::vector<int32_t> shape_vector(cnt);
+  mir::ShapeRange out_range(shape_tensor_shape);
+  // NOTE(review): the shape tensor is read as INT64, as specified for Reshape-5.
+  mir::Tensor<int64_t> tensor_accessor(shape_tensor);
+
+  int i = 0;
+  for (auto idx : out_range)
+  {
+    // 0 -> copy the input dimension at the same index; -1 -> infer (autoDim).
+    if (tensor_accessor.at(idx) == 0)
+      shape_vector[i] = in_shape.dim(i);
+    else if (tensor_accessor.at(idx) == -1)
+      shape_vector[i] = mir::Shape::autoDim;
+    else
+      shape_vector[i] = tensor_accessor.at(idx);
+    i++;
+  }
+  auto out_shape = mir::Shape(shape_vector);
+  auto result = createOp<mir::ops::ReshapeOp>(graph, inputs[0], out_shape)->getOutput(0);
+
+  context->setNodeOutputs(onnx_node, {result});
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Reshape.h b/compiler/mir-onnx-importer/Op/Reshape.h
new file mode 100644
index 000000000..4ebbcb7a7
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Reshape.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_RESHAPE_H
+#define MIR_ONNX_OP_RESHAPE_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertReshapeV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertReshapeV5(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_RESHAPE_H
diff --git a/compiler/mir-onnx-importer/Op/Shape.cpp b/compiler/mir-onnx-importer/Op/Shape.cpp
new file mode 100644
index 000000000..8cc250b6e
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Shape.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Shape.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/TensorVariant.h"
+
+#include "mir/ops/ConstantOp.h"
+
+namespace mir_onnx
+{
+
+// Shape-1: emits a 1-D INT64 constant holding the input tensor's dimensions.
+void convertShapeV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  const auto node_inputs = context->getNodeInputs(onnx_node);
+  mir::Graph *graph = context->getGraph();
+
+  const auto &in_shape = node_inputs[0]->getShape();
+  const int rank = in_shape.rank();
+
+  // Collect the dimension extents into a contiguous INT64 buffer.
+  std::vector<int64_t> dims(static_cast<std::size_t>(rank));
+  for (int axis = 0; axis < rank; ++axis)
+    dims[axis] = in_shape.dim(axis);
+
+  mir::Shape output_shape{rank};
+  mir::TensorVariant tensor({mir::DataType::INT64, output_shape}, dims.data());
+  auto *result = createOp<mir::ops::ConstantOp>(graph, tensor)->getOutput(0);
+
+  context->setNodeOutputs(onnx_node, {result});
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Shape.h b/compiler/mir-onnx-importer/Op/Shape.h
new file mode 100644
index 000000000..e427d0330
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Shape.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_SHAPE_H
+#define MIR_ONNX_OP_SHAPE_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertShapeV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_SHAPE_H
diff --git a/compiler/mir-onnx-importer/Op/Sigmoid.cpp b/compiler/mir-onnx-importer/Op/Sigmoid.cpp
new file mode 100644
index 000000000..3db547186
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Sigmoid.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Sigmoid.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/SigmoidOp.h"
+
+namespace mir_onnx
+{
+
+// Element-wise sigmoid: one input, one output.
+static void convertSigmoid(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  const auto node_inputs = context->getNodeInputs(onnx_node);
+  assert(node_inputs.size() == 1);
+
+  auto *graph = context->getGraph();
+  auto *activated = createOp<mir::ops::SigmoidOp>(graph, node_inputs[0])->getOutput(0);
+  context->setNodeOutputs(onnx_node, {activated});
+}
+
+// Sigmoid-1: delegates to the shared converter; no version-specific handling.
+void convertSigmoidV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  convertSigmoid(onnx_node, context);
+}
+
+// Sigmoid-6: delegates to the shared converter; no version-specific handling.
+void convertSigmoidV6(const onnx::NodeProto &onnx_node, ConverterContext *context)
+{
+  convertSigmoid(onnx_node, context);
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Sigmoid.h b/compiler/mir-onnx-importer/Op/Sigmoid.h
new file mode 100644
index 000000000..e2d85298f
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Sigmoid.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_SIGMOID_H
+#define MIR_ONNX_OP_SIGMOID_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertSigmoidV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertSigmoidV6(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_SIGMOID_H
diff --git a/compiler/mir-onnx-importer/Op/Softmax.cpp b/compiler/mir-onnx-importer/Op/Softmax.cpp
new file mode 100644
index 000000000..1a2ca04ae
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Softmax.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Softmax.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/ops/SoftmaxOp.h"
+
+namespace mir_onnx
+{
+
+void convertSoftmaxV1(const onnx::NodeProto &onnx_node, ConverterContext *context) // ONNX Softmax, opset 1
+{
+  std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+  mir::Graph *graph = context->getGraph();
+
+  // 1 is the default axis number.
+  const auto axis = getAttributeValue<std::int64_t>(onnx_node, "axis", 1);
+
+  auto result = createOp<mir::ops::SoftmaxOp>(graph, inputs[0], axis)->getOutput(0); // NOTE(review): ONNX Softmax-1 flattens to 2D at 'axis'; assumes SoftmaxOp's axis semantics match -- confirm
+
+  context->setNodeOutputs(onnx_node, {result});
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Softmax.h b/compiler/mir-onnx-importer/Op/Softmax.h
new file mode 100644
index 000000000..23d14c123
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Softmax.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_SOFTMAX_H
+#define MIR_ONNX_OP_SOFTMAX_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertSoftmaxV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_SOFTMAX_H
diff --git a/compiler/mir-onnx-importer/Op/Sqrt.cpp b/compiler/mir-onnx-importer/Op/Sqrt.cpp
new file mode 100644
index 000000000..70ef252fe
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Sqrt.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Sqrt.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/SqrtOp.h"
+
+namespace mir_onnx
+{
+
+static void convertSqrt(const onnx::NodeProto &onnx_node, ConverterContext *context) // shared implementation for all supported Sqrt opsets
+{
+  std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+  mir::Graph *graph = context->getGraph();
+  assert(inputs.size() == 1); // Sqrt is unary
+  auto result = createOp<mir::ops::SqrtOp>(graph, inputs[0])->getOutput(0); // element-wise square root
+
+  context->setNodeOutputs(onnx_node, {result});
+}
+
+void convertSqrtV1(const onnx::NodeProto &onnx_node, ConverterContext *context) // ONNX Sqrt, opset 1
+{
+  convertSqrt(onnx_node, context); // no version-specific attributes handled; delegate to the shared converter
+}
+
+void convertSqrtV6(const onnx::NodeProto &onnx_node, ConverterContext *context) // ONNX Sqrt, opset 6
+{
+  convertSqrt(onnx_node, context); // no version-specific attributes handled; delegate to the shared converter
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Sqrt.h b/compiler/mir-onnx-importer/Op/Sqrt.h
new file mode 100644
index 000000000..51815c93c
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Sqrt.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_SQRT_H
+#define MIR_ONNX_OP_SQRT_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertSqrtV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertSqrtV6(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_SQRT_H
diff --git a/compiler/mir-onnx-importer/Op/Sub.cpp b/compiler/mir-onnx-importer/Op/Sub.cpp
new file mode 100644
index 000000000..0c3251909
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Sub.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Sub.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/ops/SubOp.h"
+
+namespace mir_onnx
+{
+
+void convertSubV1(const onnx::NodeProto &onnx_node, ConverterContext *context) // ONNX Sub, opset 1
+{
+  // consumed_inputs attribute not used
+  convertSubV6(onnx_node, context); // otherwise identical to the opset-6 form
+}
+
+void convertSubV6(const onnx::NodeProto &onnx_node, ConverterContext *context) // ONNX Sub, opset 6
+{
+  // broadcast attribute not used
+  const auto *axis = findAttribute(onnx_node, "axis"); // 'axis' steers ONNX legacy broadcasting; presumably not representable here, hence the rejection below
+  if (axis != nullptr)
+    throw std::runtime_error("Not supported axis attribute in Sub operation!");
+
+  convertSubV7(onnx_node, context); // remaining semantics match the opset-7 form
+}
+
+void convertSubV7(const onnx::NodeProto &onnx_node, ConverterContext *context) // ONNX Sub, opset 7
+{
+  std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+  mir::Graph *graph = context->getGraph();
+
+  auto result = createOp<mir::ops::SubOp>(graph, inputs[0], inputs[1])->getOutput(0); // binary element-wise subtraction
+
+  context->setNodeOutputs(onnx_node, {result});
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Sub.h b/compiler/mir-onnx-importer/Op/Sub.h
new file mode 100644
index 000000000..b521e71ae
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Sub.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_SUB_H
+#define MIR_ONNX_OP_SUB_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertSubV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertSubV6(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertSubV7(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_SUB_H
diff --git a/compiler/mir-onnx-importer/Op/Sum.cpp b/compiler/mir-onnx-importer/Op/Sum.cpp
new file mode 100644
index 000000000..c3a8dacca
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Sum.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Sum.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/AddOp.h"
+
+namespace mir_onnx
+{
+
+void convertSumV8(const onnx::NodeProto &onnx_node, ConverterContext *context) // ONNX Sum, opset 8 (variadic)
+{
+  std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+  mir::Graph *graph = context->getGraph();
+  assert(inputs.size() >= 1); // Sum requires at least one input
+
+  auto result = inputs[0]; // a single input passes through unchanged
+  for (int i = 1; i < static_cast<int>(inputs.size()); ++i)
+  {
+    result = createOp<mir::ops::AddOp>(graph, result, inputs[i])->getOutput(0); // lower variadic Sum to a left-folded chain of binary adds
+  }
+
+  context->setNodeOutputs(onnx_node, {result});
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Sum.h b/compiler/mir-onnx-importer/Op/Sum.h
new file mode 100644
index 000000000..74ceb6dd7
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Sum.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_SUM_H
+#define MIR_ONNX_OP_SUM_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertSumV8(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_SUM_H
diff --git a/compiler/mir-onnx-importer/Op/Tanh.cpp b/compiler/mir-onnx-importer/Op/Tanh.cpp
new file mode 100644
index 000000000..c7faf157c
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Tanh.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Tanh.h"
+
+#include "ONNXHelpers.h"
+
+#include "mir/ops/TanhOp.h"
+
+namespace mir_onnx
+{
+
+static void convertTanh(const onnx::NodeProto &onnx_node, ConverterContext *context) // shared implementation for all supported Tanh opsets
+{
+  std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+  mir::Graph *graph = context->getGraph();
+  assert(inputs.size() == 1); // Tanh is unary
+  auto result = createOp<mir::ops::TanhOp>(graph, inputs[0])->getOutput(0); // element-wise hyperbolic tangent
+
+  context->setNodeOutputs(onnx_node, {result});
+}
+
+void convertTanhV1(const onnx::NodeProto &onnx_node, ConverterContext *context) // ONNX Tanh, opset 1
+{
+  convertTanh(onnx_node, context); // no version-specific attributes handled; delegate to the shared converter
+}
+
+void convertTanhV6(const onnx::NodeProto &onnx_node, ConverterContext *context) // ONNX Tanh, opset 6
+{
+  convertTanh(onnx_node, context); // no version-specific attributes handled; delegate to the shared converter
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Tanh.h b/compiler/mir-onnx-importer/Op/Tanh.h
new file mode 100644
index 000000000..5d3199541
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Tanh.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_TANH_H
+#define MIR_ONNX_OP_TANH_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertTanhV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertTanhV6(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_TANH_H
diff --git a/compiler/mir-onnx-importer/Op/Transpose.cpp b/compiler/mir-onnx-importer/Op/Transpose.cpp
new file mode 100644
index 000000000..82bb2f122
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Transpose.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Transpose.h"
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/ops/TransposeOp.h"
+
+#include <numeric>
+
+namespace mir_onnx
+{
+
+void convertTransposeV1(const onnx::NodeProto &onnx_node, ConverterContext *context) // ONNX Transpose, opset 1
+{
+  const auto inputs = context->getNodeInputs(onnx_node);
+  mir::Graph *graph = context->getGraph();
+
+  assert(inputs.size() == 1); // Transpose is unary
+  auto input = inputs[0];
+
+  const auto num_axes = input->getShape().rank(); // rank of the single input tensor
+  std::vector<std::size_t> axis_order(num_axes);
+  const auto *perm_attr = findAttribute(onnx_node, "perm"); // optional permutation attribute
+
+  if (perm_attr == nullptr)
+  {
+    // Reverse the dimensions.
+    std::iota(axis_order.rbegin(), axis_order.rend(), 0); // ONNX default when 'perm' is absent
+  }
+  else
+  {
+    const auto perm = getAttributeValue<std::vector<std::int64_t>>(*perm_attr);
+    assert(perm.size() == num_axes); // NOTE(review): size_t vs int comparison; presumably fine for small non-negative ranks
+    std::copy(perm.cbegin(), perm.cend(), axis_order.begin()); // narrows int64 attribute values into size_t axis indices
+  }
+
+  auto result = createOp<mir::ops::TransposeOp>(graph, input, axis_order)->getOutput(0);
+
+  context->setNodeOutputs(onnx_node, {result});
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Transpose.h b/compiler/mir-onnx-importer/Op/Transpose.h
new file mode 100644
index 000000000..1f8c4369a
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Transpose.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_TRANSPOSE_H
+#define MIR_ONNX_OP_TRANSPOSE_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertTransposeV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_TRANSPOSE_H
diff --git a/compiler/mir-onnx-importer/Op/Unsqueeze.cpp b/compiler/mir-onnx-importer/Op/Unsqueeze.cpp
new file mode 100644
index 000000000..1b5995532
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Unsqueeze.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Unsqueeze.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/ops/ReshapeOp.h"
+
+namespace mir_onnx
+{
+
+void convertUnsqueezeV1(const onnx::NodeProto &onnx_node, ConverterContext *context) // ONNX Unsqueeze, opset 1: insert size-1 dims at 'axes'
+{
+  std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+  mir::Graph *graph = context->getGraph();
+  const auto axes = getAttributeValue<std::vector<std::int64_t>>(onnx_node, "axes"); // required attribute
+  assert(!axes.empty());
+  const mir::Shape &input_shape = inputs[0]->getShape();
+  const int out_rank = input_shape.rank() + static_cast<int>(axes.size()); // each listed axis contributes one new dimension
+  mir::Shape out_shape(out_rank);
+  auto ints_iterator = axes.cbegin(); // assumes 'axes' is sorted ascending, unique, and non-negative -- TODO confirm upstream guarantees
+  int j = 0; // index into the original (input) dimensions
+  for (int i = 0; i < out_rank; i++)
+  {
+    if (ints_iterator < axes.cend() && i == *ints_iterator)
+    {
+      out_shape.dim(i) = 1; // inserted axis
+      ints_iterator++;
+    }
+    else
+    {
+      out_shape.dim(i) = input_shape.dim(j); // carry over the next input dimension
+      j++;
+    }
+  }
+  auto result = createOp<mir::ops::ReshapeOp>(graph, inputs[0], out_shape)->getOutput(0); // unsqueeze is a pure reshape: data is untouched
+
+  context->setNodeOutputs(onnx_node, {result});
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Unsqueeze.h b/compiler/mir-onnx-importer/Op/Unsqueeze.h
new file mode 100644
index 000000000..46fea97ee
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Unsqueeze.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_UNSQUEEZE_H
+#define MIR_ONNX_OP_UNSQUEEZE_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertUnsqueezeV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_UNSQUEEZE_H
diff --git a/compiler/mir-onnx-importer/Op/Upsample.cpp b/compiler/mir-onnx-importer/Op/Upsample.cpp
new file mode 100644
index 000000000..49a555647
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Upsample.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Upsample.h"
+
+#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/Tensor.h"
+
+#include "mir/ops/ConstantOp.h"
+#include "mir/ops/ResizeOp.h"
+
+namespace mir_onnx
+{
+
+void convertUpsampleV1(const onnx::NodeProto &onnx_node, ConverterContext *context) // ONNX Upsample, opset 1 (scales as scalar attributes)
+{
+  std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+  mir::Graph *graph = context->getGraph();
+
+  // "nearest" is the default mode.
+  std::string mode = getAttributeValue<std::string>(onnx_node, "mode", "nearest");
+  assert(mode == "nearest" && "Unsupported upscale mode!");
+
+  const float h_scale = getAttributeValue<float>(onnx_node, "height_scale", 0.0f); // required
+  const float w_scale = getAttributeValue<float>(onnx_node, "width_scale", 0.0f); // required
+  if (h_scale < 1.0f || w_scale < 1.0f)
+    throw std::runtime_error("Wrong scale attributes!"); // the 0.0f defaults also land here when the attributes are missing
+
+  assert(inputs[0]->getShape().rank() == 4 && "Only rank 4 is supported");
+  std::vector<float> scales_vector(4);
+  // NCHW
+  scales_vector.at(0) = 1.0f; // batch is never scaled
+  scales_vector.at(1) = 1.0f; // channels are never scaled
+  scales_vector.at(2) = h_scale;
+  scales_vector.at(3) = w_scale;
+
+  auto result =
+      createOp<mir::ops::ResizeOp>(graph, inputs[0],
+                                   mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
+          ->getOutput(0);
+
+  context->setNodeOutputs(onnx_node, {result});
+}
+
+void convertUpsampleV7(const onnx::NodeProto &onnx_node, ConverterContext *context) // ONNX Upsample, opset 7 ('scales' as a float-list attribute)
+{
+  std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+  mir::Graph *graph = context->getGraph();
+
+  // "nearest" is the default mode.
+  std::string mode = getAttributeValue<std::string>(onnx_node, "mode", "nearest");
+  assert(mode == "nearest" && "Unsupported upscale mode!");
+
+  const auto *scales_attr = findAttribute(onnx_node, "scales"); // required in opset 7
+  if (!scales_attr)
+    throw std::runtime_error("Not enough required scales attribute!");
+
+  if (scales_attr->floats_size() != inputs[0]->getShape().rank()) // one scale per input dimension
+    throw std::runtime_error(
+        "Number of elements of scales should be the same as the rank of input");
+
+  assert(inputs[0]->getShape().rank() == 4 && "Only rank 4 is supported");
+  std::vector<float> scales_vector(4);
+  // NCHW
+  scales_vector.at(0) = scales_attr->floats(0);
+  scales_vector.at(1) = scales_attr->floats(1);
+  scales_vector.at(2) = scales_attr->floats(2);
+  scales_vector.at(3) = scales_attr->floats(3);
+
+  auto result =
+      createOp<mir::ops::ResizeOp>(graph, inputs[0],
+                                   mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
+          ->getOutput(0);
+
+  context->setNodeOutputs(onnx_node, {result});
+}
+
+void convertUpsampleV9(const onnx::NodeProto &onnx_node, ConverterContext *context) // ONNX Upsample, opset 9 ('scales' as a second input tensor)
+{
+  std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+  mir::Graph *graph = context->getGraph();
+
+  // "nearest" is the default mode.
+  const auto mode = getAttributeValue<std::string>(onnx_node, "mode", "nearest");
+  assert(mode == "nearest" && "Unsupported upscale mode!");
+
+  // relies on attributes being lifted to constants (ONNX optimization pass)
+  assert(inputs.size() > 1); // X plus the 'scales' input
+  auto *scales = dynamic_cast<mir::ops::ConstantOp *>(inputs[1]->getNode()); // dynamic scales are not supported
+  assert(scales && "Weights could be a constant tensor only");
+  auto scales_tensor = mir::Tensor<float>(scales->getValue());
+  int rank = inputs[0]->getShape().rank();
+  assert(scales_tensor.getShape().numElements() == rank &&
+         "The number of elements of 'scales' should be the same as the rank of input 'X'");
+  assert(rank == 4 && "Only rank 4 is supported");
+  std::vector<float> scales_vector(4);
+  assert(scales_tensor.getShape().rank() == 1 && "Scales are a 1d tensor");
+  for (int i = 0; i < scales_tensor.getShape().numElements(); i++) // copy the constant tensor into a plain vector for ResizeOp
+    scales_vector[i] = scales_tensor.atOffset(i);
+
+  auto result =
+      createOp<mir::ops::ResizeOp>(graph, inputs[0],
+                                   mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
+          ->getOutput(0);
+
+  context->setNodeOutputs(onnx_node, {result});
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/Op/Upsample.h b/compiler/mir-onnx-importer/Op/Upsample.h
new file mode 100644
index 000000000..99600eede
--- /dev/null
+++ b/compiler/mir-onnx-importer/Op/Upsample.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_OP_UPSAMPLE_H
+#define MIR_ONNX_OP_UPSAMPLE_H
+
+#include "ONNXNodeConverterRegistry.h"
+
+namespace mir_onnx
+{
+
+void convertUpsampleV1(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertUpsampleV7(const onnx::NodeProto &onnx_node, ConverterContext *context);
+void convertUpsampleV9(const onnx::NodeProto &onnx_node, ConverterContext *context);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_OP_UPSAMPLE_H
diff --git a/compiler/mir-onnx-importer/requires.cmake b/compiler/mir-onnx-importer/requires.cmake
new file mode 100644
index 000000000..52a7837df
--- /dev/null
+++ b/compiler/mir-onnx-importer/requires.cmake
@@ -0,0 +1,2 @@
+require("mir")
+require("mir-interpreter")