path: root/compiler/mir-onnx-importer
author Sergei Barannikov/AI Tools Lab/SRR/Engineer/Samsung Electronics <s.barannikov@samsung.com> 2019-08-21 04:02:53 +0900
committer Alexander Efimov/AI Tools Lab/./Samsung Electronics <a.efimov@samsung.com> 2019-08-20 22:02:53 +0300
commit 7103b3aacc4371cd61eca77ce9750aabe89f7b34 (patch)
tree 23c4a73c31d5d8947aa0f629de30119a6745f445 /compiler/mir-onnx-importer
parent 725c030eb1100ee7c204a028cb9cfa467b66eddb (diff)
[mir_onnx] Rework attribute parsing (#6651)
* Make attribute parsing functions templated and extract them to a separate file.
* Move definition of `getKernelStridesPadding` into cpp file.

Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
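The heart of the rework is the AttributeHelpers.h header added below: the primary getAttributeValue template is declared `= delete`, so requesting an attribute as an unsupported type is rejected at compile time, while explicit specializations cover each supported proto type. A minimal standalone sketch of that pattern (toy Attribute struct and hypothetical names, no protobuf dependency):

#include <cassert>
#include <cstdint>
#include <iostream>

struct Attribute // stand-in for onnx::AttributeProto
{
  enum Type { FLOAT, INT } type;
  float f;
  std::int64_t i;
};

// Primary template is deleted: getValue<T> only exists for specialized T.
template <typename T> T getValue(const Attribute &attr) = delete;

template <> float getValue(const Attribute &attr)
{
  assert(attr.type == Attribute::FLOAT);
  return attr.f;
}

template <> std::int64_t getValue(const Attribute &attr)
{
  assert(attr.type == Attribute::INT);
  return attr.i;
}

int main()
{
  const Attribute a{Attribute::INT, 0.0f, 42};
  std::cout << getValue<std::int64_t>(a) << '\n'; // prints 42
  // getValue<double>(a); // compile error: uses the deleted primary template
}
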
Diffstat (limited to 'compiler/mir-onnx-importer')
-rw-r--r-- compiler/mir-onnx-importer/AttributeHelpers.h      | 97
-rw-r--r-- compiler/mir-onnx-importer/CMakeLists.txt          |  1
-rw-r--r-- compiler/mir-onnx-importer/ONNXHelpers.cpp         | 34
-rw-r--r-- compiler/mir-onnx-importer/ONNXHelpers.h           | 78
-rw-r--r-- compiler/mir-onnx-importer/Op/Add.cpp              |  1
-rw-r--r-- compiler/mir-onnx-importer/Op/AveragePool.cpp      |  7
-rw-r--r-- compiler/mir-onnx-importer/Op/BatchNormalization.cpp |  7
-rw-r--r-- compiler/mir-onnx-importer/Op/Concat.cpp           |  3
-rw-r--r-- compiler/mir-onnx-importer/Op/Constant.cpp         | 10
-rw-r--r-- compiler/mir-onnx-importer/Op/Conv.cpp             | 11
-rw-r--r-- compiler/mir-onnx-importer/Op/Gather.cpp           |  3
-rw-r--r-- compiler/mir-onnx-importer/Op/Gemm.cpp             |  9
-rw-r--r-- compiler/mir-onnx-importer/Op/MaxPool.cpp          |  7
-rw-r--r-- compiler/mir-onnx-importer/Op/Pad.cpp              | 22
-rw-r--r-- compiler/mir-onnx-importer/Op/Reshape.cpp          |  2
-rw-r--r-- compiler/mir-onnx-importer/Op/Softmax.cpp          |  3
-rw-r--r-- compiler/mir-onnx-importer/Op/Unsqueeze.cpp        | 11
-rw-r--r-- compiler/mir-onnx-importer/Op/Upsample.cpp         | 11
18 files changed, 191 insertions(+), 126 deletions(-)
diff --git a/compiler/mir-onnx-importer/AttributeHelpers.h b/compiler/mir-onnx-importer/AttributeHelpers.h
new file mode 100644
index 000000000..24e14d46f
--- /dev/null
+++ b/compiler/mir-onnx-importer/AttributeHelpers.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_ATTRIBUTE_HELPERS_H
+#define MIR_ONNX_ATTRIBUTE_HELPERS_H
+
+#include "onnx/onnx.pb.h"
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace mir_onnx
+{
+
+template <typename T> T getAttributeValue(const onnx::AttributeProto &attribute) = delete;
+
+template <> inline float getAttributeValue(const onnx::AttributeProto &attribute)
+{
+ assert(attribute.type() == onnx::AttributeProto::FLOAT);
+ return attribute.f();
+}
+
+template <> inline std::int64_t getAttributeValue(const onnx::AttributeProto &attribute)
+{
+ assert(attribute.type() == onnx::AttributeProto::INT);
+ return attribute.i();
+}
+
+template <> inline std::string getAttributeValue(const onnx::AttributeProto &attribute)
+{
+ assert(attribute.type() == onnx::AttributeProto::STRING);
+ return attribute.s();
+}
+
+template <> inline onnx::TensorProto getAttributeValue(const onnx::AttributeProto &attribute)
+{
+ assert(attribute.type() == onnx::AttributeProto::TENSOR);
+ return attribute.t();
+}
+
+template <>
+inline std::vector<std::int64_t> getAttributeValue(const onnx::AttributeProto &attribute)
+{
+ assert(attribute.type() == onnx::AttributeProto::INTS);
+ return {attribute.ints().cbegin(), attribute.ints().cend()};
+}
+
+inline const onnx::AttributeProto *findAttribute(const onnx::NodeProto &node,
+ const std::string &name)
+{
+ const auto &attributes = node.attribute();
+ const auto it = std::find_if(
+ attributes.cbegin(), attributes.cend(),
+ [&name](const onnx::AttributeProto &attribute) { return attribute.name() == name; });
+ if (it == attributes.cend())
+ return nullptr;
+ return &*it;
+}
+
+template <typename T> T getAttributeValue(const onnx::NodeProto &node, const std::string &name)
+{
+ const auto *attribute = findAttribute(node, name);
+ if (attribute == nullptr)
+ throw std::runtime_error("Cannot find attribute '" + name + "' in node '" + node.name() + "'.");
+ return getAttributeValue<T>(*attribute);
+}
+
+template <typename T>
+T getAttributeValue(const onnx::NodeProto &node, const std::string &name, T default_value)
+{
+ const auto *attribute = findAttribute(node, name);
+ if (attribute == nullptr)
+ return std::move(default_value);
+ return getAttributeValue<T>(*attribute);
+}
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_ATTRIBUTE_HELPERS_H
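The two node-level overloads give converters a uniform way to read required and optional attributes. A hypothetical usage sketch (not part of the commit), assuming the generated onnx protobuf headers and AttributeHelpers.h are on the include path; the attribute setters are the standard protobuf-generated accessors:

#include "AttributeHelpers.h"

#include <iostream>

int main()
{
  onnx::NodeProto node;
  node.set_name("conv0");
  auto *attr = node.add_attribute();
  attr->set_name("group");
  attr->set_type(onnx::AttributeProto::INT);
  attr->set_i(2);

  // Required form: throws std::runtime_error if the attribute is absent.
  const auto group = mir_onnx::getAttributeValue<std::int64_t>(node, "group");
  // Optional form: falls back to the supplied default.
  const auto alpha = mir_onnx::getAttributeValue<float>(node, "alpha", 1.0f);
  std::cout << group << ' ' << alpha << '\n'; // prints "2 1"
}
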
diff --git a/compiler/mir-onnx-importer/CMakeLists.txt b/compiler/mir-onnx-importer/CMakeLists.txt
index 1a3347695..10de9dfed 100644
--- a/compiler/mir-onnx-importer/CMakeLists.txt
+++ b/compiler/mir-onnx-importer/CMakeLists.txt
@@ -20,6 +20,7 @@ target_include_directories(mir_onnx_proto PUBLIC ${MIR_ONNX_PROTO_INCLUDE_DIRS})
target_link_libraries(mir_onnx_proto PUBLIC libprotobuf)
set(MIR_ONNX_IMPORTER_SOURCES
+ AttributeHelpers.h
ONNXHelpers.cpp
ONNXHelpers.h
ONNXImporterImpl.cpp
diff --git a/compiler/mir-onnx-importer/ONNXHelpers.cpp b/compiler/mir-onnx-importer/ONNXHelpers.cpp
index 4e4f1b0a2..2035918e1 100644
--- a/compiler/mir-onnx-importer/ONNXHelpers.cpp
+++ b/compiler/mir-onnx-importer/ONNXHelpers.cpp
@@ -15,6 +15,9 @@
*/
#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
+
+#include "mir/ShapeRange.h"
namespace mir_onnx
{
@@ -124,4 +127,35 @@ mir::TensorVariant createTensor(const onnx::TensorProto *tensor)
return mir::TensorVariant(type, shape, src_data);
}
+void getKernelStridesPadding(const onnx::NodeProto &onnx_node, KernelStridesPadding &cdata)
+{
+ const auto kernel_shape = getAttributeValue<std::vector<std::int64_t>>(onnx_node, "kernel_shape");
+ assert(!kernel_shape.empty());
+ const auto strides = getAttributeValue<std::vector<std::int64_t>>(onnx_node, "strides");
+ assert(!strides.empty());
+ const auto *pads_attribute = findAttribute(onnx_node, "pads");
+
+ cdata.kernel_shape = mir::Shape(kernel_shape.size());
+ for (std::size_t i = 0; i < kernel_shape.size(); ++i)
+ {
+ cdata.kernel_shape.dim(i) = kernel_shape[i];
+ }
+
+ cdata.strides_shape = mir::Shape(strides.size());
+ for (std::size_t i = 0; i < strides.size(); ++i)
+ {
+ cdata.strides_shape.dim(i) = strides[i];
+ }
+
+ if (pads_attribute != nullptr)
+ {
+ const auto pads = getAttributeValue<std::vector<std::int64_t>>(*pads_attribute);
+ assert(pads.size() == 4);
+ cdata.padding_before[0] = pads[0];
+ cdata.padding_before[1] = pads[1];
+ cdata.padding_after[0] = pads[2];
+ cdata.padding_after[1] = pads[3];
+ }
+}
+
} // namespace mir_onnx
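For the 2-D operators handled here, the ONNX `pads` attribute is laid out as {x1_begin, x2_begin, x1_end, x2_end}, which the function above splits into padding_before and padding_after. A standalone sketch of just that split, with a hypothetical pads value:

#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
  // Hypothetical "pads" attribute: 1/2 cells before, 3/4 cells after.
  const std::vector<std::int64_t> pads{1, 2, 3, 4};
  assert(pads.size() == 4);

  const std::int32_t padding_before[2] = {static_cast<std::int32_t>(pads[0]),
                                          static_cast<std::int32_t>(pads[1])};
  const std::int32_t padding_after[2] = {static_cast<std::int32_t>(pads[2]),
                                         static_cast<std::int32_t>(pads[3])};

  std::cout << padding_before[0] << ',' << padding_before[1] << " / "
            << padding_after[0] << ',' << padding_after[1] << '\n'; // 1,2 / 3,4
}
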
diff --git a/compiler/mir-onnx-importer/ONNXHelpers.h b/compiler/mir-onnx-importer/ONNXHelpers.h
index ef00f4303..8ccf1fcd4 100644
--- a/compiler/mir-onnx-importer/ONNXHelpers.h
+++ b/compiler/mir-onnx-importer/ONNXHelpers.h
@@ -19,63 +19,16 @@
#include "mir/Graph.h"
#include "mir/TensorVariant.h"
-#include "mir/ShapeRange.h"
-
#include "mir/ops/TransposeOp.h"
#include "onnx/onnx.pb.h"
-#include <cassert>
-
namespace mir_onnx
{
mir::TensorVariant fixGroupedKernel(int groups, const mir::TensorVariant &folded_kernel);
mir::TensorVariant createTensor(const onnx::TensorProto *tensor);
-inline const onnx::AttributeProto *findAttribute(const onnx::NodeProto &onnx_node,
- const std::string &name)
-{
- for (auto &att : onnx_node.attribute())
- {
- if (att.name() == name)
- {
- return &att;
- }
- }
- return nullptr;
-}
-
-inline int64_t getIntAttribute(const onnx::NodeProto &onnx_node, const std::string &name,
- const int64_t default_value)
-{
- auto result = findAttribute(onnx_node, name);
- if (!result)
- return default_value;
- assert(result->type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_INT);
- return result->i();
-}
-
-inline std::string getStringAttribute(const onnx::NodeProto &onnx_node, const std::string &name,
- const std::string &default_value)
-{
- auto result = findAttribute(onnx_node, name);
- if (!result)
- return default_value;
- assert(result->type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_STRING);
- return result->s();
-}
-
-inline float getFloatAttribute(const onnx::NodeProto &onnx_node, const std::string &name,
- const float default_value)
-{
- auto result = findAttribute(onnx_node, name);
- if (!result)
- return default_value;
- assert(result->type() == onnx::AttributeProto_AttributeType::AttributeProto_AttributeType_FLOAT);
- return result->f();
-}
-
struct KernelStridesPadding
{
mir::Shape kernel_shape;
@@ -84,36 +37,7 @@ struct KernelStridesPadding
std::vector<int32_t> padding_after{0, 0};
};
-inline void getKernelStridesPadding(const onnx::NodeProto &onnx_node, KernelStridesPadding &cdata)
-{
- auto *kshape = findAttribute(onnx_node, "kernel_shape");
- assert(kshape && kshape->ints_size());
- auto *strides = findAttribute(onnx_node, "strides");
- assert(strides && strides->ints_size());
- auto *pads = findAttribute(onnx_node, "pads");
-
- cdata.kernel_shape = mir::Shape(kshape->ints_size());
- for (int i = 0; i < kshape->ints_size(); ++i)
- {
- cdata.kernel_shape.dim(i) = kshape->ints(i);
- }
- cdata.strides_shape = mir::Shape(strides->ints_size());
- for (int i = 0; i < strides->ints_size(); ++i)
- {
- cdata.strides_shape.dim(i) = strides->ints(i);
- }
-
- if (pads)
- {
- assert(pads->ints_size() == 4);
- cdata.padding_before[0] = pads->ints(0);
- cdata.padding_before[1] = pads->ints(1);
- // TODO: ONNX padding could be for the beginning and ending along each axis that's why we
- // should select the interesting ones.
- cdata.padding_after[0] = pads->ints(2);
- cdata.padding_after[1] = pads->ints(3);
- }
-}
+void getKernelStridesPadding(const onnx::NodeProto &onnx_node, KernelStridesPadding &cdata);
template <typename OpType, typename... Types>
mir::Operation *createOp(mir::Graph *graph, Types &&... args)
diff --git a/compiler/mir-onnx-importer/Op/Add.cpp b/compiler/mir-onnx-importer/Op/Add.cpp
index a9542de6e..e00e4f4ff 100644
--- a/compiler/mir-onnx-importer/Op/Add.cpp
+++ b/compiler/mir-onnx-importer/Op/Add.cpp
@@ -17,6 +17,7 @@
#include "Add.h"
#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
#include "mir/ops/AddOp.h"
diff --git a/compiler/mir-onnx-importer/Op/AveragePool.cpp b/compiler/mir-onnx-importer/Op/AveragePool.cpp
index ca1593f59..e1ea2a159 100644
--- a/compiler/mir-onnx-importer/Op/AveragePool.cpp
+++ b/compiler/mir-onnx-importer/Op/AveragePool.cpp
@@ -17,6 +17,7 @@
#include "AveragePool.h"
#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
#include "mir/ops/PoolOp.h"
@@ -40,7 +41,7 @@ void AveragePoolNodeConverter::convert(const onnx::NodeProto &onnx_node,
void AveragePoolNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
ConverterContext *context) const
{
- const auto auto_pad = getStringAttribute(onnx_node, "auto_pad", "NOTSET");
+ const auto auto_pad = getAttributeValue<std::string>(onnx_node, "auto_pad", "NOTSET");
// auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID.
if (auto_pad != "NOTSET")
throw std::runtime_error("Supported only explicit padding!");
@@ -69,7 +70,7 @@ void AveragePoolNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
void AveragePoolNodeConverter::convertV7(const onnx::NodeProto &onnx_node,
ConverterContext *context) const
{
- const auto count_include_pad = getIntAttribute(onnx_node, "count_include_pad", 0);
+ const auto count_include_pad = getAttributeValue<int64_t>(onnx_node, "count_include_pad", 0);
if (count_include_pad != 0)
throw std::runtime_error("Not supported count_include_pad attribute!");
@@ -79,7 +80,7 @@ void AveragePoolNodeConverter::convertV7(const onnx::NodeProto &onnx_node,
void AveragePoolNodeConverter::convertV10(const onnx::NodeProto &onnx_node,
ConverterContext *context) const
{
- const auto ceil_mode = getIntAttribute(onnx_node, "ceil_mode", 0);
+ const auto ceil_mode = getAttributeValue<int64_t>(onnx_node, "ceil_mode", 0);
if (ceil_mode != 0)
throw std::runtime_error("Not supported ceil_mode attribute!");
diff --git a/compiler/mir-onnx-importer/Op/BatchNormalization.cpp b/compiler/mir-onnx-importer/Op/BatchNormalization.cpp
index a6baf5e89..9cc02e3a1 100644
--- a/compiler/mir-onnx-importer/Op/BatchNormalization.cpp
+++ b/compiler/mir-onnx-importer/Op/BatchNormalization.cpp
@@ -17,6 +17,7 @@
#include "BatchNormalization.h"
#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
#include "mir/ShapeRange.h"
#include "mir/Tensor.h"
@@ -56,7 +57,7 @@ void BatchNormalizationNodeConverter::convertV1(const onnx::NodeProto &onnx_node
void BatchNormalizationNodeConverter::convertV6(const onnx::NodeProto &onnx_node,
ConverterContext *context) const
{
- const auto is_test = getIntAttribute(onnx_node, "is_test", 0);
+ const auto is_test = getAttributeValue<int64_t>(onnx_node, "is_test", 0);
if (is_test != 0)
throw std::runtime_error("Not supported is_test attribute!");
@@ -66,7 +67,7 @@ void BatchNormalizationNodeConverter::convertV6(const onnx::NodeProto &onnx_node
void BatchNormalizationNodeConverter::convertV7(const onnx::NodeProto &onnx_node,
ConverterContext *context) const
{
- const auto spatial = getIntAttribute(onnx_node, "spatial", 1);
+ const auto spatial = getAttributeValue<int64_t>(onnx_node, "spatial", 1);
if (spatial != 1)
throw std::runtime_error("Not supported spatial attribute!");
@@ -89,7 +90,7 @@ void BatchNormalizationNodeConverter::convertV9(const onnx::NodeProto &onnx_node
auto var = inputs[4];
// 1e-05f is the default epsilon.
- const float epsilon = getFloatAttribute(onnx_node, "epsilon", 1e-05f);
+ const auto epsilon = getAttributeValue<float>(onnx_node, "epsilon", 1e-05f);
// Y = (X - mean) * scale / sqrt(var + epsilon) + bias =
// = (X + C1) * C2 + bias
diff --git a/compiler/mir-onnx-importer/Op/Concat.cpp b/compiler/mir-onnx-importer/Op/Concat.cpp
index 59ae59036..229cb76d8 100644
--- a/compiler/mir-onnx-importer/Op/Concat.cpp
+++ b/compiler/mir-onnx-importer/Op/Concat.cpp
@@ -17,6 +17,7 @@
#include "Concat.h"
#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
#include "mir/ops/ConcatOp.h"
@@ -40,7 +41,7 @@ void ConcatNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
mir::Graph *graph = context->getGraph();
- const auto axis = getIntAttribute(onnx_node, "axis", 1);
+ const auto axis = getAttributeValue<int64_t>(onnx_node, "axis", 1);
auto result = createOp<mir::ops::ConcatOp>(graph, inputs, axis)->getOutput(0);
diff --git a/compiler/mir-onnx-importer/Op/Constant.cpp b/compiler/mir-onnx-importer/Op/Constant.cpp
index 7c73cb6c9..3a1a9e95b 100644
--- a/compiler/mir-onnx-importer/Op/Constant.cpp
+++ b/compiler/mir-onnx-importer/Op/Constant.cpp
@@ -17,6 +17,7 @@
#include "Constant.h"
#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
#include "mir/TensorVariant.h"
#include "mir/ops/ConstantOp.h"
@@ -41,16 +42,13 @@ void ConstantNodeConverter::convert(const onnx::NodeProto &onnx_node,
void ConstantNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
ConverterContext *context) const
{
- const auto *value_attr = findAttribute(onnx_node, "value");
- if (value_attr == nullptr)
- throw std::runtime_error("Not enough value attribute in Constant operation!");
- assert(value_attr->type() == onnx::AttributeProto_AttributeType_TENSOR);
+ std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
+ mir::Graph *graph = context->getGraph();
+ const auto onnx_tensor = getAttributeValue<onnx::TensorProto>(onnx_node, "value");
const auto &name = onnx_node.output(0);
- const auto &onnx_tensor = value_attr->t();
auto mir_tensor = createTensor(&onnx_tensor);
- mir::Graph *graph = context->getGraph();
auto result = graph->create<mir::ops::ConstantOp>(name, mir_tensor)->getOutput(0);
context->setNodeOutputs(onnx_node, {result});
diff --git a/compiler/mir-onnx-importer/Op/Conv.cpp b/compiler/mir-onnx-importer/Op/Conv.cpp
index 6d9916f67..e25f6b1f5 100644
--- a/compiler/mir-onnx-importer/Op/Conv.cpp
+++ b/compiler/mir-onnx-importer/Op/Conv.cpp
@@ -17,6 +17,7 @@
#include "Conv.h"
#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
#include "mir/TensorUtil.h"
@@ -39,7 +40,7 @@ void ConvNodeConverter::convert(const onnx::NodeProto &onnx_node, ConverterConte
void ConvNodeConverter::convertV1(const onnx::NodeProto &onnx_node, ConverterContext *context) const
{
- const auto auto_pad = getStringAttribute(onnx_node, "auto_pad", "NOTSET");
+ const auto auto_pad = getAttributeValue<std::string>(onnx_node, "auto_pad", "NOTSET");
// auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID.
if (auto_pad != "NOTSET")
throw std::runtime_error("Supported only explicit padding!");
@@ -64,8 +65,8 @@ void ConvNodeConverter::convertV1(const onnx::NodeProto &onnx_node, ConverterCon
auto out_channels = kernel_tensor.getShape().dim(3);
// 1 is the default number of groups.
- int num_groups = getIntAttribute(onnx_node, "group", 1);
- bool is_depthwise = (num_groups != 1) && (in_group_size == 1) && (out_channels == num_groups);
+ const auto group = getAttributeValue<std::int64_t>(onnx_node, "group", 1);
+ bool is_depthwise = (group != 1) && (in_group_size == 1) && (out_channels == group);
mir::Operation::Output *result;
auto transposed_input = convertONNXToMIR(graph, inputs[0]);
@@ -82,8 +83,8 @@ void ConvNodeConverter::convertV1(const onnx::NodeProto &onnx_node, ConverterCon
else
{
// first we need to convert kernel of grouped convolution to appropriate ordinary kernel
- if (num_groups != 1)
- kernel_tensor = fixGroupedKernel(num_groups, kernel_tensor);
+ if (group != 1)
+ kernel_tensor = fixGroupedKernel(group, kernel_tensor);
kernel_tensor = mir::transposeTensor<3, 0, 1, 2>(kernel_tensor);
auto kernel = createOp<mir::ops::ConstantOp>(graph, kernel_tensor)->getOutput(0);
result = createOp<mir::ops::Conv2DOp>(graph, transposed_input, kernel, cdata.strides_shape,
diff --git a/compiler/mir-onnx-importer/Op/Gather.cpp b/compiler/mir-onnx-importer/Op/Gather.cpp
index 425e47869..517007ed0 100644
--- a/compiler/mir-onnx-importer/Op/Gather.cpp
+++ b/compiler/mir-onnx-importer/Op/Gather.cpp
@@ -17,6 +17,7 @@
#include "Gather.h"
#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
#include "mir/ops/GatherOp.h"
@@ -29,7 +30,7 @@ void GatherNodeConverter::convert(const onnx::NodeProto &onnx_node, ConverterCon
mir::Graph *graph = context->getGraph();
// 0 is the default axis number.
- int axis = getIntAttribute(onnx_node, "axis", 0);
+ const auto axis = getAttributeValue<std::int64_t>(onnx_node, "axis", 0);
auto result = createOp<mir::ops::GatherOp>(graph, inputs[0], inputs[1], axis)->getOutput(0);
diff --git a/compiler/mir-onnx-importer/Op/Gemm.cpp b/compiler/mir-onnx-importer/Op/Gemm.cpp
index 7fd78cf84..793dada25 100644
--- a/compiler/mir-onnx-importer/Op/Gemm.cpp
+++ b/compiler/mir-onnx-importer/Op/Gemm.cpp
@@ -17,6 +17,7 @@
#include "Gemm.h"
#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
#include "mir/TensorVariant.h"
@@ -41,12 +42,12 @@ void GemmNodeConverter::convert(const onnx::NodeProto &onnx_node, ConverterConte
auto c = inputs[2];
// 1.0f is the default factor.
- const float alpha_val = getFloatAttribute(onnx_node, "alpha", 1.0f);
- const float beta_val = getFloatAttribute(onnx_node, "beta", 1.0f);
+ const auto alpha_val = getAttributeValue<float>(onnx_node, "alpha", 1.0f);
+ const auto beta_val = getAttributeValue<float>(onnx_node, "beta", 1.0f);
// 0 means that no transpose is needed. It is the default value.
- const bool trans_a = getIntAttribute(onnx_node, "transA", 0);
- const bool trans_b = getIntAttribute(onnx_node, "transB", 0);
+ const auto trans_a = getAttributeValue<std::int64_t>(onnx_node, "transA", 0);
+ const auto trans_b = getAttributeValue<std::int64_t>(onnx_node, "transB", 0);
// Transpose the A and B matrices as needed.
if (trans_a)
diff --git a/compiler/mir-onnx-importer/Op/MaxPool.cpp b/compiler/mir-onnx-importer/Op/MaxPool.cpp
index 0da6e3562..48e35eb8c 100644
--- a/compiler/mir-onnx-importer/Op/MaxPool.cpp
+++ b/compiler/mir-onnx-importer/Op/MaxPool.cpp
@@ -17,6 +17,7 @@
#include "MaxPool.h"
#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
#include "mir/ops/PoolOp.h"
@@ -40,7 +41,7 @@ void MaxPoolNodeConverter::convert(const onnx::NodeProto &onnx_node,
void MaxPoolNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
ConverterContext *context) const
{
- const auto auto_pad = getStringAttribute(onnx_node, "auto_pad", "NOTSET");
+ const auto auto_pad = getAttributeValue<std::string>(onnx_node, "auto_pad", "NOTSET");
// auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID.
if (auto_pad != "NOTSET")
throw std::runtime_error("Supported only explicit padding!");
@@ -69,7 +70,7 @@ void MaxPoolNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
void MaxPoolNodeConverter::convertV8(const onnx::NodeProto &onnx_node,
ConverterContext *context) const
{
- const auto storage_order = getIntAttribute(onnx_node, "storage_order", 0);
+ const auto storage_order = getAttributeValue<int64_t>(onnx_node, "storage_order", 0);
if (storage_order != 0)
throw std::runtime_error("Not supported storage order attribute!");
@@ -79,7 +80,7 @@ void MaxPoolNodeConverter::convertV8(const onnx::NodeProto &onnx_node,
void MaxPoolNodeConverter::convertV10(const onnx::NodeProto &onnx_node,
ConverterContext *context) const
{
- const auto ceil_mode = getIntAttribute(onnx_node, "ceil_mode", 0);
+ const auto ceil_mode = getAttributeValue<int64_t>(onnx_node, "ceil_mode", 0);
if (ceil_mode != 0)
throw std::runtime_error("Not supported ceil_mode attribute!");
diff --git a/compiler/mir-onnx-importer/Op/Pad.cpp b/compiler/mir-onnx-importer/Op/Pad.cpp
index b4cec79e8..f3fdeefa6 100644
--- a/compiler/mir-onnx-importer/Op/Pad.cpp
+++ b/compiler/mir-onnx-importer/Op/Pad.cpp
@@ -17,6 +17,7 @@
#include "Pad.h"
#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
#include "mir/ops/PadOp.h"
@@ -42,24 +43,21 @@ void PadNodeConverter::convertPadAttrName(const std::string pad_attr_name,
mir::Graph *graph = context->getGraph();
// 0.0f is the default value to be filled into padded cells.
- const float value = getFloatAttribute(onnx_node, "value", 0.0f);
- const auto *pads_attr = findAttribute(onnx_node, pad_attr_name);
- assert(pads_attr);
+ const float value = getAttributeValue<float>(onnx_node, "value", 0.0f);
+ const auto pads = getAttributeValue<std::vector<int64_t>>(onnx_node, "pads");
// "constant" is the default mode.
- auto mode = getStringAttribute(onnx_node, "mode", "constant");
+ auto mode = getAttributeValue<std::string>(onnx_node, "mode", "constant");
if (mode != "constant")
- throw std::runtime_error("Not supported Pad mode attribue!");
+ throw std::runtime_error("Not supported Pad mode attribute!");
const mir::Scalar scalar(reinterpret_cast<const char *>(&value), mir::DTYPE::FLOAT32,
sizeof(float));
- assert(pads_attr->ints_size() > 0);
- int axis_size = pads_attr->ints_size() / 2;
- std::vector<std::pair<int32_t, int32_t>> vec(axis_size);
- auto *data = pads_attr->ints().data();
- for (int i = 0; i < axis_size; i++)
+ assert(!pads.empty());
+ const auto axis_size = pads.size() / 2;
+ std::vector<std::pair<int32_t, int32_t>> vec;
+ for (std::size_t i = 0; i < axis_size; i++)
{
- auto pair = std::make_pair(data[i], data[axis_size + i]);
- vec[i] = pair;
+ vec.emplace_back(pads[i], pads[axis_size + i]);
}
auto result =
createOp<mir::ops::PadOp>(graph, inputs[0], inputs[0]->getShape().rank(), vec, scalar)
diff --git a/compiler/mir-onnx-importer/Op/Reshape.cpp b/compiler/mir-onnx-importer/Op/Reshape.cpp
index f8fd69cfa..ac6a16d1a 100644
--- a/compiler/mir-onnx-importer/Op/Reshape.cpp
+++ b/compiler/mir-onnx-importer/Op/Reshape.cpp
@@ -17,8 +17,10 @@
#include "Reshape.h"
#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
#include "mir/Tensor.h"
+#include "mir/ShapeRange.h"
#include "mir/ops/ConstantOp.h"
#include "mir/ops/ReshapeOp.h"
diff --git a/compiler/mir-onnx-importer/Op/Softmax.cpp b/compiler/mir-onnx-importer/Op/Softmax.cpp
index 21e4c93e6..84db24ea9 100644
--- a/compiler/mir-onnx-importer/Op/Softmax.cpp
+++ b/compiler/mir-onnx-importer/Op/Softmax.cpp
@@ -17,6 +17,7 @@
#include "Softmax.h"
#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
#include "mir/ops/SoftmaxOp.h"
@@ -30,7 +31,7 @@ void SoftmaxNodeConverter::convert(const onnx::NodeProto &onnx_node,
mir::Graph *graph = context->getGraph();
// 1 is the default axis number.
- int axis = getIntAttribute(onnx_node, "axis", 1);
+ const auto axis = getAttributeValue<std::int64_t>(onnx_node, "axis", 1);
auto result = createOp<mir::ops::SoftmaxOp>(graph, inputs[0], axis)->getOutput(0);
diff --git a/compiler/mir-onnx-importer/Op/Unsqueeze.cpp b/compiler/mir-onnx-importer/Op/Unsqueeze.cpp
index 0fcb307a7..e1a80619b 100644
--- a/compiler/mir-onnx-importer/Op/Unsqueeze.cpp
+++ b/compiler/mir-onnx-importer/Op/Unsqueeze.cpp
@@ -17,6 +17,7 @@
#include "Unsqueeze.h"
#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
#include "mir/ops/ReshapeOp.h"
@@ -28,16 +29,16 @@ void UnsqueezeNodeConverter::convert(const onnx::NodeProto &onnx_node,
{
std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
mir::Graph *graph = context->getGraph();
- auto *axes = findAttribute(onnx_node, "axes");
- assert(axes && axes->ints_size());
+ const auto axes = getAttributeValue<std::vector<std::int64_t>>(onnx_node, "axes");
+ assert(!axes.empty());
const mir::Shape &input_shape = inputs[0]->getShape();
- const int out_rank = input_shape.rank() + axes->ints_size();
+ const int out_rank = input_shape.rank() + static_cast<int>(axes.size());
mir::Shape out_shape(out_rank);
- auto ints_iterator = axes->ints().begin();
+ auto ints_iterator = axes.cbegin();
int j = 0;
for (int i = 0; i < out_rank; i++)
{
- if (ints_iterator < axes->ints().end() && i == *ints_iterator)
+ if (ints_iterator < axes.cend() && i == *ints_iterator)
{
out_shape.dim(i) = 1;
ints_iterator++;
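The loop above inserts a dimension of size 1 at every index listed in `axes` (assumed sorted ascending); every other output index copies the next input dimension. A standalone sketch of the full computation, with the else branch not shown in the hunk filled in from context:

#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
  const std::vector<std::int64_t> input_shape{3, 4}; // hypothetical input
  const std::vector<std::int64_t> axes{0, 2};        // hypothetical "axes"
  const int out_rank = static_cast<int>(input_shape.size() + axes.size());

  std::vector<std::int64_t> out_shape(out_rank);
  auto it = axes.cbegin();
  int j = 0;
  for (int i = 0; i < out_rank; ++i)
  {
    if (it != axes.cend() && i == *it)
    {
      out_shape[i] = 1; // dimension inserted by Unsqueeze
      ++it;
    }
    else
    {
      out_shape[i] = input_shape[j++]; // copy the next input dimension
    }
  }

  for (std::int64_t d : out_shape)
    std::cout << d << ' '; // prints "1 3 1 4"
  std::cout << '\n';
}
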
diff --git a/compiler/mir-onnx-importer/Op/Upsample.cpp b/compiler/mir-onnx-importer/Op/Upsample.cpp
index a7e4f78e2..4bc8326ab 100644
--- a/compiler/mir-onnx-importer/Op/Upsample.cpp
+++ b/compiler/mir-onnx-importer/Op/Upsample.cpp
@@ -17,6 +17,7 @@
#include "Upsample.h"
#include "ONNXHelpers.h"
+#include "AttributeHelpers.h"
#include "mir/Tensor.h"
@@ -49,11 +50,11 @@ void UpsampleNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
mir::Graph *graph = context->getGraph();
// "nearest" is the default mode.
- std::string mode = getStringAttribute(onnx_node, "mode", "nearest");
+ std::string mode = getAttributeValue<std::string>(onnx_node, "mode", "nearest");
assert(mode == "nearest" && "Unsupported upscale mode!");
- const float h_scale = getFloatAttribute(onnx_node, "height_scale", 0.0f); // required
- const float w_scale = getFloatAttribute(onnx_node, "width_scale", 0.0f); // required
+ const float h_scale = getAttributeValue<float>(onnx_node, "height_scale", 0.0f); // required
+ const float w_scale = getAttributeValue<float>(onnx_node, "width_scale", 0.0f); // required
if (h_scale < 1.0f || w_scale < 1.0f)
throw std::runtime_error("Wrong scale attributes!");
@@ -81,7 +82,7 @@ void UpsampleNodeConverter::convertV7(const onnx::NodeProto &onnx_node,
mir::Graph *graph = context->getGraph();
// "nearest" is the default mode.
- std::string mode = getStringAttribute(onnx_node, "mode", "nearest");
+ std::string mode = getAttributeValue<std::string>(onnx_node, "mode", "nearest");
assert(mode == "nearest" && "Unsupported upscale mode!");
const auto *scales_attr = findAttribute(onnx_node, "scales");
@@ -116,7 +117,7 @@ void UpsampleNodeConverter::convertV9(const onnx::NodeProto &onnx_node,
mir::Graph *graph = context->getGraph();
// "nearest" is the default mode.
- std::string mode = getStringAttribute(onnx_node, "mode", "nearest");
+ const auto mode = getAttributeValue<std::string>(onnx_node, "mode", "nearest");
assert(mode == "nearest" && "Unsupported upscale mode!");
// relies on attributes being lifted to constants (ONNX optimization pass)