path: root/compiler/mir-onnx-importer/Op/AveragePool.cpp
/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "AveragePool.h"

#include "ONNXHelpers.h"

#include "mir/ops/PoolOp.h"

namespace mir_onnx
{

void AveragePoolNodeConverter::convert(const onnx::NodeProto &onnx_node,
                                       ConverterContext *context) const
{
  const auto opset_version = context->getOpsetVersion(onnx_node.domain());
  if (opset_version >= 10)
    convertV10(onnx_node, context);
  else if (opset_version >= 7)
    convertV7(onnx_node, context);
  else if (opset_version >= 1)
    convertV1(onnx_node, context);
  else
    throw std::runtime_error("Not supported opset version on Add operation!");
}

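// Opset 1: supports only explicit padding. The NCHW input is transposed to NHWC,
// an average PoolOp is created, and the result is transposed back to NCHW.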
void AveragePoolNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
                                         ConverterContext *context) const
{
  const auto auto_pad = getStringAttribute(onnx_node, "auto_pad", "NOTSET");
  // auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID.
  if (auto_pad != "NOTSET")
    throw std::runtime_error("Supported only explicit padding!");

  std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
  mir::Graph *graph = context->getGraph();

  mir::ops::PoolOp::BorderType border_type = mir::ops::PoolOp::BorderType::EMPTY;
  mir::ops::PoolOp::PoolingType pool_type = mir::ops::PoolOp::PoolingType::AVG;

  KernelStridesPadding cdata;
  // Transpose ONNX NCHW to MIR NHWC
  auto t_input = convertONNXToMIR(graph, inputs[0]);

  getKernelStridesPadding(onnx_node, cdata);

  auto result =
      createOp<mir::ops::PoolOp>(graph, t_input, pool_type, cdata.kernel_shape, cdata.strides_shape,
                                 cdata.padding_before, cdata.padding_after, border_type)
          ->getOutput(0);
  result = convertMIRToONNX(graph, result);

  context->setNodeOutputs(onnx_node, {result});
}

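// Opset 7 added the count_include_pad attribute; only the default value (0) is
// supported, after which conversion is delegated to the opset 1 handler.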
void AveragePoolNodeConverter::convertV7(const onnx::NodeProto &onnx_node,
                                         ConverterContext *context) const
{
  const auto count_include_pad = getIntAttribute(onnx_node, "count_include_pad", 0);
  if (count_include_pad != 0)
    throw std::runtime_error("Not supported count_include_pad attribute!");

  convertV1(onnx_node, context);
}

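// Opset 10 added the ceil_mode attribute; only the default value (0) is supported,
// after which conversion is delegated to the opset 7 handler.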
void AveragePoolNodeConverter::convertV10(const onnx::NodeProto &onnx_node,
                                          ConverterContext *context) const
{
  const auto ceil_mode = getIntAttribute(onnx_node, "ceil_mode", 0);
  if (ceil_mode != 0)
    throw std::runtime_error("Not supported ceil_mode attribute!");

  convertV7(onnx_node, context);
}

} // namespace mir_onnx