path: root/compiler/mir-onnx-importer/Op/Gemm.cpp
/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Gemm.h"

#include "ONNXHelpers.h"
#include "AttributeHelpers.h"

#include "mir/TensorVariant.h"

#include "mir/ops/AddOp.h"
#include "mir/ops/ConstantOp.h"
#include "mir/ops/FullyConnectedOp.h"
#include "mir/ops/MulOp.h"
#include "mir/ops/ReshapeOp.h"
#include "mir/ops/TransposeOp.h"

namespace mir_onnx
{

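// Converts the ONNX Gemm operator, which computes Y = alpha * A' * B' + beta * C,
// where A' = transpose(A) if transA != 0 (otherwise A) and B' = transpose(B) if
// transB != 0 (otherwise B). A' has shape (M, K), B' has shape (K, N), and C must be
// broadcastable to the output shape (M, N).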
void GemmNodeConverter::convert(const onnx::NodeProto &onnx_node, ConverterContext *context) const
{
  std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
  mir::Graph *graph = context->getGraph();

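  // Gemm takes inputs A, B and C. C became optional only in later ONNX opsets;
  // this converter expects all three inputs to be present.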
  assert(inputs.size() == 3);
  auto a = inputs[0];
  auto b = inputs[1];
  auto c = inputs[2];

  // alpha scales the A * B product and beta scales C; both default to 1.0f.
  const auto alpha_val = getAttributeValue<float>(onnx_node, "alpha", 1.0f);
  const auto beta_val = getAttributeValue<float>(onnx_node, "beta", 1.0f);

  // transA and transB default to 0, which means no transposition.
  const auto trans_a = getAttributeValue<std::int64_t>(onnx_node, "transA", 0);
  const auto trans_b = getAttributeValue<std::int64_t>(onnx_node, "transB", 0);

  // Transpose the A and B matrices as needed.
  if (trans_a)
    a = createOp<mir::ops::TransposeOp>(graph, a, std::vector<std::size_t>{1, 0})->getOutput(0);
  if (trans_b)
    b = createOp<mir::ops::TransposeOp>(graph, b, std::vector<std::size_t>{1, 0})->getOutput(0);

  // Calculate A * B; FullyConnectedOp performs the 2-D matrix multiplication here.
  auto ab = createOp<mir::ops::FullyConnectedOp>(graph, a, b)->getOutput(0);

  // Scale A * B by alpha.
  if (alpha_val != 1.0f)
  {
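    // The {1}-shaped constant is broadcast over ab by the elementwise MulOp.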
    mir::TensorVariant alpha_tensor(mir::DataType::FLOAT32, {1}, &alpha_val);
    auto alpha = createOp<mir::ops::ConstantOp>(graph, alpha_tensor)->getOutput(0);
    ab = createOp<mir::ops::MulOp>(graph, alpha, ab)->getOutput(0);
  }

  // Scale C by beta.
  if (beta_val != 1.0f)
  {
    mir::TensorVariant beta_tensor(mir::DataType::FLOAT32, {1}, &beta_val);
    auto beta = createOp<mir::ops::ConstantOp>(graph, beta_tensor)->getOutput(0);
    c = createOp<mir::ops::MulOp>(graph, beta, c)->getOutput(0);
  }

  // Calculate the result: alpha * A * B + beta * C. The elementwise AddOp broadcasts C
  // to the shape of the product, matching ONNX's unidirectional broadcasting of C.
  auto result = createOp<mir::ops::AddOp>(graph, ab, c)->getOutput(0);

  context->setNodeOutputs(onnx_node, {result});
}

} // namespace mir_onnx