/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "NodeExecution.h"

#include "locomotiv/NodeData.h"
#include "NodeDataImpl.h"
#include "NodeDomain.h"

#include <nncc/core/ADT/tensor/Shape.h>
#include <nncc/core/ADT/tensor/Buffer.h>
#include <nncc/core/ADT/tensor/LexicalLayout.h>
#include <nncc/core/ADT/tensor/Index.h>
#include <nncc/core/ADT/tensor/IndexEnumerator.h>

#include <gtest/gtest.h>

using nncc::core::ADT::tensor::Shape;
using nncc::core::ADT::tensor::LexicalLayout;
using nncc::core::ADT::tensor::make_buffer;
using nncc::core::ADT::tensor::IndexEnumerator;

/*
Test case generated from the following TensorFlow code:

x = tf.constant([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18],
                shape=[1, 3, 3, 2], dtype=tf.float32)
y = tf.constant([-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -17, -18],
                shape=[1, 3, 3, 2], dtype=tf.float32)
out = tf.math.add(x, y)

with tf.Session() as sess:
    print(sess.run(out))
*/
TEST(NodeExecution_EltwiseAdd, f32)
{
  float x_val[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18};
  float y_val[] = {-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -17, -18};
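  // Expected output: y_val is the element-wise negation of x_val, so every sum is 0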
  float out_val[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

  // make EltwiseAdd(Pull, Pull)
  auto g = loco::make_graph();
  Shape input_shape{1, 3, 3, 2}; // NHWC

  auto inp_lhs = g->nodes()->create<loco::Pull>();
  {
    inp_lhs->dtype(loco::DataType::FLOAT32);
    inp_lhs->shape({1, 3, 3, 2});
  }

  auto inp_rhs = g->nodes()->create<loco::Pull>();
  {
    inp_rhs->dtype(loco::DataType::FLOAT32);
    inp_rhs->shape({1, 3, 3, 2});
  }

  auto eltwise_add = g->nodes()->create<loco::EltwiseAdd>();
  {
    eltwise_add->lhs(inp_lhs);
    eltwise_add->rhs(inp_rhs);
  }

  // Make and assign data to two pull nodes
  auto inp_lhs_buf = make_buffer<float, LexicalLayout>(input_shape);
  {
    int n = 0;
    for (IndexEnumerator e{inp_lhs_buf.shape()}; e.valid(); e.advance())
    {
      inp_lhs_buf.at(e.current()) = x_val[n++];
    }
  }

  auto inp_rhs_buf = make_buffer<float, LexicalLayout>(input_shape);
  {
    int n = 0;
    for (IndexEnumerator e{inp_rhs_buf.shape()}; e.valid(); e.advance())
    {
      inp_rhs_buf.at(e.current()) = y_val[n++];
    }
  }

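  // Wrap each input buffer as NodeData, attach it to its Pull node,
  // and mark the node's domain as Tensor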
  auto inp_lhs_data = locomotiv::make_data(inp_lhs_buf);
  locomotiv::annot_data(inp_lhs, std::move(inp_lhs_data));
  locomotiv::annot_domain(inp_lhs, loco::Domain::Tensor);

  auto inp_rhs_data = locomotiv::make_data(inp_rhs_buf);
  locomotiv::annot_data(inp_rhs, std::move(inp_rhs_data));
  locomotiv::annot_domain(inp_rhs, loco::Domain::Tensor);

  // run the network
  locomotiv::NodeExecution::get().run(eltwise_add);

  // get result
  auto eltwise_add_data = locomotiv::annot_data(eltwise_add);

  // compare the result against the expected output
  ASSERT_NE(eltwise_add_data, nullptr);
  ASSERT_EQ(eltwise_add_data->dtype(), loco::DataType::FLOAT32);
  ASSERT_EQ(*(eltwise_add_data->shape()), Shape({1, 3, 3, 2}));

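  // Every element of the result should be 0 (see out_val above)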
  uint32_t n = 0;
  for (IndexEnumerator e{*(eltwise_add_data->shape())}; e.valid(); e.advance())
  {
    ASSERT_FLOAT_EQ(eltwise_add_data->as_f32_bufptr()->at(e.current()), out_val[n++]);
  }

  ASSERT_EQ(locomotiv::annot_domain(eltwise_add), loco::Domain::Tensor);
}