blob: 96799129554da7c2cb03885f4d9f313ab4a86ea9 (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
|
/*
* Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "AddNLayer.h"
#include "OperationUtils.h"
#include <cker/operation/AddN.h>
#include <assert.h>
namespace onert
{
namespace backend
{
namespace cpu
{
namespace ops
{
// Stores the operation's operands: takes ownership of the input tensor list
// and keeps a non-owning pointer to the output tensor. Must be called once
// before run().
void AddNLayer::configure(std::vector<const IPortableTensor *> &&inputs, IPortableTensor *output)
{
  _output = output;
  // Move, not copy — the caller hands over the vector by rvalue reference.
  _inputs = std::move(inputs);
}
void AddNLayer::run()
{
size_t input_size = _inputs.size();
if (_output->data_type() == ir::DataType::INT32)
{
std::vector<const int32_t *> input_buffers(input_size);
for (size_t i = 0; i < input_size; i++)
{
input_buffers[i] = getBuffer<int32_t>(_inputs[i]);
}
AddN(getShape(_inputs[0]), input_size, input_buffers.data(), getBuffer<int32_t>(_output));
}
else if (_output->data_type() == ir::DataType::FLOAT32)
{
std::vector<const float *> input_buffers(input_size);
for (size_t i = 0; i < input_size; i++)
{
input_buffers[i] = getBuffer<float>(_inputs[i]);
}
AddN(getShape(_inputs[0]), input_size, input_buffers.data(), getBuffer<float>(_output));
}
else
{
throw std::runtime_error("AddN: unsupported data type");
}
}
} // namespace ops
} // namespace cpu
} // namespace backend
} // namespace onert
|