/*
* Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef __ONERT_BACKEND_RUY_OPS_OPERATION_UTILS_H__
#define __ONERT_BACKEND_RUY_OPS_OPERATION_UTILS_H__

#include <backend/IPortableTensor.h>
#include <ruy/Shape.h>
#include <ruy/Types.h>

#include <ir/DataType.h>
#include <ir/InternalType.h>
#include <ir/Padding.h>

#include <cassert>
#include <limits>
#include <stdexcept>

using OperandType = onert::ir::DataType;

namespace onert
{
namespace backend
{
namespace ruy
{
namespace ops
{
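
// Convert an IPortableTensor's ir::Shape into the nnfw::ruy::Shape consumed by
// the ruy kernels. A null tensor maps to an empty shape; the tensor layout is
// expected to be NHWC, which the assert below checks in debug builds.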
inline nnfw::ruy::Shape getTensorShape(const IPortableTensor *tensor)
{
  if (tensor == nullptr)
    return nnfw::ruy::Shape();

  const ir::Shape &shape = tensor->get_info().shape();

  assert(tensor->layout() == ir::Layout::NHWC);

  auto rank = shape.rank();
  nnfw::ruy::Shape ret(rank);
  auto data = ret.DimsData();
  for (int i = 0; i < rank; ++i)
  {
    data[i] = shape.dim(i);
  }
  return ret;
}
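
// Map an ir::Activation onto the corresponding ruy fused-activation enum value.
// Unknown activation kinds are rejected with an exception.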
inline nnfw::ruy::FusedActivationFunctionType convertActivationType(const ir::Activation activation)
{
  switch (activation)
  {
    case ir::Activation::NONE:
      return nnfw::ruy::FusedActivationFunctionType::kNone;
    case ir::Activation::RELU:
      return nnfw::ruy::FusedActivationFunctionType::kRelu;
    case ir::Activation::RELU1:
      return nnfw::ruy::FusedActivationFunctionType::kRelu1;
    case ir::Activation::RELU6:
      return nnfw::ruy::FusedActivationFunctionType::kRelu6;
    case ir::Activation::TANH:
      return nnfw::ruy::FusedActivationFunctionType::kTanh;
    case ir::Activation::SIGMOID:
      return nnfw::ruy::FusedActivationFunctionType::kSigmoid;
    default:
      throw std::runtime_error{"RUY backend: Cannot convert activation type"};
  }
}
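
// Compute the clamping interval [*activation_min, *activation_max] that a
// kernel applies to emulate the given fused activation: e.g. RELU6 clamps to
// [0, 6], while NONE leaves the full representable range of T.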
template <typename T>
void CalculateActivationRange(ir::Activation activation, T *activation_min, T *activation_max)
{
  if (activation == ir::Activation::RELU)
  {
    *activation_min = 0;
    *activation_max = std::numeric_limits<T>::max();
  }
  else if (activation == ir::Activation::RELU6)
  {
    *activation_min = 0;
    *activation_max = 6;
  }
  else if (activation == ir::Activation::RELU1)
  {
    *activation_min = -1;
    *activation_max = 1;
  }
  else if (activation == ir::Activation::SIGMOID)
  {
    *activation_min = 0;
    *activation_max = 1;
  }
  else if (activation == ir::Activation::NONE)
  {
    *activation_min = std::numeric_limits<T>::lowest();
    *activation_max = std::numeric_limits<T>::max();
  }
  else
  {
    // Fail loudly rather than leave the output range uninitialized.
    throw std::runtime_error{"Unsupported fused activation function."};
  }
}
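
// Translate an ir::PaddingType into the equivalent nnfw::ruy::PaddingType.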
nnfw::ruy::PaddingType getPaddingType(ir::PaddingType ir_padding_type);

} // namespace ops
} // namespace ruy
} // namespace backend
} // namespace onert

#endif // __ONERT_BACKEND_RUY_OPS_OPERATION_UTILS_H__