/*
* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "MaxPoolLayer.h"
#include "tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h"
#include "kernel/cpu/OperationUtils.h"
namespace neurun
{
namespace kernel
{
namespace cpu
{
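// Common setup shared by the float and quant8 paths: copy the layer's stride,
// kernel and padding fields into a tflite::PoolParams named op_params.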
#define MAXPOOLING_PARAMETERS                               \
  tflite::PoolParams op_params;                             \
  op_params.stride_height = _strideHeight;                  \
  op_params.stride_width = _strideWidth;                    \
  op_params.filter_height = _kernelHeight;                  \
  op_params.filter_width = _kernelWidth;                    \
  op_params.padding_values.height = (int16_t)_paddingTop;   \
  op_params.padding_values.width = (int16_t)_paddingLeft;
MaxPoolLayer::MaxPoolLayer()
: _inputData(nullptr), _outputData(nullptr), _inputShape(), _outputShape(), _paddingLeft(0),
_paddingTop(0), _paddingRight(0), _paddingBottom(0), _strideWidth(0), _strideHeight(0),
_kernelWidth(0), _kernelHeight(0), _activation(ANEURALNETWORKS_FUSED_NONE),
_inputType(OperandType::SCALAR_FLOAT32)
{
// DO NOTHING
}
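// Max pooling over a float32 input tensor, clamped to the range implied by
// the fused activation.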
bool MaxPoolLayer::maxPoolFloat32()
{
MAXPOOLING_PARAMETERS
float output_activation_min, output_activation_max;
CalculateActivationRangeFloat(_activation, &output_activation_min, &output_activation_max);
op_params.float_activation_min = output_activation_min;
op_params.float_activation_max = output_activation_max;
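  // Hand off to the optimized TFLite MaxPool kernel, converting the internal
  // shapes to TFLite's shape representation.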
::tflite::optimized_ops::MaxPool(op_params, convertShapeToTFLiteShape(_inputShape),
reinterpret_cast<const float *>(_inputData),
convertShapeToTFLiteShape(_outputShape),
reinterpret_cast<float *>(_outputData));
return true;
}
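// Max pooling over an asymmetric quantized uint8 input tensor.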
bool MaxPoolLayer::maxPoolQuant8()
{
MAXPOOLING_PARAMETERS
int32_t output_activation_min = 0;
int32_t output_activation_max = 0;
CalculateActivationRangeUint8(_activation, _outputShape, &output_activation_min,
&output_activation_max);
op_params.quantized_activation_min = output_activation_min;
op_params.quantized_activation_max = output_activation_max;
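  // Same kernel entry point as the float path, but on uint8 buffers with a
  // quantized activation range derived from the output shape.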
::tflite::optimized_ops::MaxPool(op_params, convertShapeToTFLiteShape(_inputShape), _inputData,
convertShapeToTFLiteShape(_outputShape), _outputData);
return true;
}
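// Cache the operand buffers, shapes and pooling parameters; the actual
// computation is deferred until run() is called.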
void MaxPoolLayer::configure(uint8_t *inputData, const Shape inputShape, const uint32_t paddingLeft,
const uint32_t paddingRight, const uint32_t paddingTop,
const uint32_t paddingBottom, const uint32_t strideWidth,
const uint32_t strideHeight, const uint32_t kernelWidth,
const uint32_t kernelHeight, const FuseCode activation,
uint8_t *outputData, const Shape outputShape)
{
_inputData = inputData;
_inputShape = inputShape;
_inputType = inputShape.type;
_paddingLeft = paddingLeft;
_paddingRight = paddingRight;
_paddingTop = paddingTop;
_paddingBottom = paddingBottom;
_strideWidth = strideWidth;
_strideHeight = strideHeight;
_kernelWidth = kernelWidth;
_kernelHeight = kernelHeight;
_activation = activation;
_outputData = outputData;
_outputShape = outputShape;
}
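// Select the kernel based on the input operand type recorded in configure().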
void MaxPoolLayer::run()
{
if (_inputType == OperandType::TENSOR_FLOAT32)
{
maxPoolFloat32();
}
else if (_inputType == OperandType::TENSOR_QUANT8_ASYMM)
{
throw std::runtime_error{"MaxPoolLayer : Not tested for TENSOR_QUANT8_ASYMM"};
// maxPoolQuant8();
}
}
#undef MAXPOOLING_PARAMETERS
} // namespace cpu
} // namespace kernel
} // namespace neurun