/*
* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "internal/layers/SimpleSpaceToDepth.h"

#include <arm_compute/runtime/CL/CLScheduler.h>

#include <cassert> // for the shape/axis sanity checks in configure()
// Validates the axis permutation and stores the parameters for run().
void SimpleSpaceToDepth::configure(::arm_compute::ITensor *input, ::arm_compute::ITensor *output,
                                   int32_t block_size, const ::arm_compute::Coordinates &axises)
{
  const auto rank = axises.num_dimensions();
  assert(rank == 4);
  for (size_t i = 0; i < rank; ++i)
  {
    assert(axises[i] >= 0);
    assert(axises[i] < static_cast<int>(rank));
  }

  _input = input;
  _output = output;
  _block_size = block_size;
  _axises = axises;
}
// Naive SpaceToDepth: each block_size x block_size patch of the spatial
// plane is folded into the depth dimension, so an NHWC input of shape
// [N, H, W, C] becomes [N, H / block_size, W / block_size, C * block_size^2].
// output_shape is accepted for interface symmetry; the mapping below only
// needs the input extents.
template <typename T>
inline void SpaceToDepth(const ::arm_compute::ITensor *input,
                         const ::arm_compute::TensorShape &input_shape, int32_t block_size,
                         ::arm_compute::ITensor *output,
                         const ::arm_compute::TensorShape &output_shape,
                         const ::arm_compute::Coordinates &axises)
{
  const int input_batch = input_shape[axises[0]];
  const int input_height = input_shape[axises[1]];
  const int input_width = input_shape[axises[2]];
  const int input_depth = input_shape[axises[3]];

  for (int in_b = 0; in_b < input_batch; ++in_b)
  {
    for (int in_h = 0; in_h < input_height; ++in_h)
    {
      for (int in_w = 0; in_w < input_width; ++in_w)
      {
        for (int in_d = 0; in_d < input_depth; ++in_d)
        {
          // Each input element maps to exactly one output element: the
          // position inside its block selects the slot within the widened
          // depth dimension.
          const int out_b = in_b;
          const int out_h = in_h / block_size;
          const int out_w = in_w / block_size;
          const int out_d =
              in_d + ((in_h % block_size) * block_size + in_w % block_size) * input_depth;

          auto input_id =
              asARMComputeCoordinates(::arm_compute::Coordinates{in_b, in_h, in_w, in_d}, axises);
          auto output_id = asARMComputeCoordinates(
              ::arm_compute::Coordinates{out_b, out_h, out_w, out_d}, axises);

          *reinterpret_cast<T *>(output->ptr_to_element(output_id)) =
              *reinterpret_cast<T *>(input->ptr_to_element(input_id));
        }
      }
    }
  }
}
void SimpleSpaceToDepth::run()
{
  // The rearrangement runs on the host, so in GPU mode the CL buffers must
  // be mapped into CPU-visible memory first (and unmapped afterwards).
  if (::internal::arm_compute::isGpuMode())
  {
    auto &q = ::arm_compute::CLScheduler::get().queue();

    CAST_CL(_input)->map(q);
    CAST_CL(_output)->map(q);
  }

  switch (_input->info()->data_type())
  {
    case ::arm_compute::DataType::U8:
    case ::arm_compute::DataType::QASYMM8:
      SpaceToDepth<uint8_t>(_input, _input->info()->tensor_shape(), _block_size, _output,
                            _output->info()->tensor_shape(), _axises);
      break;
    case ::arm_compute::DataType::F32:
      SpaceToDepth<float>(_input, _input->info()->tensor_shape(), _block_size, _output,
                          _output->info()->tensor_shape(), _axises);
      break;
    default:
      ARM_COMPUTE_ERROR("DataType not supported");
      break;
  }

  if (::internal::arm_compute::isGpuMode())
  {
    auto &q = ::arm_compute::CLScheduler::get().queue();

    CAST_CL(_input)->unmap(q);
    CAST_CL(_output)->unmap(q);
  }
}