/*
* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "CLSubTensor.h"

#include <cassert>

namespace neurun
{
namespace backend
{
namespace acl_cl
{
namespace operand
{
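
// Wraps an arm_compute::CLSubTensor: a view onto a sub-region of the parent
// tensor described by `tensor_shape` and `coords`. The sub-tensor shares the
// parent's OpenCL buffer, so no additional device memory is allocated; the
// `extend_parent` flag is passed through to ACL unchanged.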
CLSubTensor::CLSubTensor(ICLTensor *parent, const arm_compute::TensorShape &tensor_shape,
                         const arm_compute::Coordinates &coords, bool extend_parent)
    : _cl_sub_tensor(std::make_shared<arm_compute::CLSubTensor>(parent->handle(), tensor_shape,
                                                                coords, extend_parent))
{
  // DO NOTHING
}
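
// Accessors for the underlying arm_compute sub-tensor handle (const and non-const).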
arm_compute::CLSubTensor *CLSubTensor::handle() const { return _cl_sub_tensor.get(); }

arm_compute::CLSubTensor *CLSubTensor::handle() { return _cl_sub_tensor.get(); }
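
// Forward map()/unmap() to the wrapped arm_compute sub-tensor.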
void CLSubTensor::map(bool blocking) { _cl_sub_tensor->map(blocking); }

void CLSubTensor::unmap() { _cl_sub_tensor->unmap(); }
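
// Map the sub-tensor's OpenCL buffer into host-accessible memory. The mapped
// pointer covers info()->total_size() bytes and remains valid until doUnmap().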
uint8_t *CLSubTensor::doMap(cl::CommandQueue &q, bool blocking)
{
  // Mapping requires a valid (non-null) OpenCL buffer.
  assert(cl_buffer().get() != nullptr);
  return static_cast<uint8_t *>(q.enqueueMapBuffer(cl_buffer(), blocking ? CL_TRUE : CL_FALSE,
                                                   CL_MAP_READ | CL_MAP_WRITE, 0,
                                                   info()->total_size()));
}
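
// Release a host mapping previously obtained through doMap().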
void CLSubTensor::doUnmap(cl::CommandQueue &q)
{
  // Likewise, unmapping only makes sense for a valid, previously mapped buffer.
  assert(cl_buffer().get() != nullptr);
  q.enqueueUnmapMemObject(cl_buffer(), buffer());
}

} // namespace operand
} // namespace acl_cl
} // namespace backend
} // namespace neurun