diff options
Diffstat (limited to 'runtimes/neurun/src/backend/acl_cl/operand/CLSubTensor.cc')
-rw-r--r-- | runtimes/neurun/src/backend/acl_cl/operand/CLSubTensor.cc | 61 |
1 file changed, 61 insertions(+), 0 deletions(-)
diff --git a/runtimes/neurun/src/backend/acl_cl/operand/CLSubTensor.cc b/runtimes/neurun/src/backend/acl_cl/operand/CLSubTensor.cc new file mode 100644 index 000000000..f64b521dd --- /dev/null +++ b/runtimes/neurun/src/backend/acl_cl/operand/CLSubTensor.cc @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "CLSubTensor.h" + +namespace neurun +{ +namespace backend +{ +namespace acl_cl +{ +namespace operand +{ + +CLSubTensor::CLSubTensor(ICLTensor *parent, const arm_compute::TensorShape &tensor_shape, + const arm_compute::Coordinates &coords, bool extend_parent) + : _cl_sub_tensor(std::make_shared<arm_compute::CLSubTensor>(parent->handle(), tensor_shape, + coords, extend_parent)) +{ + // DO NOTHING +} + +arm_compute::CLSubTensor *CLSubTensor::handle() const { return _cl_sub_tensor.get(); } + +arm_compute::CLSubTensor *CLSubTensor::handle() { return _cl_sub_tensor.get(); } + +void CLSubTensor::map(bool blocking) { _cl_sub_tensor->map(blocking); } + +void CLSubTensor::unmap() { _cl_sub_tensor->unmap(); } + +uint8_t *CLSubTensor::doMap(cl::CommandQueue &q, bool blocking) +{ + assert(cl_buffer().get() == nullptr); + return static_cast<uint8_t *>(q.enqueueMapBuffer(cl_buffer(), blocking ? 
CL_TRUE : CL_FALSE, + CL_MAP_READ | CL_MAP_WRITE, 0, + info()->total_size())); +} + +void CLSubTensor::doUnmap(cl::CommandQueue &q) +{ + assert(cl_buffer().get() == nullptr); + q.enqueueUnmapMemObject(cl_buffer(), buffer()); +} + +} // namespace operand +} // namespace acl_cl +} // namespace backend +} // namespace neurun |