Diffstat (limited to 'runtimes/neurun/src/backend/acl_cl/operand/CLTensor.cc')
-rw-r--r-- runtimes/neurun/src/backend/acl_cl/operand/CLTensor.cc | 81
1 file changed, 81 insertions, 0 deletions
diff --git a/runtimes/neurun/src/backend/acl_cl/operand/CLTensor.cc b/runtimes/neurun/src/backend/acl_cl/operand/CLTensor.cc
new file mode 100644
index 000000000..e7b718df3
--- /dev/null
+++ b/runtimes/neurun/src/backend/acl_cl/operand/CLTensor.cc
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <arm_compute/runtime/CL/CLScheduler.h>
+#include <arm_compute/runtime/CL/CLMemory.h>
+#include <arm_compute/runtime/CL/CLMemoryRegion.h>
+#include "CLTensor.h"
+
+#include "backend/acl_cl/Convert.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_cl
+{
+namespace operand
+{
+
+CLTensor::CLTensor(const compiler::TensorInfo &info)
+ : _cl_tensor(std::make_shared<arm_compute::CLTensor>())
+{
+ auto acl_cl_info = asTensorInfo(info.shape(), info.typeInfo());
+ allocator()->init(acl_cl_info);
+}
+
+arm_compute::CLTensor *CLTensor::handle() const { return _cl_tensor.get(); }
+
+arm_compute::CLTensor *CLTensor::handle() { return _cl_tensor.get(); }
+
+arm_compute::CLTensorAllocator *CLTensor::allocator() { return _cl_tensor->allocator(); }
+
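+// Map/unmap the underlying CL buffer to host memory using the scheduler's default
+// command queue; a blocking map returns only once the transfer has completed.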
+void CLTensor::map(bool blocking) { _cl_tensor->map(blocking); }
+
+void CLTensor::unmap() { _cl_tensor->unmap(); }
+
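+// Variants that map/unmap via the allocator on an explicitly supplied command queue.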
+uint8_t *CLTensor::doMap(cl::CommandQueue &q, bool blocking)
+{
+ return allocator()->map(q, blocking);
+}
+
+void CLTensor::doUnmap(cl::CommandQueue &q) { allocator()->unmap(q, buffer()); }
+
+// handle() was deprecated in ACL v18.11, so this implementation is disabled.
+// TODO Update this to the replacement API
+#if 0
+void CLTensor::setBuffer(void *host_ptr)
+{
+ // Create an empty CLBufferMemoryRegion that carries only the context; the flags are
+ // not used at this point, so any flag may be passed.
+ auto memory = arm_compute::CLMemory(std::make_shared<arm_compute::CLBufferMemoryRegion>(
+ arm_compute::CLScheduler::get().context(), CL_MEM_USE_HOST_PTR | CL_MEM_READ_WRITE, 0));
+
+ // set buffer
+ auto mem = reinterpret_cast<cl::Buffer *>(memory.region()->handle());
+ *mem = cl::Buffer(arm_compute::CLScheduler::get().context(),
+ CL_MEM_USE_HOST_PTR | CL_MEM_READ_WRITE, info()->total_size(), host_ptr);
+ // set correct buffer size
+ memory.region()->set_size(info()->total_size());
+ // import memory
+ allocator()->import_memory(memory);
+}
+#endif
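+
+// A possible replacement for ACL >= 18.11, where CLTensorAllocator::import_memory()
+// takes a cl::Buffer directly (an untested sketch, kept commented out until the
+// TODO above is resolved):
+//
+// void CLTensor::setBuffer(void *host_ptr)
+// {
+//   // Wrap the user-supplied host pointer in a cl::Buffer on the current context
+//   auto buffer = cl::Buffer(arm_compute::CLScheduler::get().context(),
+//                            CL_MEM_USE_HOST_PTR | CL_MEM_READ_WRITE,
+//                            info()->total_size(), host_ptr);
+//   // Import the buffer so the tensor reads and writes the host memory directly
+//   allocator()->import_memory(buffer);
+// }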
+
+} // namespace operand
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun