summaryrefslogtreecommitdiff
path: root/runtimes/neurun/src/backend/acl_cl/operand
diff options
context:
space:
mode:
Diffstat (limited to 'runtimes/neurun/src/backend/acl_cl/operand')
-rw-r--r--runtimes/neurun/src/backend/acl_cl/operand/CLSubTensor.cc61
-rw-r--r--runtimes/neurun/src/backend/acl_cl/operand/CLSubTensor.h63
-rw-r--r--runtimes/neurun/src/backend/acl_cl/operand/CLTensor.cc81
-rw-r--r--runtimes/neurun/src/backend/acl_cl/operand/CLTensor.h67
-rw-r--r--runtimes/neurun/src/backend/acl_cl/operand/ICLTensor.cc48
-rw-r--r--runtimes/neurun/src/backend/acl_cl/operand/ICLTensor.h73
-rw-r--r--runtimes/neurun/src/backend/acl_cl/operand/Object.cc3
-rw-r--r--runtimes/neurun/src/backend/acl_cl/operand/Object.h14
8 files changed, 403 insertions, 7 deletions
diff --git a/runtimes/neurun/src/backend/acl_cl/operand/CLSubTensor.cc b/runtimes/neurun/src/backend/acl_cl/operand/CLSubTensor.cc
new file mode 100644
index 000000000..f64b521dd
--- /dev/null
+++ b/runtimes/neurun/src/backend/acl_cl/operand/CLSubTensor.cc
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "CLSubTensor.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_cl
+{
+namespace operand
+{
+
+CLSubTensor::CLSubTensor(ICLTensor *parent, const arm_compute::TensorShape &tensor_shape,
+ const arm_compute::Coordinates &coords, bool extend_parent)
+ : _cl_sub_tensor(std::make_shared<arm_compute::CLSubTensor>(parent->handle(), tensor_shape,
+ coords, extend_parent))
+{
+ // DO NOTHING
+}
+
+arm_compute::CLSubTensor *CLSubTensor::handle() const { return _cl_sub_tensor.get(); }
+
+arm_compute::CLSubTensor *CLSubTensor::handle() { return _cl_sub_tensor.get(); }
+
+void CLSubTensor::map(bool blocking) { _cl_sub_tensor->map(blocking); }
+
+void CLSubTensor::unmap() { _cl_sub_tensor->unmap(); }
+
+uint8_t *CLSubTensor::doMap(cl::CommandQueue &q, bool blocking)
+{
+ assert(cl_buffer().get() == nullptr);
+ return static_cast<uint8_t *>(q.enqueueMapBuffer(cl_buffer(), blocking ? CL_TRUE : CL_FALSE,
+ CL_MAP_READ | CL_MAP_WRITE, 0,
+ info()->total_size()));
+}
+
+void CLSubTensor::doUnmap(cl::CommandQueue &q)
+{
+ assert(cl_buffer().get() == nullptr);
+ q.enqueueUnmapMemObject(cl_buffer(), buffer());
+}
+
+} // namespace operand
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/src/backend/acl_cl/operand/CLSubTensor.h b/runtimes/neurun/src/backend/acl_cl/operand/CLSubTensor.h
new file mode 100644
index 000000000..cef78c196
--- /dev/null
+++ b/runtimes/neurun/src/backend/acl_cl/operand/CLSubTensor.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ACL_CL_OPERAND_CL_SUB_TENSOR_H__
+#define __NEURUN_BACKEND_ACL_CL_OPERAND_CL_SUB_TENSOR_H__
+
+#include <arm_compute/runtime/CL/CLSubTensor.h>
+#include "ICLTensor.h"
+#include "compiler/SubTensorInfo.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_cl
+{
+namespace operand
+{
+
+class CLSubTensor : public ICLTensor
+{
+public:
+ CLSubTensor() = delete;
+
+public:
+ CLSubTensor(ICLTensor *parent, const arm_compute::TensorShape &tensor_shape,
+ const arm_compute::Coordinates &coords, bool extend_parent = false);
+
+public:
+ arm_compute::CLSubTensor *handle() const override;
+ arm_compute::CLSubTensor *handle() override;
+
+public:
+ void map(bool blocking = true);
+ void unmap();
+
+protected:
+ uint8_t *doMap(cl::CommandQueue &q, bool blocking) override;
+ virtual void doUnmap(cl::CommandQueue &q) override;
+
+private:
+ std::shared_ptr<arm_compute::CLSubTensor> _cl_sub_tensor;
+};
+
+} // namespace operand
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ACL_CL_OPERAND_CL_SUB_TENSOR_H__
diff --git a/runtimes/neurun/src/backend/acl_cl/operand/CLTensor.cc b/runtimes/neurun/src/backend/acl_cl/operand/CLTensor.cc
new file mode 100644
index 000000000..e7b718df3
--- /dev/null
+++ b/runtimes/neurun/src/backend/acl_cl/operand/CLTensor.cc
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <arm_compute/runtime/CL/CLScheduler.h>
+#include <arm_compute/runtime/CL/CLMemory.h>
+#include <arm_compute/runtime/CL/CLMemoryRegion.h>
+#include "CLTensor.h"
+
+#include "backend/acl_cl/Convert.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_cl
+{
+namespace operand
+{
+
+CLTensor::CLTensor(const compiler::TensorInfo &info)
+ : _cl_tensor(std::make_shared<arm_compute::CLTensor>())
+{
+ auto acl_cl_info = asTensorInfo(info.shape(), info.typeInfo());
+ allocator()->init(acl_cl_info);
+}
+
+arm_compute::CLTensor *CLTensor::handle() const { return _cl_tensor.get(); }
+
+arm_compute::CLTensor *CLTensor::handle() { return _cl_tensor.get(); }
+
+arm_compute::CLTensorAllocator *CLTensor::allocator() { return _cl_tensor->allocator(); }
+
+void CLTensor::map(bool blocking) { _cl_tensor->map(blocking); }
+
+void CLTensor::unmap() { _cl_tensor->unmap(); }
+
+uint8_t *CLTensor::doMap(cl::CommandQueue &q, bool blocking)
+{
+ return allocator()->map(q, blocking);
+}
+
+void CLTensor::doUnmap(cl::CommandQueue &q) { allocator()->unmap(q, buffer()); }
+
// The region handle() call used below is deprecated as of ACL v18.11, so this
// implementation is compiled out until it is ported to the newer memory API.
// TODO Update this for ACL v18.11+
#if 0
// Wraps an external host pointer as this tensor's backing memory instead of
// letting the allocator own it (CL_MEM_USE_HOST_PTR import).
void CLTensor::setBuffer(void *host_ptr)
{
  // create empty MemoryRegion: just context. Since flag isn't used here, no matter which flag to
  // pass
  auto memory = arm_compute::CLMemory(std::make_shared<arm_compute::CLBufferMemoryRegion>(
      arm_compute::CLScheduler::get().context(), CL_MEM_USE_HOST_PTR | CL_MEM_READ_WRITE, 0));

  // Rebind the region's cl::Buffer to one created over the caller's host pointer.
  auto mem = reinterpret_cast<cl::Buffer *>(memory.region()->handle());
  *mem = cl::Buffer(arm_compute::CLScheduler::get().context(),
                    CL_MEM_USE_HOST_PTR | CL_MEM_READ_WRITE, info()->total_size(), host_ptr);
  // set correct buffer size
  memory.region()->set_size(info()->total_size());
  // Hand the externally-backed region to the allocator.
  allocator()->import_memory(memory);
}
#endif
+
+} // namespace operand
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/src/backend/acl_cl/operand/CLTensor.h b/runtimes/neurun/src/backend/acl_cl/operand/CLTensor.h
new file mode 100644
index 000000000..31c96e201
--- /dev/null
+++ b/runtimes/neurun/src/backend/acl_cl/operand/CLTensor.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ACL_CL_OPERAND_CL_TENSOR_H__
+#define __NEURUN_BACKEND_ACL_CL_OPERAND_CL_TENSOR_H__
+
+#include <arm_compute/core/TensorInfo.h>
+#include <arm_compute/runtime/CL/CLTensor.h>
+#include <arm_compute/runtime/CL/CLScheduler.h>
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "ICLTensor.h"
+#include "compiler/TensorInfo.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_cl
+{
+namespace operand
+{
+
// Backend tensor owning an arm_compute::CLTensor (allocates its own CL memory,
// unlike CLSubTensor which views a parent's memory).
class CLTensor : public ICLTensor
{
public:
  CLTensor() = delete;

public:
  // Initializes the ACL tensor's metadata from the frontend TensorInfo;
  // does not allocate device memory by itself.
  CLTensor(const compiler::TensorInfo &info);

public:
  arm_compute::CLTensor *handle() const override;
  arm_compute::CLTensor *handle() override;

public:
  // Allocator managing this tensor's backing CL memory.
  arm_compute::CLTensorAllocator *allocator();
  void map(bool blocking = true);
  void unmap();
  // NOTE(review): the definition of setBuffer() in CLTensor.cc is currently
  // compiled out (#if 0, pending ACL v18.11 API update) — calling it links
  // against nothing. Confirm before use.
  void setBuffer(void *host_ptr);

protected:
  uint8_t *doMap(cl::CommandQueue &q, bool blocking) override;
  void doUnmap(cl::CommandQueue &q) override;

private:
  std::shared_ptr<arm_compute::CLTensor> _cl_tensor;
};
+
+} // namespace operand
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ACL_CL_OPERAND_CL_TENSOR_H__
diff --git a/runtimes/neurun/src/backend/acl_cl/operand/ICLTensor.cc b/runtimes/neurun/src/backend/acl_cl/operand/ICLTensor.cc
new file mode 100644
index 000000000..23d723de4
--- /dev/null
+++ b/runtimes/neurun/src/backend/acl_cl/operand/ICLTensor.cc
@@ -0,0 +1,48 @@
+#include "ICLTensor.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_cl
+{
+namespace operand
+{
+
+size_t ICLTensor::total_size() const { return info()->total_size(); }
+
+size_t ICLTensor::dimension(size_t index) const { return info()->dimension(index); }
+
+size_t ICLTensor::num_dimensions() const { return info()->num_dimensions(); }
+
+size_t ICLTensor::calcOffset(const neurun::util::feature::Coordinate4D &coords)
+{
+ int32_t N = coords.n();
+ int32_t C = coords.c();
+ int32_t H = coords.h();
+ int32_t W = coords.w();
+
+ ::arm_compute::Coordinates coordinates{W, H, C, N};
+ return info()->offset_element_in_bytes(coordinates);
+}
+
+arm_compute::DataType ICLTensor::data_type() const { return info()->data_type(); }
+
+uint8_t *ICLTensor::buffer() const { return handle()->buffer(); }
+
+const cl::Buffer &ICLTensor::cl_buffer() const { return handle()->cl_buffer(); }
+
+arm_compute::ITensorInfo *ICLTensor::info() const { return handle()->info(); }
+
+arm_compute::ITensorInfo *ICLTensor::info() { return handle()->info(); }
+
+void ICLTensor::map(cl::CommandQueue &q, bool blocking) { return handle()->map(q, blocking); }
+
+void ICLTensor::unmap(cl::CommandQueue &q) { return handle()->unmap(q); }
+
+void ICLTensor::clear(cl::CommandQueue &q) { return handle()->clear(q); }
+
+} // namespace operand
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/src/backend/acl_cl/operand/ICLTensor.h b/runtimes/neurun/src/backend/acl_cl/operand/ICLTensor.h
new file mode 100644
index 000000000..226fbf814
--- /dev/null
+++ b/runtimes/neurun/src/backend/acl_cl/operand/ICLTensor.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ACL_CL_OPERAND_I_CL_TENSOR_H__
+#define __NEURUN_BACKEND_ACL_CL_OPERAND_I_CL_TENSOR_H__
+
+#include <arm_compute/core/ITensorInfo.h>
+#include <arm_compute/core/CL/ICLTensor.h>
+#include "backend/interface/operand/ITensor.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_cl
+{
+namespace operand
+{
+
// Common base for acl_cl backend tensors: adapts an arm_compute::ICLTensor
// to the backend-neutral neurun ITensor interface.
class ICLTensor : public ::neurun::backend::operand::ITensor
{
public:
  ICLTensor() = default;
  // Copying is disabled; moving is allowed (shared handle ownership lives
  // in the concrete subclasses).
  ICLTensor(const ICLTensor &) = delete;
  ICLTensor &operator=(const ICLTensor &) = delete;
  ICLTensor(ICLTensor &&) = default;
  ICLTensor &operator=(ICLTensor &&) = default;
  virtual ~ICLTensor() = default;

public:
  // Concrete subclasses expose their wrapped ACL tensor here.
  virtual arm_compute::ICLTensor *handle() = 0;
  virtual arm_compute::ICLTensor *handle() const = 0;

public:
  // ITensor interface — all implemented by delegating to handle()/info().
  uint8_t *buffer() const override;
  size_t total_size() const override;
  size_t dimension(size_t index) const override;
  size_t num_dimensions() const override;
  size_t calcOffset(const neurun::util::feature::Coordinate4D &coords) override;

public:
  arm_compute::DataType data_type() const;
  const cl::Buffer &cl_buffer() const;
  arm_compute::ITensorInfo *info() const;
  arm_compute::ITensorInfo *info();
  void map(cl::CommandQueue &q, bool blocking = true);
  void unmap(cl::CommandQueue &q);
  void clear(cl::CommandQueue &q);

protected:
  // Subclass hooks performing the actual OpenCL map/unmap on queue @p q.
  virtual uint8_t *doMap(cl::CommandQueue &q, bool blocking) = 0;
  virtual void doUnmap(cl::CommandQueue &q) = 0;
};
+
+} // namespace operand
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ACL_CL_OPERAND_I_CL_TENSOR_H__
diff --git a/runtimes/neurun/src/backend/acl_cl/operand/Object.cc b/runtimes/neurun/src/backend/acl_cl/operand/Object.cc
index 98b96a11a..a84fa2366 100644
--- a/runtimes/neurun/src/backend/acl_cl/operand/Object.cc
+++ b/runtimes/neurun/src/backend/acl_cl/operand/Object.cc
@@ -27,7 +27,8 @@ namespace acl_cl
namespace operand
{
-void Object::access(const std::function<void(::arm_compute::ITensor &tensor)> &fn) const
+void Object::access(
+ const std::function<void(::neurun::backend::operand::ITensor &tensor)> &fn) const
{
auto &queue = ::arm_compute::CLScheduler::get().queue();
diff --git a/runtimes/neurun/src/backend/acl_cl/operand/Object.h b/runtimes/neurun/src/backend/acl_cl/operand/Object.h
index da33c0549..4ba22b269 100644
--- a/runtimes/neurun/src/backend/acl_cl/operand/Object.h
+++ b/runtimes/neurun/src/backend/acl_cl/operand/Object.h
@@ -18,9 +18,9 @@
#define __NEURUN_BACKEND_ACL_CL_OPERAND_OBJECT_H__
#include <memory>
-#include <arm_compute/core/CL/ICLTensor.h>
-#include "backend/IObject.h"
+#include "backend/interface/operand/IObject.h"
+#include "backend/acl_cl/operand/ICLTensor.h"
namespace neurun
{
@@ -37,19 +37,21 @@ public:
Object() = default;
public:
- Object(const std::shared_ptr<::arm_compute::ICLTensor> &tensor) : _tensor{tensor}
+ Object(const std::shared_ptr<::neurun::backend::acl_cl::operand::ICLTensor> &tensor)
+ : _tensor{tensor}
{
// DO NOTHING
}
public:
- ::arm_compute::ICLTensor *ptr(void) const override { return _tensor.get(); }
+ ::neurun::backend::acl_cl::operand::ICLTensor *ptr(void) const override { return _tensor.get(); }
private:
- std::shared_ptr<::arm_compute::ICLTensor> _tensor;
+ std::shared_ptr<::neurun::backend::acl_cl::operand::ICLTensor> _tensor;
public:
- void access(const std::function<void(::arm_compute::ITensor &tensor)> &fn) const override;
+ void
+ access(const std::function<void(::neurun::backend::operand::ITensor &tensor)> &fn) const override;
};
} // namespace operand