Diffstat (limited to 'runtimes/neurun/backend/cpu/MemoryManager.h')
-rw-r--r--  runtimes/neurun/backend/cpu/MemoryManager.h  65
1 file changed, 65 insertions, 0 deletions
diff --git a/runtimes/neurun/backend/cpu/MemoryManager.h b/runtimes/neurun/backend/cpu/MemoryManager.h
new file mode 100644
index 000000000..6b225edcb
--- /dev/null
+++ b/runtimes/neurun/backend/cpu/MemoryManager.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_CPU_MEMORY_MANAGER_H__
+#define __NEURUN_BACKEND_CPU_MEMORY_MANAGER_H__
+
+#include "backend/IMemoryManager.h"
+#include "MemoryPlanner.h"
+#include "operand/Tensor.h"
+#include <backend/operand/Object.h>
+#include "model/OperandIndexMap.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+
+class MemoryManager : public backend::IMemoryManager
+{
+public:
+ MemoryManager();
+ virtual ~MemoryManager() = default;
+
+ void allocate(void) override;
+ void deallocate(void) override { _mem_alloc->release(); }
+
+ void buildTensor(const model::OperandIndex &ind, const model::OperandInfo &info);
+ void claimPlan(const model::OperandIndex &ind, uint32_t size);
+ void releasePlan(const model::OperandIndex &ind);
+
+ model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &tensors(void) { return _tensors; }
+
+ std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind);
+
+private:
+ IMemoryPlanner *createMemoryPlanner();
+
+private:
+ model::OperandIndexMap<std::shared_ptr<operand::Tensor>> _tensors;
+ model::OperandIndexMap<std::shared_ptr<::neurun::backend::operand::Object>> _objects;
+ model::OperandIndexMap<Block> _tensor_mem_map;
+ std::shared_ptr<IMemoryPlanner> _mem_planner;
+ std::shared_ptr<Allocator> _mem_alloc;
+};
+
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_CPU_MEMORY_MANAGER_H__
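Below is a minimal, hypothetical usage sketch of the MemoryManager interface declared in this new header, illustrating the intended flow of claiming and releasing plans before a single allocate() call. It is not part of this commit: the OperandIndex/OperandInfo construction and the total_size() accessor are assumed from the surrounding neurun codebase, and the exampleUsage function is invented for illustration.

#include "MemoryManager.h"

// Sketch only: drives the declared API in the order a caller (e.g. a tensor
// builder) would be expected to use it -- plan lifetimes first, then allocate once.
void exampleUsage(neurun::backend::cpu::MemoryManager &mgr,
                  const neurun::model::OperandIndex &ind,
                  const neurun::model::OperandInfo &info)
{
  mgr.buildTensor(ind, info);             // register an (unallocated) Tensor for this operand
  mgr.claimPlan(ind, info.total_size());  // assumed size accessor; marks the operand live for planning
  mgr.releasePlan(ind);                   // marks it dead so the planner may reuse its block
  mgr.allocate();                         // runs the planner and backs all planned tensors
  auto obj = mgr.wrapTensor(ind);         // IObject view consumed by the rest of the backend
  mgr.deallocate();                       // releases the underlying allocation
}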