Diffstat (limited to 'runtime/neurun/backend/srcn/MemoryManager.cc')
-rw-r--r--  runtime/neurun/backend/srcn/MemoryManager.cc  92
1 file changed, 92 insertions, 0 deletions
diff --git a/runtime/neurun/backend/srcn/MemoryManager.cc b/runtime/neurun/backend/srcn/MemoryManager.cc
new file mode 100644
index 000000000..aa07ab168
--- /dev/null
+++ b/runtime/neurun/backend/srcn/MemoryManager.cc
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MemoryManager.h"
+
+#include <cassert>
+
+#include <MemoryPlannerFactory.h>
+#include "util/logging.h"
+#include "util/ConfigSource.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+
+MemoryManager::MemoryManager() : _mem_planner{createMemoryPlanner()}
+{
+  // DO NOTHING
+}
+
+MemoryManager::MemoryManager(const std::string planner_id)
+    : _mem_planner{createMemoryPlanner(planner_id)}
+{
+  // DO NOTHING
+}
+
+cpu_common::IMemoryPlanner *MemoryManager::createMemoryPlanner()
+{
+  auto planner_id = util::getConfigString(util::config::CPU_MEMORY_PLANNER);
+  return cpu_common::MemoryPlannerFactory::get().create(planner_id);
+}
+
+cpu_common::IMemoryPlanner *MemoryManager::createMemoryPlanner(const std::string planner_id)
+{
+  return cpu_common::MemoryPlannerFactory::get().create(planner_id);
+}
+
+void MemoryManager::buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &info,
+                                ir::Layout layout)
+{
+  auto tensor = std::make_shared<operand::Tensor>(info, layout);
+  _tensors[ind] = tensor;
+}
+
+void MemoryManager::claimPlan(const ir::OperandIndex &ind, uint32_t size)
+{
+  _mem_planner->claim(ind, size);
+}
+
+void MemoryManager::releasePlan(const ir::OperandIndex &ind) { _mem_planner->release(ind); }
+
+void MemoryManager::allocate(void)
+{
+  _mem_alloc = std::make_shared<cpu_common::Allocator>(_mem_planner->capacity());
+  assert(_mem_alloc->base());
+
+  for (auto &mem_plan : _mem_planner->memory_plans())
+  {
+    auto ind = mem_plan.first;
+    auto mem_blk = mem_plan.second;
+
+    uint8_t *buffer = _mem_alloc->base() + mem_blk.offset;
+    auto tensor = _tensors[ind];
+    tensor->setBuffer(buffer);
+
+    VERBOSE(CPU_MEMORYMANAGER) << "TENSOR(#" << ind.value() << "): " << static_cast<void *>(buffer)
+                               << std::endl;
+
+    // Currently, if we do not make the tensor here, kernel generation causes a
+    // segmentation fault. See also: comments in the `allocate` method.
+  }
+}
+
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
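
For context, the sketch below is not part of this commit; it is a hypothetical illustration of how a tensor builder might drive the MemoryManager added above. It assumes the public methods shown in the diff (buildTensor, claimPlan, releasePlan, allocate), plus ir::Layout::NHWC and an ir::OperandInfo::total_size() accessor; the function name and operand arguments are made up for the example.

// Hypothetical usage sketch, not part of this commit.
#include "MemoryManager.h"

namespace example
{

void plan_and_allocate(neurun::backend::srcn::MemoryManager &mgr,
                       const neurun::ir::OperandIndex &in,
                       const neurun::ir::OperandInfo &in_info,
                       const neurun::ir::OperandIndex &out,
                       const neurun::ir::OperandInfo &out_info)
{
  // 1. Register tensor objects; no backing memory exists yet.
  mgr.buildTensor(in, in_info, neurun::ir::Layout::NHWC);
  mgr.buildTensor(out, out_info, neurun::ir::Layout::NHWC);

  // 2. Declare operand lifetimes in execution order. The planner selected via
  //    CPU_MEMORY_PLANNER may reuse a region once it has been released.
  mgr.claimPlan(in, in_info.total_size());
  mgr.claimPlan(out, out_info.total_size());
  mgr.releasePlan(in);
  mgr.releasePlan(out);

  // 3. Allocate one block of planner capacity and point each registered
  //    tensor at its planned offset via setBuffer().
  mgr.allocate();
}

} // namespace example

Note that the manager never frees the individual buffers it hands out: the single cpu_common::Allocator owns the whole region, so the tensor buffers stay valid for as long as the MemoryManager (and its allocator) is alive.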