summaryrefslogtreecommitdiff
path: root/runtimes/neurun/backend/srcn/TensorManager.cc
diff options
context:
space:
mode:
Diffstat (limited to 'runtimes/neurun/backend/srcn/TensorManager.cc')
-rw-r--r-- runtimes/neurun/backend/srcn/TensorManager.cc | 100
1 files changed, 100 insertions, 0 deletions
diff --git a/runtimes/neurun/backend/srcn/TensorManager.cc b/runtimes/neurun/backend/srcn/TensorManager.cc
new file mode 100644
index 000000000..d0c80273e
--- /dev/null
+++ b/runtimes/neurun/backend/srcn/TensorManager.cc
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TensorManager.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+
// Construct two independent memory managers: one for constant tensors and one
// for non-constant tensors. They are kept separate so their allocation /
// deallocation lifetimes can be driven independently (see allocateConsts /
// allocateNonconsts below).
// NOTE(review): assumes _const_mgr/_nonconst_mgr are smart pointers declared in
// TensorManager.h that take ownership of the raw `new` results — confirm
// against the header; if they are raw pointers this leaks.
TensorManager::TensorManager() : _const_mgr{new MemoryManager()}, _nonconst_mgr{new MemoryManager()}
{
  // DO NOTHING
}
+
+void TensorManager::allocateConsts(void) { _const_mgr->allocate(); }
+
+void TensorManager::allocateNonconsts(void) { _nonconst_mgr->allocate(); }
+
+void TensorManager::deallocateConsts(void) { _const_mgr->deallocate(); }
+
+void TensorManager::deallocateNonconsts(void) { _nonconst_mgr->deallocate(); }
+
+void TensorManager::buildTensor(const model::OperandIndex &ind,
+ const model::OperandInfo &tensor_info, bool as_const)
+{
+ assert(_ind_to_mgr.find(ind) == _ind_to_mgr.end());
+ if (as_const)
+ {
+ _const_mgr->buildTensor(ind, tensor_info);
+ _ind_to_mgr.insert({ind, *_const_mgr});
+ }
+ else
+ {
+ _nonconst_mgr->buildTensor(ind, tensor_info);
+ _ind_to_mgr.insert({ind, *_nonconst_mgr});
+ }
+}
+
+void TensorManager::claimPlan(const model::OperandIndex &ind, uint32_t size)
+{
+ assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+ _ind_to_mgr.at(ind).claimPlan(ind, size);
+}
+
+void TensorManager::releasePlan(const model::OperandIndex &ind)
+{
+ assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+ _ind_to_mgr.at(ind).releasePlan(ind);
+}
+
+std::shared_ptr<backend::operand::IObject> TensorManager::wrapTensor(const model::OperandIndex &ind)
+{
+ assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+ return _ind_to_mgr.at(ind).wrapTensor(ind);
+}
+
+std::shared_ptr<operand::Tensor> TensorManager::at(const ::neurun::model::OperandIndex &ind)
+{
+ assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+ return _ind_to_mgr.at(ind).tensors().at(ind);
+}
+
+model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &TensorManager::constTensors(void)
+{
+ return _const_mgr->tensors();
+}
+
+model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &TensorManager::nonconstTensors(void)
+{
+ return _nonconst_mgr->tensors();
+}
+
+void TensorManager::iterate(const std::function<void(const model::OperandIndex &)> &fn)
+{
+ for (auto it : _nonconst_mgr->tensors())
+ fn(it.first);
+
+ for (auto it : _const_mgr->tensors())
+ fn(it.first);
+}
+
+} // namespace srcn
+} // namespace backend
+} // namespace neurun