Diffstat (limited to 'runtimes/neurun/backend/cpu/TensorBuilder.cc')
-rw-r--r--  runtimes/neurun/backend/cpu/TensorBuilder.cc  141
1 file changed, 141 insertions(+), 0 deletions(-)
diff --git a/runtimes/neurun/backend/cpu/TensorBuilder.cc b/runtimes/neurun/backend/cpu/TensorBuilder.cc
new file mode 100644
index 000000000..cf91e5e61
--- /dev/null
+++ b/runtimes/neurun/backend/cpu/TensorBuilder.cc
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TensorBuilder.h"
+
+#include <cassert>
+
+#include "util/logging.h"
+
+namespace
+{
+
+using namespace neurun;
+
+// NOTE This backend supports only NHWC for now
+model::OperandInfo asTensorInfo(const model::OperandInfo &info, model::Layout frontend_layout)
+{
+ const auto &shape = info.shape();
+ const auto &rank = shape.rank();
+ assert(rank <= 4);
+
+ auto ret = info;
+ if (frontend_layout == model::Layout::NCHW && rank == 4)
+ {
+ // NCHW -> NHWC
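+ // e.g. an NCHW shape {1, 3, 224, 224} is registered here as NHWC {1, 224, 224, 3}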
+ uint32_t permutation[4] = {0, 2, 3, 1};
+ ret = model::OperandInfo{{shape.dim(permutation[0]), shape.dim(permutation[1]),
+ shape.dim(permutation[2]), shape.dim(permutation[3])},
+ info.typeInfo()};
+ }
+ return ret;
+}
+
+} // namespace
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+
+TensorBuilder::TensorBuilder() : _tensor_mgr{new TensorManager()}
+{
+ // DO NOTHING
+}
+
+void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind,
+ const model::OperandInfo &info,
+ model::Layout frontend_layout, model::Layout backend_layout,
+ bool as_const)
+{
+ _tensor_info_map.emplace(ind, info);
+ _tensor_layouts_map.insert({ind, std::make_pair(frontend_layout, backend_layout)});
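+ // NOTE Only the frontend layout is consumed later (in notifyFirstUse) to permute
+ // the shape to NHWC; the backend layout is recorded but unused in this file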
+
+ if (as_const)
+ _constants.append(ind);
+}
+
+void TensorBuilder::registerSubTensorInfo(const model::OperandIndex &,
+ const compiler::SubTensorInfo &)
+{
+ // Not supported yet
+ assert(false);
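+ // NOTE assert(false) only fires in debug builds; NDEBUG builds would fall through silently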
+}
+
+void TensorBuilder::notifyFirstUse(const model::OperandIndex &ind)
+{
+ assert(_tensor_info_map.find(ind) != _tensor_info_map.end());
+ const auto tensor_info = asTensorInfo(_tensor_info_map.at(ind), _tensor_layouts_map.at(ind).first);
+ const auto size = tensor_info.total_size();
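+ // Build the tensor with its NHWC-converted info and claim its memory plan;
+ // the claim is paired with releasePlan in notifyLastUse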
+ _tensor_mgr->buildTensor(ind, tensor_info, _constants.contains(ind));
+ _tensor_mgr->claimPlan(ind, size);
+}
+
+void TensorBuilder::notifyLastUse(const model::OperandIndex &ind) { _tensor_mgr->releasePlan(ind); }
+
+void TensorBuilder::prepare(void)
+{
+ _tensor_mgr->allocateConsts();
+ _tensor_mgr->allocateNonconsts();
+}
+
+// TODO Remove this
+void TensorBuilder::allocate(void)
+{
+ // NOTE Nothing to do for now. Allocation is done in the prepare stage, which is not
+ // appropriate because CPU kernels require `ITensor`s to be allocated before Kernel Generation.
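+ // A sketch of the resulting lifecycle, assuming the usual compilation flow:
+ //   registerTensorInfo -> notifyFirstUse/notifyLastUse -> prepare (allocates)
+ //   -> kernel generation -> allocate/allocateConsts/allocateNonconsts (no-ops)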
+}
+
+void TensorBuilder::allocateConsts()
+{
+ // NOTE Nothing to do for now. Allocation is done in the prepare stage, which is not
+ // appropriate because CPU kernels require `ITensor`s to be allocated before Kernel Generation.
+}
+
+void TensorBuilder::allocateNonconsts()
+{
+ // NOTE Nothing to do for now. Allocation is done in the prepare stage, which is not
+ // appropriate because CPU kernels require `ITensor`s to be allocated before Kernel Generation.
+}
+
+std::shared_ptr<::neurun::backend::operand::ITensor>
+TensorBuilder::tensorAt(const model::OperandIndex &ind)
+{
+ return _tensor_mgr->at(ind);
+}
+
+std::shared_ptr<backend::operand::IObject> TensorBuilder::wrapTensor(const model::OperandIndex &ind)
+{
+ return _tensor_mgr->wrapTensor(ind);
+}
+
+void TensorBuilder::iterate(const IterateFunction &fn) { _tensor_mgr->iterate(fn); }
+
+std::shared_ptr<operand::Tensor> TensorBuilder::at(const ::neurun::model::OperandIndex &ind)
+{
+ return _tensor_mgr->at(ind);
+}
+
+std::unique_ptr<ITensorManager> TensorBuilder::releaseTensorManager(void)
+{
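+ // NOTE After this move _tensor_mgr is null, so the accessors above must not be
+ // called on this builder anymore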
+ return std::move(_tensor_mgr);
+}
+
+} // namespace cpu
+} // namespace backend
+} // namespace neurun