Diffstat (limited to 'runtimes/neurun/src/backend/cpu/TensorBuilder.cc')
-rw-r--r--  runtimes/neurun/src/backend/cpu/TensorBuilder.cc  124
1 file changed, 0 insertions, 124 deletions
diff --git a/runtimes/neurun/src/backend/cpu/TensorBuilder.cc b/runtimes/neurun/src/backend/cpu/TensorBuilder.cc
deleted file mode 100644
index 9c39b9c00..000000000
--- a/runtimes/neurun/src/backend/cpu/TensorBuilder.cc
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "TensorBuilder.h"
-
-#include <cassert>
-
-#include "operand/Object.h"
-#include "util/logging.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace cpu
-{
-
-TensorBuilder::TensorBuilder() : _mem_planner(std::make_shared<FirstFitPlanner>())
-{
- // DO NOTHING
-}
-
-void TensorBuilder::registerTensorInfo(const model::operand::Index &ind,
- const compiler::TensorInfo &info)
-{
- _tensor_info_map.insert({ind, info});
-}
-
-void TensorBuilder::registerSubTensorInfo(const model::operand::Index &,
- const compiler::SubTensorInfo &)
-{
- // Not supported yet
- assert(false);
-}
-
-void TensorBuilder::notifyFirstUse(const model::operand::Index &ind)
-{
- assert(_tensor_info_map.find(ind) != _tensor_info_map.end());
- const auto &info = _tensor_info_map.at(ind);
-
- const auto size = info.total_size();
- _mem_planner->claim(ind, size);
-}
-
-void TensorBuilder::notifyLastUse(const model::operand::Index &ind) { _mem_planner->release(ind); }
-
-void TensorBuilder::prepare(void)
-{
- assert(_tensors.size() == 0);
-
- _mem_alloc = std::make_shared<Allocator>(_mem_planner->capacity());
- assert(_mem_alloc->base());
-
- for (auto &mem_plan : _mem_planner->memory_plans())
- {
- auto ind = mem_plan.first;
- auto mem_blk = mem_plan.second;
- const auto &info = _tensor_info_map[ind];
-
- uint8_t *buffer = _mem_alloc->base() + mem_blk.offset;
- auto tensor = std::make_shared<operand::Tensor>(info);
- tensor->setBuffer(buffer);
- _tensors[ind] = tensor;
-
- VERBOSE(CPU_TENSORBUILDER) << "TENSOR(#" << ind.value() << "): " << static_cast<void *>(buffer)
- << std::endl;
-
- // Tensors must be created here for now; otherwise the stages would hit a segmentation fault
- }
-}
-
-void TensorBuilder::allocate(void)
-{
- // NOTE Nothing to do for now. Allocation is actually done in prepare(), which is wrong
-}
-
-std::shared_ptr<::neurun::backend::operand::ITensor>
-TensorBuilder::tensorAt(const model::operand::Index &ind)
-{
- return _tensors.at(ind);
-}
-
-std::shared_ptr<backend::operand::IObject>
-TensorBuilder::wrapTensor(const model::operand::Index &ind)
-{
- if (_objects.find(ind) != _objects.end())
- {
- return _objects.at(ind);
- }
- else
- {
- return _objects[ind] = std::make_shared<operand::Object>(_tensors.at(ind));
- }
-}
-
-void TensorBuilder::iterate(const IterateFunction &fn)
-{
- for (const auto &it : _tensors)
- {
- fn(it.first);
- }
-}
-
-std::shared_ptr<operand::Tensor> TensorBuilder::at(const ::neurun::model::operand::Index &ind)
-{
- return _tensors.at(ind);
-}
-
-} // namespace cpu
-} // namespace backend
-} // namespace neurun
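
For context, a minimal sketch of how a builder like the one removed above is typically driven. The buildTensors() driver and its parameters are hypothetical; only the TensorBuilder calls correspond to the interface in the deleted file:

// Hypothetical driver code, not part of the removed file.
// Lifetimes are declared first, then prepare() lays out one contiguous
// arena via the first-fit planner and binds each Tensor to a slice of it.
void buildTensors(neurun::backend::cpu::TensorBuilder &builder,
                  const std::vector<neurun::model::operand::Index> &operands,
                  const std::vector<neurun::compiler::TensorInfo> &infos)
{
  // 1. Register shape/type info so total_size() is known per operand
  for (size_t i = 0; i < operands.size(); ++i)
    builder.registerTensorInfo(operands[i], infos[i]);

  // 2. Declare lifetimes; the planner claims/releases offsets first-fit
  for (const auto &ind : operands)
    builder.notifyFirstUse(ind);
  for (const auto &ind : operands)
    builder.notifyLastUse(ind);

  // 3. Allocate the arena and bind each tensor's buffer
  builder.prepare();
  builder.allocate(); // no-op here; allocation already happened in prepare()
}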