path: root/runtimes/neurun/src/backend/cpu/TensorBuilder.cc
Diffstat (limited to 'runtimes/neurun/src/backend/cpu/TensorBuilder.cc')
-rw-r--r--   runtimes/neurun/src/backend/cpu/TensorBuilder.cc   83
1 file changed, 67 insertions, 16 deletions
diff --git a/runtimes/neurun/src/backend/cpu/TensorBuilder.cc b/runtimes/neurun/src/backend/cpu/TensorBuilder.cc
index 1b972a830..9c39b9c00 100644
--- a/runtimes/neurun/src/backend/cpu/TensorBuilder.cc
+++ b/runtimes/neurun/src/backend/cpu/TensorBuilder.cc
@@ -19,6 +19,7 @@
#include <cassert>
#include "operand/Object.h"
+#include "util/logging.h"
namespace neurun
{
@@ -27,43 +28,93 @@ namespace backend
namespace cpu
{
-TensorBuilder::TensorBuilder()
+TensorBuilder::TensorBuilder() : _mem_planner(std::make_shared<FirstFitPlanner>())
{
// DO NOTHING
}
-void TensorBuilder::mark(const ::neurun::graph::operand::Index &ind)
+void TensorBuilder::registerTensorInfo(const model::operand::Index &ind,
+ const compiler::TensorInfo &info)
{
- assert(_tensors.size() == 0);
+ _tensor_info_map.insert({ind, info});
+}
+
+void TensorBuilder::registerSubTensorInfo(const model::operand::Index &,
+ const compiler::SubTensorInfo &)
+{
+ // Not supported yet
+ assert(false);
+}
+
+void TensorBuilder::notifyFirstUse(const model::operand::Index &ind)
+{
+ assert(_tensor_info_map.find(ind) != _tensor_info_map.end());
+ const auto &info = _tensor_info_map.at(ind);
- _inds.insert(ind);
+ const auto size = info.total_size();
+ _mem_planner->claim(ind, size);
}
-void TensorBuilder::prepare(codegen::Plan &plan,
- const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx)
+void TensorBuilder::notifyLastUse(const model::operand::Index &ind) { _mem_planner->release(ind); }
+
+void TensorBuilder::prepare(void)
{
assert(_tensors.size() == 0);
- for (auto ind_int : _inds)
+ _mem_alloc = std::make_shared<Allocator>(_mem_planner->capacity());
+ assert(_mem_alloc->base());
+
+ for (auto &mem_plan : _mem_planner->memory_plans())
{
- ::neurun::graph::operand::Index ind{ind_int};
- auto tensor = std::make_shared<operand::Tensor>(tensor_info_ctx.at(ind.asInt()));
- // TODO Fix allocation here. When Tensor object is created the memory for tensor is also
- // allocated, and this must be fixed.
- plan.operands().set(ind, std::make_shared<operand::Object>(tensor));
+ auto ind = mem_plan.first;
+ auto mem_blk = mem_plan.second;
+ const auto &info = _tensor_info_map[ind];
+
+ uint8_t *buffer = _mem_alloc->base() + mem_blk.offset;
+ auto tensor = std::make_shared<operand::Tensor>(info);
+ tensor->setBuffer(buffer);
_tensors[ind] = tensor;
+
+ VERBOSE(CPU_TENSORBUILDER) << "TENSOR(#" << ind.value() << "): " << static_cast<void *>(buffer)
+ << std::endl;
+
+  // If the tensor is not created here for now, later stages would hit a segmentation fault
}
}
void TensorBuilder::allocate(void)
{
- assert(_inds.size() == _tensors.size());
-
// NOTE For now nothing to do. Allocation is done in prepare stage, which is wrong
- // See also: comment in `prepare()`
}
-std::shared_ptr<operand::Tensor> TensorBuilder::at(const ::neurun::graph::operand::Index &ind)
+std::shared_ptr<::neurun::backend::operand::ITensor>
+TensorBuilder::tensorAt(const model::operand::Index &ind)
+{
+ return _tensors.at(ind);
+}
+
+std::shared_ptr<backend::operand::IObject>
+TensorBuilder::wrapTensor(const model::operand::Index &ind)
+{
+ if (_objects.find(ind) != _objects.end())
+ {
+ return _objects.at(ind);
+ }
+ else
+ {
+ return _objects[ind] = std::make_shared<operand::Object>(_tensors.at(ind));
+ }
+}
+
+void TensorBuilder::iterate(const IterateFunction &fn)
+{
+ for (auto it : _tensors)
+ {
+ fn(it.first);
+ }
+}
+
+std::shared_ptr<operand::Tensor> TensorBuilder::at(const ::neurun::model::operand::Index &ind)
{
return _tensors.at(ind);
}
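
The change above replaces mark() and eager per-Tensor allocation with a plan-then-allocate flow: registerTensorInfo() records each operand's TensorInfo, notifyFirstUse()/notifyLastUse() drive claim()/release() on a FirstFitPlanner, and prepare() makes one Allocator of capacity() bytes and points every Tensor at base + offset taken from memory_plans(). The planner implementation itself is not part of this diff, so the sketch below is only an illustration of a first-fit planner with the same claim/release/capacity/memory_plans shape; the FirstFitPlannerSketch class, the Block struct, and the plain uint32_t operand indices are assumptions made for the example, not neurun code.

// first_fit_sketch.cc -- illustrative only; neurun's FirstFitPlanner may differ.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

struct Block
{
  uint32_t offset;
  uint32_t size;
};

class FirstFitPlannerSketch
{
public:
  // Claim a block for operand `ind`: take the lowest offset whose gap between
  // currently-live blocks is large enough (first fit).
  void claim(uint32_t ind, uint32_t size)
  {
    uint32_t offset = 0;
    for (const auto &live : _live) // std::map iterates in ascending offset order
    {
      if (offset + size <= live.first)
        break; // the gap before this live block is big enough
      offset = live.first + live.second;
    }
    _live[offset] = size;
    _plans[ind] = Block{offset, size};
    _capacity = std::max(_capacity, offset + size);
  }

  // Release only returns the region to the free pool for later claims;
  // the operand keeps its planned offset in memory_plans().
  void release(uint32_t ind)
  {
    auto it = _plans.find(ind);
    assert(it != _plans.end());
    _live.erase(it->second.offset);
  }

  uint32_t capacity() const { return _capacity; }
  const std::map<uint32_t, Block> &memory_plans() const { return _plans; }

private:
  std::map<uint32_t, uint32_t> _live; // offset -> size of currently claimed blocks
  std::map<uint32_t, Block> _plans;   // operand index -> planned block
  uint32_t _capacity = 0;             // high-water mark, i.e. the single allocation size
};

int main()
{
  FirstFitPlannerSketch planner;

  // Liveness walk, as notifyFirstUse()/notifyLastUse() would drive it.
  planner.claim(0, 64);  // operand #0 becomes live at offset 0
  planner.claim(1, 128); // operand #1 becomes live at offset 64
  planner.release(0);    // operand #0 dies ...
  planner.claim(2, 32);  // ... so operand #2 reuses offset 0
  planner.release(1);
  planner.release(2);

  // prepare()-style step: one buffer of capacity() bytes, each tensor gets base + offset.
  std::vector<uint8_t> pool(planner.capacity());
  for (const auto &plan : planner.memory_plans())
  {
    uint8_t *buffer = pool.data() + plan.second.offset;
    std::cout << "TENSOR(#" << plan.first << "): offset=" << plan.second.offset
              << " size=" << plan.second.size << " buffer=" << static_cast<void *>(buffer)
              << std::endl;
  }
  return 0;
}

In this sketch operands #0 and #2 end up sharing offset 0 because their lifetimes do not overlap, which is exactly what the claim/release calls from notifyFirstUse()/notifyLastUse() buy over the old allocate-per-Tensor behaviour: the pool here is 192 bytes rather than the 224 bytes a naive sum of tensor sizes would need.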