summaryrefslogtreecommitdiff
path: root/runtimes/neurun/src/backend/cpu/TensorBuilder.h
diff options
context:
space:
mode:
Diffstat (limited to 'runtimes/neurun/src/backend/cpu/TensorBuilder.h')
-rw-r--r-- runtimes/neurun/src/backend/cpu/TensorBuilder.h | 47
1 files changed, 36 insertions, 11 deletions
diff --git a/runtimes/neurun/src/backend/cpu/TensorBuilder.h b/runtimes/neurun/src/backend/cpu/TensorBuilder.h
index f61a930fe..2715d57f0 100644
--- a/runtimes/neurun/src/backend/cpu/TensorBuilder.h
+++ b/runtimes/neurun/src/backend/cpu/TensorBuilder.h
@@ -18,11 +18,12 @@
#define __NEURUN_BACKEND_CPU_TENSOR_BUILDER_H__
#include <unordered_map>
-#include <unordered_set>
-#include "backend/ITensorBuilder.h"
+#include "backend/interface/ITensorBuilder.h"
#include "backend/cpu/operand/Tensor.h"
-#include "graph/operand/Index.h"
+#include "backend/cpu/operand/Object.h"
+#include "model/operand/Index.h"
+#include "MemoryPlanner.h"
namespace neurun
{
@@ -31,23 +32,47 @@ namespace backend
namespace cpu
{
-class Plan;
-
class TensorBuilder : public ITensorBuilder
{
public:
TensorBuilder();
- virtual void mark(const ::neurun::graph::operand::Index &ind) override;
- virtual void prepare(codegen::Plan &plan,
- const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx) override;
+ /**
+ * @brief Register tensor information to allocate on CPU backend
+ * @param[in] ind Operand index
+ * @param[in] info Tensor information
+ */
+ virtual void registerTensorInfo(const model::operand::Index &ind,
+ const compiler::TensorInfo &info) override;
+ /**
+ * @brief Register subtensor information to allocate on CPU backend
+ * @param[in] ind Operand index
+   * @param[in] info Subtensor information
+ */
+ virtual void registerSubTensorInfo(const model::operand::Index &ind,
+ const compiler::SubTensorInfo &info) override;
+
+ virtual void notifyFirstUse(const model::operand::Index &) override;
+ virtual void notifyLastUse(const model::operand::Index &) override;
+
+ virtual void prepare(void) override;
virtual void allocate(void) override;
- std::shared_ptr<operand::Tensor> at(const ::neurun::graph::operand::Index &ind);
+ virtual std::shared_ptr<::neurun::backend::operand::ITensor>
+ tensorAt(const model::operand::Index &ind) override;
+ virtual std::shared_ptr<backend::operand::IObject>
+ wrapTensor(const model::operand::Index &ind) override;
+ virtual void iterate(const IterateFunction &fn) override;
+
+ std::shared_ptr<operand::Tensor> at(const ::neurun::model::operand::Index &ind);
private:
- std::unordered_set<graph::operand::Index> _inds;
- std::unordered_map<graph::operand::Index, std::shared_ptr<operand::Tensor>> _tensors;
+ std::unordered_map<model::operand::Index, compiler::TensorInfo> _tensor_info_map;
+ std::unordered_map<model::operand::Index, std::shared_ptr<operand::Tensor>> _tensors;
+ std::unordered_map<model::operand::Index, std::shared_ptr<operand::Object>> _objects;
+ std::unordered_map<model::operand::Index, Block> _tensor_mem_map;
+ std::shared_ptr<IMemoryPlanner> _mem_planner;
+ std::shared_ptr<Allocator> _mem_alloc;
};
} // namespace cpu