Diffstat (limited to 'runtimes/neurun/backend/cpu/TensorBuilder.h')
-rw-r--r--  runtimes/neurun/backend/cpu/TensorBuilder.h  94
1 file changed, 94 insertions, 0 deletions
diff --git a/runtimes/neurun/backend/cpu/TensorBuilder.h b/runtimes/neurun/backend/cpu/TensorBuilder.h
new file mode 100644
index 000000000..efafbd97b
--- /dev/null
+++ b/runtimes/neurun/backend/cpu/TensorBuilder.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_CPU_TENSOR_BUILDER_H__
+#define __NEURUN_BACKEND_CPU_TENSOR_BUILDER_H__
+
+#include <unordered_map>
+
+#include <backend/ITensorBuilder.h>
+#include <backend/operand/Object.h>
+#include "operand/Tensor.h"
+#include "model/OperandIndexMap.h"
+#include "TensorManager.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+
+class TensorBuilder : public ITensorBuilder
+{
+public:
+ TensorBuilder();
+
+ /**
+ * @brief Register tensor information to allocate on CPU backend
+ * @param[in] ind Operand index
+ * @param[in] info Operand information
+ * @param[in] frontend_layout Layout of the operand as seen by the frontend
+ * @param[in] backend_layout Layout of the operand on the CPU backend
+ * @param[in] as_const Whether the operand is registered as a constant
+ */
+ void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info,
+ model::Layout frontend_layout, model::Layout backend_layout,
+ bool as_const) override;
+ /**
+ * @brief Register subtensor information to allocate on CPU backend
+ * @param[in] ind Operand index
+ * @param[in] info Tensor information
+ */
+ void registerSubTensorInfo(const model::OperandIndex &ind,
+ const compiler::SubTensorInfo &info) override;
+
+ void notifyFirstUse(const model::OperandIndex &) override;
+ void notifyLastUse(const model::OperandIndex &) override;
+
+ void prepare(void) override;
+ void allocate(void) override; // TODO Remove this
+ void allocateConsts() override;
+ void allocateNonconsts() override;
+ void postFunctionPrepare() override { /* DO NOTHING */}
+ void finalize() override { /* DO NOTHING */}
+
+ std::shared_ptr<::neurun::backend::operand::ITensor>
+ tensorAt(const model::OperandIndex &ind) override;
+
+ std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind) override;
+
+ void iterate(const IterateFunction &fn) override;
+
+ void preVisit(const model::Operation &) override { /* DO NOTHING */}
+ void postVisit(const model::Operation &) override { /* DO NOTHING */}
+
+ std::unique_ptr<ITensorManager> releaseTensorManager(void) override;
+
+ std::shared_ptr<operand::Tensor> at(const ::neurun::model::OperandIndex &ind);
+
+private:
+ std::unique_ptr<TensorManager> _tensor_mgr;
+ model::OperandIndexMap<model::OperandInfo> _tensor_info_map;
+ model::OperandIndexMap<std::pair<model::Layout, model::Layout>> _tensor_layouts_map;
+ model::OperandIndexSequence _constants;
+};
+
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_CPU_TENSOR_BUILDER_H__
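
For context, a minimal usage sketch of the interface declared above follows. It is not part of the commit: the call order is inferred from the method names in this header (register, notify liveness, prepare, allocate, fetch), and the helper name buildCpuTensors, the model::Layout::NHWC enumerator, and the as_const choice are illustrative assumptions about the surrounding neurun code rather than anything shown in this diff.

// Hypothetical sketch only -- exercises the TensorBuilder interface declared above.
// Layout enumerators and parameter choices are assumptions, not taken from this commit.
#include "TensorBuilder.h"

namespace neurun
{

void buildCpuTensors(backend::cpu::TensorBuilder &builder, const model::OperandIndex &index,
                     const model::OperandInfo &info)
{
  // Register shape/type information before any memory is planned.
  // NHWC for both layouts and as_const=false are illustrative assumptions.
  builder.registerTensorInfo(index, info, model::Layout::NHWC, model::Layout::NHWC,
                             /*as_const=*/false);

  // Liveness notifications let the builder plan buffer reuse (shown back to back
  // here only for brevity; in practice they bracket the operand's live range).
  builder.notifyFirstUse(index);
  builder.notifyLastUse(index);

  // Plan and allocate constant and non-constant tensors.
  builder.prepare();
  builder.allocateConsts();
  builder.allocateNonconsts();

  // Fetch the concrete CPU tensor, e.g. for kernel generation.
  auto tensor = builder.at(index);
  (void)tensor;
}

} // namespace neurun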