Diffstat (limited to 'runtime/neurun/backend/acl_common/TemplTensorBuilder.h')
-rw-r--r--  runtime/neurun/backend/acl_common/TemplTensorBuilder.h  612
1 file changed, 612 insertions(+), 0 deletions(-)
diff --git a/runtime/neurun/backend/acl_common/TemplTensorBuilder.h b/runtime/neurun/backend/acl_common/TemplTensorBuilder.h
new file mode 100644
index 000000000..bb43823ed
--- /dev/null
+++ b/runtime/neurun/backend/acl_common/TemplTensorBuilder.h
@@ -0,0 +1,612 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ACL_COMMON_TEMPL_TENSOR_BUILDER_H__
+#define __NEURUN_BACKEND_ACL_COMMON_TEMPL_TENSOR_BUILDER_H__
+
+#include <memory>
+#include <queue>
+
+#include <arm_compute/core/Types.h>
+#include <backend/ITensorBuilder.h>
+#include "ir/OperandIndexMap.h"
+#include "AclTensorManager.h"
+#include "cpp14/memory.h"
+#include <util/Utils.h>
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_common
+{
+
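+// Kind of lifetime event recorded in the uses queue: FIRST marks an operand's first
+// use (start of its lifetime), LAST marks its last use (end of its lifetime).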
+enum class UsesType
+{
+ FIRST,
+ LAST
+};
+
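+/**
+ * @brief Tensor builder shared by the ACL backends, templated on the concrete tensor types
+ *
+ * Collects the tensor and subtensor information registered by the compiler, builds the
+ * corresponding ACL tensors through AclTensorManager, and, for the Linear executor,
+ * tracks first/last uses of operands to manage tensor lifetimes.
+ */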
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
+class TemplTensorBuilder : public ITensorBuilder
+{
+public:
+ using T_AclTensorManager = AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>;
+
+ TemplTensorBuilder(T_AclTensorManager *tensor_mgr);
+
+ /**
+ * @brief Register tensor information to allocate on the ACL backend
+ * @param[in] ind Operand index
+ * @param[in] info Tensor information
+ * @param[in] backend_layout Tensor data layout used by the backend
+ * @param[in] as_const Whether the operand is a constant
+ */
+ void registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info,
+ ir::Layout backend_layout, bool as_const) override;
+ /**
+ * @brief Register subtensor information to allocate on the ACL backend
+ * @param[in] ind Operand index
+ * @param[in] info Subtensor information
+ */
+ void registerSubTensorInfo(const ir::OperandIndex &ind,
+ const compiler::SubTensorInfo &info) override;
+
+ void notifyFirstUse(const ir::OperandIndex &) override;
+ void notifyLastUse(const ir::OperandIndex &) override;
+
+ bool isRegistered(const ir::OperandIndex &) const override;
+
+ void prepare(void) override;
+ void allocateConsts() override;
+ void allocateNonconsts() override;
+ void postFunctionPrepare() override;
+ void finalize() override;
+
+ std::shared_ptr<::neurun::backend::operand::ITensor>
+ tensorAt(const ir::OperandIndex &ind) override;
+ void iterate(const IterateFunction &fn) override;
+
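+ // For the Linear executor: start/finish operand lifetimes around each operation,
+ // driven by the FIRST/LAST events queued via notifyFirstUse()/notifyLastUse()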
+ void preVisit(const ir::Operation &node) override;
+ void postVisit(const ir::Operation &node) override;
+
+ std::unique_ptr<ITensorManager> releaseTensorManager(void) override;
+
+ std::shared_ptr<T_ITensor> at(const ir::OperandIndex &ind);
+ /**
+ * @brief Check whether the child tensor is allocated as a subtensor of the parent tensor
+ * @param[in] parent Index of the parent
+ * @param[in] child Index of the child
+ * @return @c true if the child is allocated as a subtensor of the parent, otherwise @c false
+ */
+ bool isSubTensorOf(const ir::OperandIndex &parent, const ir::OperandIndex &child);
+
+ void dimCorrection(const ir::OperandIndex &index, bool apply_dim_correction);
+
+ T_AclTensorManager *acl_tensor_manager(void) { return _tensor_mgr.get(); }
+
+ void setUsesCount(const ir::OperandIndex &index, size_t num_uses)
+ {
+ assert(_uses_count_map.find(index) != _uses_count_map.end() ? _uses_count_map[index] == num_uses
+ : true);
+ _uses_count_map[index] = num_uses;
+ }
+
+private:
+ void buildTensors(void);
+ void buildSubtensors(void);
+ void validate(void);
+ ir::OperandIndex findRootParent(ir::OperandIndex index);
+
+private:
+ ir::OperandIndexMap<ir::OperandInfo> _tensor_info_map;
+ ir::OperandIndexMap<compiler::SubTensorInfo> _subtensor_info_map;
+ ir::OperandIndexMap<bool> _apply_dim_correction_map;
+ ir::OperandIndexMap<ir::Layout> _tensor_layout_map;
+ ir::OperandIndexMap<size_t> _uses_count_map;
+
+ std::unique_ptr<T_AclTensorManager> _tensor_mgr;
+ ir::OperandIndexSequence _constants;
+
+ // TODO Consider dividing TensorBuilder into Linear and others
+ const std::string _executor_str;
+
+ // for linear executor
+ std::queue<std::pair<UsesType, ir::OperandIndex>> _uses_queue;
+ uint32_t _first_uses_num;
+ ir::OperandIndexMap<bool> _first_uses_visit;
+
+ // for subtensors
+ ir::OperandIndexMap<uint32_t> _parent_def;
+ ir::OperandIndexMap<uint32_t> _parent_uses;
+};
+
+} // namespace acl_common
+} // namespace backend
+} // namespace neurun
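+
+// A minimal usage sketch, in the order implied by the interface above. The concrete
+// template arguments and the way the tensor manager is obtained are backend-specific
+// and shown here only as assumptions, not as part of this header:
+//
+//   TemplTensorBuilder<ITensor, Tensor, SubTensor> builder{tensor_mgr};
+//   builder.registerTensorInfo(ind, operand_info, ir::Layout::NHWC, /*as_const=*/false);
+//   builder.notifyFirstUse(ind);   // enqueue lifetime events (consumed by the Linear executor)
+//   builder.notifyLastUse(ind);
+//   builder.prepare();             // builds tensors, then subtensors
+//   builder.allocateConsts();
+//   builder.allocateNonconsts();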
+
+#include <algorithm> // for std::all_of
+#include <cassert>
+#include <stack>
+
+#include "Convert.h"
+
+#include "util/logging.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_common
+{
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
+TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::TemplTensorBuilder(
+ T_AclTensorManager *tensor_mgr)
+ : _tensor_mgr{tensor_mgr}, _executor_str(util::getConfigString(util::config::EXECUTOR)),
+ _first_uses_num(0)
+{
+ assert(_tensor_mgr);
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::registerTensorInfo(
+ const ir::OperandIndex &ind, const ir::OperandInfo &info, ir::Layout backend_layout,
+ bool as_const)
+{
+ assert(_tensor_mgr->constTensors().size() == 0);
+ assert(_tensor_mgr->nonconstTensors().size() == 0);
+
+ _tensor_info_map.emplace(ind, info);
+ _apply_dim_correction_map.emplace(ind, true);
+ _tensor_layout_map.insert({ind, backend_layout});
+ if (as_const)
+ _constants.append(ind);
+
+ assert(_first_uses_visit.find(ind) == _first_uses_visit.end());
+ _first_uses_visit[ind] = false;
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::registerSubTensorInfo(
+ const ir::OperandIndex &ind, const compiler::SubTensorInfo &info)
+{
+ assert(_tensor_mgr->constTensors().size() == 0);
+ assert(_tensor_mgr->nonconstTensors().size() == 0);
+
+ _subtensor_info_map.emplace(ind, info);
+ _apply_dim_correction_map.emplace(ind, true);
+
+ assert(_first_uses_visit.find(ind) == _first_uses_visit.end());
+ _first_uses_visit[ind] = false;
+
+ const auto &parent_ind = info.parent();
+
+ // parent_def
+ _parent_def[parent_ind] = 1;
+
+ // parent_uses
+ if (_parent_uses.find(parent_ind) == _parent_uses.end())
+ _parent_uses[parent_ind] = 1; // 1 accounts for the parent itself
+ _parent_uses[parent_ind]++;
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::notifyFirstUse(
+ const ir::OperandIndex &ind)
+{
+ _first_uses_num++;
+ _uses_queue.emplace(UsesType::FIRST, ind);
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::notifyLastUse(
+ const ir::OperandIndex &ind)
+{
+ _uses_queue.emplace(UsesType::LAST, ind);
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
+bool TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::isRegistered(
+ const ir::OperandIndex &ind) const
+{
+ return _tensor_info_map.find(ind) != _tensor_info_map.end() ||
+ _subtensor_info_map.find(ind) != _subtensor_info_map.end();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::prepare(void)
+{
+ buildTensors();
+ buildSubtensors();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::allocateConsts(void)
+{
+ assert(_constants.size() == _tensor_mgr->constTensors().size());
+ _tensor_mgr->allocateConsts();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::allocateNonconsts(void)
+{
+ assert(_tensor_info_map.size() == _tensor_mgr->nonconstTensors().size() + _constants.size());
+ _tensor_mgr->allocateNonconsts();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::postFunctionPrepare(void)
+{
+ _tensor_mgr->tryDeallocConstants();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::finalize(void)
+{
+ validate();
+ _tensor_mgr->allocateInternalBufferManager();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
+std::shared_ptr<::neurun::backend::operand::ITensor>
+TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::tensorAt(const ir::OperandIndex &ind)
+{
+ return _tensor_mgr->at(ind);
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::iterate(const IterateFunction &fn)
+{
+ _tensor_mgr->iterate(fn);
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
+std::shared_ptr<T_ITensor>
+TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::at(const ir::OperandIndex &ind)
+{
+ return _tensor_mgr->at(ind);
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
+bool TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::isSubTensorOf(
+ const ir::OperandIndex &parent, const ir::OperandIndex &child)
+{
+ if (_subtensor_info_map.find(child) == _subtensor_info_map.end())
+ {
+ return false;
+ }
+
+ auto &subtensors = _tensor_mgr->nonconstSubtensors();
+ if (subtensors.find(child) == subtensors.end())
+ {
+ return false;
+ }
+
+ if (_subtensor_info_map.at(child).parent() != parent)
+ {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::dimCorrection(
+ const ir::OperandIndex &index, bool apply_dim_correction)
+{
+ _apply_dim_correction_map[index] = apply_dim_correction;
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
+std::unique_ptr<ITensorManager>
+TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::releaseTensorManager(void)
+{
+ return std::move(_tensor_mgr);
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::buildTensors(void)
+{
+ assert(_tensor_mgr->constTensors().size() == 0);
+ assert(_tensor_mgr->nonconstTensors().size() == 0);
+
+ for (auto &entry : _tensor_info_map)
+ {
+ auto ind = entry.first;
+ const auto &info = entry.second;
+ // NOTE A SubTensor's layout must be the same as its parent tensor's layout
+ const auto &root_parent = findRootParent(ind);
+ const auto &backend_layout = _tensor_layout_map[root_parent];
+ auto tensor_info = asTensorInfo(info.shape(), info.typeInfo(), ir::Layout::UNKNOWN,
+ backend_layout, _apply_dim_correction_map[ind]);
+ _tensor_mgr->buildTensor(ind, tensor_info, info.shape().rank(), _constants.contains(ind),
+ _uses_count_map[ind]);
+ }
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::buildSubtensors(void)
+{
+ // TODO Handle SubTensor(subsumption)
+ // Currently this TemplTensorBuilder does not have subsumption info yet
+ // Allocated subtensor will be mapped to _subtensors instead of _tensors
+ assert(_tensor_mgr->nonconstSubtensors().size() == 0);
+
+ // To make a subtensor, its parent tensor must be made first
+ // To enforce this ordering, a stack is used:
+ // 1) Push one subtensor index onto the stack (iterating over subtensors)
+ // 2) If the tensor at the stack top is already made, pop it and go to 4)
+ // 3) If the tensor at the stack top is not made yet, check its parent tensor
+ // 3-1) If the parent tensor is already made, the child tensor can be made
+ // Make the child tensor, pop it, and go to 4)
+ // 3-2) If the parent tensor is not made yet, the child tensor cannot be made
+ // Push the parent tensor index onto the stack and go to 4)
+ // 4) If the stack is empty, return to 1); otherwise go to 2)
+ auto &subtensors = _tensor_mgr->nonconstSubtensors();
+ for (auto &entry : _subtensor_info_map)
+ {
+ ir::OperandIndex ind = entry.first;
+
+ std::stack<ir::OperandIndex> stack;
+ stack.push(ind);
+
+ while (!stack.empty())
+ {
+ const auto current = stack.top();
+ const auto &info = _subtensor_info_map.at(current);
+
+ // Already generated SubTensor
+ if (subtensors.find(current) != subtensors.end())
+ {
+ stack.pop();
+ continue;
+ }
+
+ auto parent = info.parent();
+ std::shared_ptr<T_ITensor> parent_tensor = _tensor_mgr->findTensorAsParent(parent);
+ if (!parent_tensor)
+ {
+ // Cannot find allocated parent tensor: allocate parent first
+ assert(_subtensor_info_map.find(parent) != _subtensor_info_map.end());
+ stack.push(parent);
+ continue;
+ }
+ assert(parent_tensor != nullptr);
+
+ // Child's type must be the same as the parent's
+ assert(info.type().offset() == parent_tensor->info()->quantization_info().offset);
+ assert(info.type().scale() == parent_tensor->info()->quantization_info().scale);
+ assert(asDataType(info.type().type()) == parent_tensor->info()->data_type());
+
+ // NOTE A SubTensor's layout must be the same as its parent tensor's layout
+ const auto &root_parent = findRootParent(parent);
+ const auto &backend_layout = _tensor_layout_map[root_parent];
+
+ auto shape = asTensorShape(info.shape(), ir::Layout::UNKNOWN, backend_layout,
+ _apply_dim_correction_map[current]);
+ ::arm_compute::Coordinates coordinates =
+ asTensorCoordinate(info.offset(), ir::Layout::UNKNOWN, backend_layout);
+ _tensor_mgr->buildSubtensor(parent, current, shape, coordinates, info.shape().rank(), true);
+ stack.pop();
+ }
+ }
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::preVisit(const ir::Operation &node)
+{
+ // For now, other executors don't need this step
+ if (_executor_str != "Linear")
+ {
+ return;
+ }
+
+ std::function<void(const ir::OperandIndex &ind)> def_handler =
+ [this, &def_handler](const ir::OperandIndex &ind) {
+ bool is_subtensor = _subtensor_info_map.find(ind) != _subtensor_info_map.end();
+ bool is_parent = _parent_def.find(ind) != _parent_def.end();
+ if (!is_subtensor && !is_parent)
+ {
+ _tensor_mgr->startLifetime(ind);
+ return;
+ }
+
+ if (is_parent)
+ {
+ if (_parent_def[ind] == 0)
+ return;
+
+ _parent_def[ind] = 0;
+
+ if (is_subtensor)
+ {
+ const auto &it = _parent_def.find(ind);
+ _parent_def.erase(it);
+ def_handler(ind);
+ }
+ else
+ {
+ _tensor_mgr->startLifetime(ind);
+ }
+ }
+ else if (is_subtensor)
+ {
+ const ir::OperandIndex &parent_ind = _subtensor_info_map.at(ind).parent();
+ if (_parent_def[parent_ind] == 0)
+ return;
+ def_handler(parent_ind);
+ }
+ };
+
+ // See #5642
+ ir::OperandIndexMap<bool> outputs_map;
+ for (const auto &ind : node.getOutputs())
+ {
+ assert(_first_uses_visit.find(ind) != _first_uses_visit.end());
+ outputs_map[ind] = _first_uses_visit[ind];
+ }
+
+ // Returns true if all elements of outputs_map are true
+ auto outputs_map_all_check = [&outputs_map]() {
+ return std::all_of(outputs_map.begin(), outputs_map.end(),
+ [](std::pair<const ir::OperandIndex, bool> it) { return it.second; });
+ };
+
+ std::pair<UsesType, ir::OperandIndex> peak;
+ while (!outputs_map_all_check() && (peak = _uses_queue.front()).first == UsesType::FIRST)
+ {
+ _uses_queue.pop();
+ _first_uses_num--;
+
+ const auto &popped_idx = peak.second;
+ def_handler(popped_idx);
+
+ outputs_map[popped_idx] = true;
+ _first_uses_visit[popped_idx] = true;
+ }
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::postVisit(const ir::Operation &node)
+{
+ // For now, other executors don't need this step
+ if (_executor_str != "Linear")
+ {
+ return;
+ }
+
+ std::function<void(const ir::OperandIndex &ind)> use_handler =
+ [this, &use_handler](const ir::OperandIndex &ind) {
+ bool is_subtensor = _subtensor_info_map.find(ind) != _subtensor_info_map.end();
+ bool is_parent = _parent_uses.find(ind) != _parent_uses.end();
+ if (!is_subtensor && !is_parent)
+ {
+ _tensor_mgr->finishLifetime(ind);
+ return;
+ }
+
+ // This handler is expected to run under the linear executor, so the parent
+ // operand is always finished after its subtensors
+ if (is_parent)
+ {
+ --_parent_uses[ind];
+ assert(_parent_uses[ind] == 0);
+
+ if (is_subtensor)
+ {
+ const auto &it = _parent_uses.find(ind);
+ _parent_uses.erase(it);
+ use_handler(ind);
+ }
+ else
+ {
+ _tensor_mgr->finishLifetime(ind);
+ }
+ }
+ else if (is_subtensor)
+ {
+ const ir::OperandIndex &parent_ind = _subtensor_info_map.at(ind).parent();
+ --_parent_uses[parent_ind];
+ assert(_parent_uses[parent_ind] > 0);
+ }
+ };
+
+ // See #5642
+ const auto &inputs = node.getInputs();
+ std::pair<UsesType, ir::OperandIndex> peak;
+ while ((peak = _uses_queue.front()).first == UsesType::LAST)
+ {
+ const auto &popped_idx = peak.second;
+ if (inputs.contains(popped_idx))
+ {
+ _uses_queue.pop();
+ use_handler(popped_idx);
+ }
+ else
+ {
+ break;
+ }
+ }
+
+ if (_first_uses_num == 0)
+ {
+ while (!_uses_queue.empty())
+ {
+ peak = _uses_queue.front();
+ assert(peak.first == UsesType::LAST);
+
+ _uses_queue.pop();
+
+ use_handler(peak.second);
+ }
+ }
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::validate(void)
+{
+ // For now, other executors don't need this step
+ if (_executor_str != "Linear")
+ {
+ return;
+ }
+
+ for (auto it : _tensor_info_map)
+ {
+ assert(_first_uses_visit.find(it.first) != _first_uses_visit.end());
+ assert(_first_uses_visit[it.first]);
+ UNUSED_RELEASE(it);
+ }
+
+ for (auto it : _subtensor_info_map)
+ {
+ assert(_first_uses_visit.find(it.first) != _first_uses_visit.end());
+ assert(_first_uses_visit[it.first]);
+ UNUSED_RELEASE(it);
+ }
+
+ for (auto it : _tensor_layout_map)
+ {
+ assert(_first_uses_visit.find(it.first) != _first_uses_visit.end());
+ assert(_first_uses_visit[it.first]);
+ UNUSED_RELEASE(it);
+ }
+
+ assert(_uses_queue.size() == 0);
+ assert(_first_uses_num == 0);
+
+ assert(
+ std::all_of(_parent_def.begin(), _parent_def.end(),
+ [](std::pair<const ir::OperandIndex, uint32_t> it) { return it.second == 0; }));
+
+ assert(
+ std::all_of(_parent_uses.begin(), _parent_uses.end(),
+ [](std::pair<const ir::OperandIndex, uint32_t> it) { return it.second == 0; }));
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
+ir::OperandIndex
+TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::findRootParent(ir::OperandIndex ind)
+{
+ if (_subtensor_info_map.find(ind) == _subtensor_info_map.end())
+ return ind;
+
+ const auto &parent_ind = _subtensor_info_map.at(ind).parent();
+ return findRootParent(parent_ind);
+}
+
+} // namespace acl_common
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ACL_COMMON_TEMPL_TENSOR_BUILDER_H__