diff options
Diffstat (limited to 'runtime/onert/backend/cpu/TensorBuilder.cc')
-rw-r--r-- | runtime/onert/backend/cpu/TensorBuilder.cc | 93 |
1 file changed, 93 insertions, 0 deletions
diff --git a/runtime/onert/backend/cpu/TensorBuilder.cc b/runtime/onert/backend/cpu/TensorBuilder.cc new file mode 100644 index 000000000..cbf7c9e5c --- /dev/null +++ b/runtime/onert/backend/cpu/TensorBuilder.cc @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "TensorBuilder.h" + +#include <util/logging.h> + +#include <cassert> + +namespace onert +{ +namespace backend +{ +namespace cpu +{ + +TensorBuilder::TensorBuilder() : _tensor_mgr{new TensorManager()} +{ + // DO NOTHING +} + +void TensorBuilder::registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info, + ir::Layout, bool as_const) +{ + _tensor_info_map.emplace(ind, info); + + if (as_const) + _constants.append(ind); +} + +void TensorBuilder::notifyFirstUse(const ir::OperandIndex &ind) +{ + assert(_tensor_info_map.find(ind) != _tensor_info_map.end()); + const auto tensor_info = _tensor_info_map.at(ind); + const auto size = tensor_info.total_size(); + _tensor_mgr->buildTensor(ind, tensor_info, _constants.contains(ind)); + _tensor_mgr->claimPlan(ind, size); +} + +void TensorBuilder::notifyLastUse(const ir::OperandIndex &ind) { _tensor_mgr->releasePlan(ind); } + +bool TensorBuilder::isRegistered(const ir::OperandIndex &ind) const +{ + return _tensor_info_map.find(ind) != _tensor_info_map.end(); +} + +void TensorBuilder::prepare(void) +{ + _tensor_mgr->allocateConsts(); + 
_tensor_mgr->allocateNonconsts(); +} + +void TensorBuilder::allocate() +{ + // NOTE For now nothing to do. Allocation is done in prepare stage, which is not appropriate + // This is because CPU kernels require `ITensor`s to be allocated before Kernel Generation. +} + +std::shared_ptr<ITensor> TensorBuilder::tensorAt(const ir::OperandIndex &ind) +{ + return _tensor_mgr->at(ind); +} + +void TensorBuilder::iterate(const IterateFunction &fn) { _tensor_mgr->iterate(fn); } + +std::shared_ptr<operand::Tensor> TensorBuilder::at(const ir::OperandIndex &ind) +{ + auto ret = _tensor_mgr->at(ind); + assert(ret != nullptr); + return ret; +} + +std::unique_ptr<ITensorManager> TensorBuilder::releaseTensorManager(void) +{ + return std::move(_tensor_mgr); +} + +} // namespace cpu +} // namespace backend +} // namespace onert |