Diffstat (limited to 'runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc')
-rw-r--r--  runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc  246
1 file changed, 0 insertions(+), 246 deletions(-)
diff --git a/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc b/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc
deleted file mode 100644
index b5c038200..000000000
--- a/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "backend/acl_cl/TensorBuilder.h"
-
-#include <cassert>
-#include <stack>
-
-#include "operand/Object.h"
-#include "Convert.h"
-
-#include "util/logging.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace acl_cl
-{
-
-TensorBuilder::TensorBuilder()
-{
- // DO NOTHING
-}
-
-void TensorBuilder::registerTensorInfo(const model::operand::Index &ind,
- const compiler::TensorInfo &info)
-{
- assert(_tensors.size() == 0);
-
- _tensor_info_map.insert({ind, info});
-}
-
-void TensorBuilder::registerSubTensorInfo(const model::operand::Index &ind,
- const compiler::SubTensorInfo &info)
-{
- assert(_tensors.size() == 0);
-
- _subtensor_info_map.insert({ind, info});
-}
-
-void TensorBuilder::notifyFirstUse(const model::operand::Index &)
-{
- // DO NOTHING
-}
-
-void TensorBuilder::notifyLastUse(const model::operand::Index &)
-{
- // DO NOTHING
-}
-
-void TensorBuilder::prepare(void)
-{
- assert(_tensors.size() == 0);
-
- // TODO Handle SubTensor(subsumption)
- // Currently this TensorBuilder does not have subsumption info yet
- // Allocated subtensor will be mapped to _subtensors instead of _tensors
- assert(_subtensors.size() == 0);
-
- for (auto &entry : _tensor_info_map)
- {
- auto ind = entry.first;
- const auto &info = entry.second;
- auto tensor = std::make_shared<::neurun::backend::acl_cl::operand::CLTensor>(info);
- _tensors[ind] = tensor;
- }
-
- // To make a subtensor, its parent tensor must be made first
- // To enforce this ordering, use a stack:
- // 1) Push one subtensor index to stack (iterate subtensors)
- // 2) If tensor at stack top is already made, pop and go to 4)
- // 3) If tensor pushed at 1) is not made, check parent tensor
- // 3-1) If parent tensor is already made, we can make child tensor
- // Make child tensor and pop, go to 4)
- // 3-2) If parent tensor is not made, we can't make child tensor yet
- // Push parent tensor index to stack and return to 4)
- // 4) If stack is empty, return to 1), else return to 2)
- for (auto &entry : _subtensor_info_map)
- {
- model::operand::Index ind = entry.first;
-
- std::stack<model::operand::Index> stack;
- stack.push(ind);
-
- while (!stack.empty())
- {
- const auto current = stack.top();
- const auto &info = _subtensor_info_map.at(current);
-
- // Already generated CLSubTensor
- if (_subtensors.find(current) != _subtensors.end())
- {
- stack.pop();
- continue;
- }
-
- auto parent = info.parent();
- std::shared_ptr<::neurun::backend::acl_cl::operand::ICLTensor> parent_tensor;
-
- if (_tensors.find(parent) != _tensors.end())
- {
- // Parent is allocated as tensor
- parent_tensor = _tensors[parent];
- }
- else if (_subtensors.find(parent) != _subtensors.end())
- {
- // Parent is allocated as subtensor
- parent_tensor = _subtensors[parent];
- }
- else
- {
- // Cannot find allocated parent tensor: allocate parent first
- assert(_subtensor_info_map.find(parent) != _subtensor_info_map.end());
- stack.push(parent);
- continue;
- }
- assert(parent_tensor != nullptr);
-
- // Child's type should be the same as its parent's
- assert(info.type().offset() == parent_tensor->info()->quantization_info().offset);
- assert(info.type().scale() == parent_tensor->info()->quantization_info().scale);
- assert(asDataType(info.type().type()) == parent_tensor->info()->data_type());
- auto shape = asTensorShape(info.shape());
-
- // Only support axis: 3 (channel)
- ::arm_compute::Coordinates coordinates;
- coordinates.set_num_dimensions(4);
- assert(info.offset().h() == 0);
- assert(info.offset().n() == 0);
- assert(info.offset().w() == 0);
- coordinates[2] = info.offset().c();
- auto tensor = std::make_shared<::neurun::backend::acl_cl::operand::CLSubTensor>(
- parent_tensor.get(), shape, coordinates, true);
- _subtensors[current] = tensor;
- stack.pop();
- }
- }
-}
-
-void TensorBuilder::allocate(void)
-{
- assert(_tensor_info_map.size() == _tensors.size());
-
- for (const auto &tensor_entry : _tensors)
- {
- auto tensor = tensor_entry.second;
- tensor->allocator()->allocate();
- }
-}
-
-std::shared_ptr<::neurun::backend::operand::ITensor>
-TensorBuilder::tensorAt(const model::operand::Index &ind)
-{
- if (_tensors.find(ind) != _tensors.end())
- {
- return _tensors.at(ind);
- }
- else
- {
- return _subtensors.at(ind);
- }
-}
-
-std::shared_ptr<backend::operand::IObject>
-TensorBuilder::wrapTensor(const model::operand::Index &ind)
-{
- if (_objects.find(ind) != _objects.end())
- {
- return _objects.at(ind);
- }
- else
- {
- if (_tensors.find(ind) != _tensors.end())
- {
- return _objects[ind] = std::make_shared<operand::Object>(_tensors.at(ind));
- }
- else
- {
- return _objects[ind] = std::make_shared<operand::Object>(_subtensors.at(ind));
- }
- }
-}
-
-void TensorBuilder::iterate(const IterateFunction &fn)
-{
- for (auto it : _tensors)
- {
- fn(it.first);
- }
- for (auto it : _subtensors)
- {
- fn(it.first);
- }
-}
-
-std::shared_ptr<::neurun::backend::acl_cl::operand::ICLTensor>
-TensorBuilder::at(const ::neurun::model::operand::Index &ind)
-{
- if (_tensors.find(ind) != _tensors.end())
- {
- return _tensors.at(ind);
- }
- else
- {
- return _subtensors.at(ind);
- }
-}
-
-bool TensorBuilder::isSubTensorOf(const model::operand::Index &parent,
- const model::operand::Index &child)
-{
- if (_subtensor_info_map.find(child) == _subtensor_info_map.end())
- {
- return false;
- }
-
- if (_subtensors.find(child) == _subtensors.end())
- {
- return false;
- }
-
- if (_subtensor_info_map.at(child).parent() != parent)
- {
- return false;
- }
-
- return true;
-}
-
-} // namespace acl_cl
-} // namespace backend
-} // namespace neurun
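
The prepare() routine removed above creates ACL sub-tensors in parent-before-child order by walking _subtensor_info_map with an explicit stack, as described in its comment. The following is a minimal, self-contained sketch of that ordering logic only; it uses hypothetical Tensor/SubInfo stand-ins and plain integer indices instead of the real CLTensor/CLSubTensor and compiler::SubTensorInfo types, so it illustrates the traversal rather than the actual backend API.

#include <cassert>
#include <map>
#include <memory>
#include <stack>

// Hypothetical stand-ins for the real tensor and sub-tensor info types.
struct Tensor
{
};
struct SubInfo
{
  int parent; // operand index of the parent, which may itself be a sub-tensor
};

int main()
{
  // Operand 0 is a plain tensor; 1 is a sub-tensor of 0; 2 is a sub-tensor of 1.
  std::map<int, std::shared_ptr<Tensor>> tensors{{0, std::make_shared<Tensor>()}};
  std::map<int, SubInfo> subtensor_info{{1, SubInfo{0}}, {2, SubInfo{1}}};
  std::map<int, std::shared_ptr<Tensor>> subtensors;

  for (const auto &entry : subtensor_info)
  {
    std::stack<int> stack;
    stack.push(entry.first); // 1) push one sub-tensor index

    while (!stack.empty())
    {
      const int current = stack.top();

      // 2) already made: pop and continue
      if (subtensors.find(current) != subtensors.end())
      {
        stack.pop();
        continue;
      }

      const int parent = subtensor_info.at(current).parent;
      const bool parent_ready =
          tensors.find(parent) != tensors.end() || subtensors.find(parent) != subtensors.end();

      if (!parent_ready)
      {
        // 3-2) parent not made yet: it must itself be a registered sub-tensor, so make it first
        assert(subtensor_info.find(parent) != subtensor_info.end());
        stack.push(parent);
        continue;
      }

      // 3-1) parent exists, so the child sub-tensor can be made now
      subtensors[current] = std::make_shared<Tensor>();
      stack.pop();
    }
  }

  assert(subtensors.size() == 2);
  return 0;
}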