summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author오형석/On-Device Lab(SR)/Staff Engineer/삼성전자 <hseok82.oh@samsung.com>2019-04-15 03:55:03 (GMT)
committer이춘석/On-Device Lab(SR)/Staff Engineer/삼성전자 <chunseok.lee@samsung.com>2019-04-15 03:55:03 (GMT)
commite30c0a5fa1c0ef23ae271f7b905ae076b8153429 (patch)
tree57ca52b471a6a9a8723d14d7311ffdf63d4c4f7c
parent6de33522d41516658196f26a68da6e1b0730afc3 (diff)
downloadnnfw-e30c0a5fa1c0ef23ae271f7b905ae076b8153429.zip
nnfw-e30c0a5fa1c0ef23ae271f7b905ae076b8153429.tar.gz
nnfw-e30c0a5fa1c0ef23ae271f7b905ae076b8153429.tar.bz2
Interpreter use info structure in model (#4988)
Instead of using the interpreter's own structure, use the tensor info structure in the model. Move comment and copy constructor. Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
-rw-r--r--runtimes/neurun/core/include/model/operand/Info.h41
-rw-r--r--runtimes/neurun/core/src/exec/interp/ExecManager.cc12
-rw-r--r--runtimes/neurun/core/src/exec/interp/Tensor.h16
-rw-r--r--runtimes/neurun/core/src/exec/interp/TensorInfo.h106
4 files changed, 51 insertions, 124 deletions
diff --git a/runtimes/neurun/core/include/model/operand/Info.h b/runtimes/neurun/core/include/model/operand/Info.h
index cda2ddf..717a5d4 100644
--- a/runtimes/neurun/core/include/model/operand/Info.h
+++ b/runtimes/neurun/core/include/model/operand/Info.h
@@ -14,6 +14,10 @@
* limitations under the License.
*/
+/**
+ * @file Info.h
+ * @brief This file contains Info class
+ */
#ifndef __NEURUN_MODEL_OPERAND_INFO_H__
#define __NEURUN_MODEL_OPERAND_INFO_H__
@@ -29,22 +33,53 @@ namespace model
namespace operand
{
+/**
+ * @brief Class to save tensor's shape and type
+ */
class Info
{
public:
+ /**
+ * @brief Construct a new Info object (deleted)
+ */
Info() = delete;
-
-public:
+ /**
+ * @brief Construct a new Info object
+ * @param[in] shape Tensor shape
+ * @param[in] typeInfo Tensor data type
+ */
Info(const Shape &shape, const TypeInfo &typeInfo) : _shape(shape), _typeInfo(typeInfo)
{
// DO NOTHING
}
+ /**
+ * @brief Construct a new Info object
+ * @param[in] origin info for copy
+ */
+ Info(const Info &origin) : _shape(origin.shape()), _typeInfo(origin.typeInfo())
+ {
+ // DO NOTHING
+ }
public:
+ /**
+ * @brief Return tensor shape
+ * @return Tensor shape
+ */
const Shape &shape() const { return _shape; }
+ /**
+ * @brief Return tensor data type info
+ * @return Tensor data type
+ */
const TypeInfo &typeInfo() const { return _typeInfo; }
+ /**
+ * @brief Set tensor data type
+ */
void type(const DataType &type) { _typeInfo.type(type); }
-
+ /**
+ * @brief Return size of tensor (bytes)
+ * @return Tensor size
+ */
size_t total_size() const
{
const auto &dims = _shape.dims();
diff --git a/runtimes/neurun/core/src/exec/interp/ExecManager.cc b/runtimes/neurun/core/src/exec/interp/ExecManager.cc
index 4436842..0fdf1b7 100644
--- a/runtimes/neurun/core/src/exec/interp/ExecManager.cc
+++ b/runtimes/neurun/core/src/exec/interp/ExecManager.cc
@@ -33,7 +33,7 @@ void ExecManager::setInput(const neurun::model::operand::IO::Index &index,
size_t length)
{
const auto input_index = _model->inputs.at(index);
- const TensorInfo info{shape, type};
+ const model::operand::Info info{shape, type};
if (length < info.total_size())
{
@@ -49,8 +49,7 @@ void ExecManager::setInput(const neurun::model::operand::IO::Index &index, const
size_t length)
{
const auto input_index = _model->inputs.at(index);
- const TensorInfo info{_model->operands.at(input_index).shape(),
- _model->operands.at(input_index).typeInfo()};
+ const auto info = _model->operands.at(input_index).info();
if (length < info.total_size())
{
@@ -67,7 +66,7 @@ void ExecManager::setOutput(const neurun::model::operand::IO::Index &index,
const neurun::model::operand::Shape &shape, void *buffer, size_t length)
{
const auto output_index = _model->outputs.at(index);
- const TensorInfo info{shape, type};
+ const model::operand::Info info{shape, type};
if (length < info.total_size())
{
@@ -83,8 +82,7 @@ void ExecManager::setOutput(const neurun::model::operand::IO::Index &index, void
size_t length)
{
const auto output_index = _model->outputs.at(index);
- const TensorInfo info{_model->operands.at(output_index).shape(),
- _model->operands.at(output_index).typeInfo()};
+ const auto info = _model->operands.at(output_index).info();
if (length < info.total_size())
{
@@ -141,7 +139,7 @@ void ExecManager::execute(void)
VERBOSE(INTERPRETER) << "Allocate and assign constant tensor. operand index:" << ind.value()
<< std::endl;
- auto const_tensor = std::make_shared<ROTensor>(TensorInfo(obj.info()));
+ auto const_tensor = std::make_shared<ROTensor>(obj.info());
interp_env->assignTensor(ind, const_tensor);
// Assume that interpreter's tensor layout is same with model (NHWC)
const_tensor->setBuffer(obj.data().base());
diff --git a/runtimes/neurun/core/src/exec/interp/Tensor.h b/runtimes/neurun/core/src/exec/interp/Tensor.h
index 5989848..0170a97 100644
--- a/runtimes/neurun/core/src/exec/interp/Tensor.h
+++ b/runtimes/neurun/core/src/exec/interp/Tensor.h
@@ -22,7 +22,7 @@
#define __NEURUN_EXEC_INTERP_TENSOR_H__
#include "util/feature/Coordinate4D.h"
-#include "TensorInfo.h"
+#include "model/operand/Info.h"
namespace neurun
{
@@ -71,7 +71,7 @@ public:
* @brief Return TensorInfo
* @return TensorInfo
*/
- virtual const TensorInfo &tensorInfo() const = 0;
+ virtual const model::operand::Info &tensorInfo() const = 0;
/**
* @brief Return number of elements
* @return Number of elements
@@ -86,7 +86,7 @@ class ROTensor final : public ITensor
{
public:
ROTensor() = delete;
- ROTensor(const TensorInfo &info) : _info(info)
+ ROTensor(const model::operand::Info &info) : _info(info)
{
// DO NOTHING
}
@@ -102,11 +102,11 @@ public:
size_t calcOffset(const util::feature::Coordinate4D &coords) override;
bool has_padding() const override { return false; }
model::operand::DataType data_type() const override { return _info.typeInfo().type(); }
- const TensorInfo &tensorInfo() const override { return _info; }
+ const model::operand::Info &tensorInfo() const override { return _info; }
uint64_t element_nums() const override { return _info.shape().element_nums(); };
private:
- const TensorInfo _info;
+ const model::operand::Info _info;
const uint8_t *_buffer{nullptr};
};
@@ -117,7 +117,7 @@ class Tensor final : public ITensor
{
public:
Tensor() = delete;
- Tensor(const TensorInfo &info) : _info(info)
+ Tensor(const model::operand::Info &info) : _info(info)
{
// DO NOTHING
}
@@ -133,11 +133,11 @@ public:
size_t calcOffset(const util::feature::Coordinate4D &coords) override;
bool has_padding() const override { return false; }
model::operand::DataType data_type() const override { return _info.typeInfo().type(); }
- const interp::TensorInfo &tensorInfo() const override { return _info; }
+ const model::operand::Info &tensorInfo() const override { return _info; }
uint64_t element_nums() const override { return _info.shape().element_nums(); };
private:
- const TensorInfo _info;
+ const model::operand::Info _info;
uint8_t *_buffer{nullptr};
};
diff --git a/runtimes/neurun/core/src/exec/interp/TensorInfo.h b/runtimes/neurun/core/src/exec/interp/TensorInfo.h
deleted file mode 100644
index 4fa4ca0..0000000
--- a/runtimes/neurun/core/src/exec/interp/TensorInfo.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file TensorInfo.h
- * @brief This file contains TensorInfo class
- * @note Current implementation is copy of compiler::TensorInfo
- */
-#ifndef __NEURUN_EXEC_INTERP_TENSOR_INFO_H__
-#define __NEURUN_EXEC_INTERP_TENSOR_INFO_H__
-
-#include "model/operand/Info.h"
-
-#include <numeric>
-
-namespace neurun
-{
-namespace exec
-{
-namespace interp
-{
-
-/**
- * @brief Class to save tensor's shape and type
- */
-class TensorInfo
-{
-public:
- /**
- * @brief Construct a new Tensor Info object (deleted)
- */
- TensorInfo() = delete;
- /**
- * @brief Construct a new Tensor Info object
- * @param[in] shape Tensor shape
- * @param[in] typeInfo Tensor data type
- */
- TensorInfo(const ::neurun::model::operand::Shape &shape,
- const ::neurun::model::operand::TypeInfo &typeInfo)
- : _shape(shape), _typeInfo(typeInfo)
- {
- // DO NOTHING
- }
- /**
- * @brief Construct a new Tensor Info object
- * @param[in] info Model operand info
- */
- TensorInfo(const ::neurun::model::operand::Info &info)
- : _shape(info.shape()), _typeInfo(info.typeInfo())
- {
- // DO NOTHING
- }
- /**
- * @brief Construct a new Tensor Info object
- * @param[in] origin Tensor info for copy
- */
- TensorInfo(const TensorInfo &origin) : _shape(origin.shape()), _typeInfo(origin.typeInfo())
- {
- // DO NOTHING
- }
-
-public:
- /**
- * @brief Return tensor shape
- * @return Tensor shape
- */
- const ::neurun::model::operand::Shape &shape() const { return _shape; }
- /**
- * @brief Return tensor data type info
- * @return Tensor data type
- */
- const ::neurun::model::operand::TypeInfo &typeInfo() const { return _typeInfo; }
- /**
- * @brief Return size of tensor (bytes)
- * @return Tensor size
- */
- size_t total_size() const
- {
- const auto &dims = _shape.dims();
- return std::accumulate(dims.begin(), dims.end(), sizeOfDataType(_typeInfo.type()),
- std::multiplies<size_t>());
- }
-
-private:
- ::neurun::model::operand::Shape _shape;
- ::neurun::model::operand::TypeInfo _typeInfo;
-};
-
-} // namespace interp
-} // namespace exec
-} // namespace neurun
-
-#endif // __NEURUN_EXEC_INTERP_TENSOR_INFO_H__