author      Sanggyu Lee <takepencil@naver.com>        2024-01-08 13:54:23 +0900
committer   Chunseok Lee <chunseok.lee@samsung.com>   2024-01-08 15:22:15 +0900
commit      b0af54419e892853c00ab204acdee56d39c15b2b (patch)
tree        5e5784c031bce36993093cc98a23fe22ea5b6dd3
parent      4dbced9c5c7b5a2dfaf5e7f0f549eaa6f83fbc6a (diff)
download    nnfw-b0af54419e892853c00ab204acdee56d39c15b2b.tar.gz
            nnfw-b0af54419e892853c00ab204acdee56d39c15b2b.tar.bz2
            nnfw-b0af54419e892853c00ab204acdee56d39c15b2b.zip
Add & to auto to make static analyzer happy
It adds & to auto to make the static analyzer happy. This may avoid copies for non-primitive types. In addition, some auto variables turned out to be unused; all unused auto variables are removed as well. ONE-DCO-1.0-Signed-off-by: Sanggyu Lee <sg5.lee@samsung.com>
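For context, the change applies a single C++ pattern throughout: where an accessor returns a reference to a non-primitive object, bind the result with const auto & instead of plain auto so that no copy is made. A minimal sketch of the pattern follows; the types here are hypothetical illustrations, not the actual onert classes touched in the diff.

#include <vector>

struct TensorInfo
{
  std::vector<int> dims; // non-primitive member: copying it allocates
};

struct Operand
{
  TensorInfo _info;
  const TensorInfo &info() const { return _info; } // accessor returns a reference
};

int rankOf(const Operand &obj)
{
  // const auto info = obj.info();  // 'auto' deduces TensorInfo and copies the whole object
  const auto &info = obj.info();    // 'auto &' binds to the existing object: no copy
  return static_cast<int>(info.dims.size());
}

Note that const auto & also works when the callee returns by value, since binding a temporary to a const reference extends its lifetime; the only thing to watch for in general is a reference that outlives the object it refers to.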
-rw-r--r--  runtime/onert/backend/cl_common/include/cl_common/BackendContext.h |  2
-rw-r--r--  runtime/onert/backend/cpu/KernelGenerator.cc                       |  2
-rw-r--r--  runtime/onert/backend/cpu/ops/DetectionPostProcessLayer.cc         |  4
-rw-r--r--  runtime/onert/backend/gpu_cl/ClConstantInitializer.h               |  2
-rw-r--r--  runtime/onert/backend/gpu_cl/KernelGenerator.cc                    |  8
-rw-r--r--  runtime/onert/backend/gpu_cl/operand/ICLTensor.cc                  |  4
-rw-r--r--  runtime/onert/backend/ruy/KernelGenerator.cc                       |  4
-rw-r--r--  runtime/onert/backend/xnnpack/KernelGenerator.cc                   |  4
-rw-r--r--  runtime/onert/core/include/backend/basic/BackendContextHelpers.h   |  2
-rw-r--r--  runtime/onert/core/src/backend/basic/TensorBuilder.cc              |  2
-rw-r--r--  runtime/onert/core/src/backend/builtin/TensorBuilder.cc            |  2
-rw-r--r--  runtime/onert/core/src/compiler/HEScheduler.cc                     |  2
-rw-r--r--  runtime/onert/core/src/compiler/ShapeValidator.cc                  | 14
-rw-r--r--  runtime/onert/core/src/compiler/StaticShapeInferer.cc              | 26
-rw-r--r--  runtime/onert/core/src/compiler/pass/PermutationInsertionPass.cc   |  2
-rw-r--r--  runtime/onert/core/src/exec/DynamicShapeInferer.cc                 | 10
-rw-r--r--  runtime/onert/core/src/exec/Execution.cc                           |  6
-rw-r--r--  runtime/onert/core/src/exec/ExecutorBase.cc                        |  4
-rw-r--r--  runtime/onert/core/src/ir/Graph.cc                                 |  2
-rw-r--r--  runtime/onert/core/src/ir/OperationValidator.cc                    |  2
-rw-r--r--  runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc   |  9
21 files changed, 55 insertions(+), 58 deletions(-)
diff --git a/runtime/onert/backend/cl_common/include/cl_common/BackendContext.h b/runtime/onert/backend/cl_common/include/cl_common/BackendContext.h
index 76d403949..06aafa1b9 100644
--- a/runtime/onert/backend/cl_common/include/cl_common/BackendContext.h
+++ b/runtime/onert/backend/cl_common/include/cl_common/BackendContext.h
@@ -121,7 +121,7 @@ protected:
if (!tensor_builder->isRegistered(ind))
{
// These tensors do not exist in any operation (No use and def)
- const auto info = obj.info();
+ const auto &info = obj.info();
const auto layout = _data.operand_layouts.at(ind);
// TODO Change tensor info to have permuted shape
registerTensorInfo(ind, info, layout);
diff --git a/runtime/onert/backend/cpu/KernelGenerator.cc b/runtime/onert/backend/cpu/KernelGenerator.cc
index c927bf5d4..d462daf5c 100644
--- a/runtime/onert/backend/cpu/KernelGenerator.cc
+++ b/runtime/onert/backend/cpu/KernelGenerator.cc
@@ -1266,7 +1266,7 @@ void KernelGenerator::visit(const ir::operation::FusedBatchNorm &node)
const auto epsilon = node.param().epsilon;
const auto is_training = node.param().is_training;
- const auto data_format = node.param().data_format;
+ const auto &data_format = node.param().data_format;
auto fn = std::make_unique<ops::FusedBatchNormLayer>();
diff --git a/runtime/onert/backend/cpu/ops/DetectionPostProcessLayer.cc b/runtime/onert/backend/cpu/ops/DetectionPostProcessLayer.cc
index d89741c86..dc9e20e0a 100644
--- a/runtime/onert/backend/cpu/ops/DetectionPostProcessLayer.cc
+++ b/runtime/onert/backend/cpu/ops/DetectionPostProcessLayer.cc
@@ -106,7 +106,7 @@ Array<const CornerBox> decodeBoxes(const Array<float> &raw_boxes, const Array<fl
for (size_t i = 0; i < num_boxes; ++i)
{
- auto anchor = anchors.at(i);
+ const auto &anchor = anchors.at(i);
auto &box = decoded_boxes_a.at(i);
float yc = in_boxes.at(i).y / scales.y * anchor.h + anchor.y;
float xc = in_boxes.at(i).x / scales.x * anchor.w + anchor.x;
@@ -121,7 +121,7 @@ Array<const CornerBox> decodeBoxes(const Array<float> &raw_boxes, const Array<fl
assert(box.y2 > box.y1);
}
- auto decoded_boxes_a_shape = decoded_boxes_a.shape();
+ const auto &decoded_boxes_a_shape = decoded_boxes_a.shape();
return array_cast<const CornerBox>(std::move(decoded_boxes_a), decoded_boxes_a_shape);
}
diff --git a/runtime/onert/backend/gpu_cl/ClConstantInitializer.h b/runtime/onert/backend/gpu_cl/ClConstantInitializer.h
index 95e228acd..ad5b47d19 100644
--- a/runtime/onert/backend/gpu_cl/ClConstantInitializer.h
+++ b/runtime/onert/backend/gpu_cl/ClConstantInitializer.h
@@ -39,7 +39,7 @@ template <typename T>
static void Init(const onert::ir::Operand &model_obj, onert::backend::ITensor &obj, const bool copy,
const onert::ir::Layout frontend_layout = onert::ir::Layout::UNKNOWN)
{
- const auto shape = model_obj.shape();
+ const auto &shape = model_obj.shape();
assert(model_obj.data());
obj.access([&](::onert::backend::ITensor &tensor) {
switch (shape.rank())
diff --git a/runtime/onert/backend/gpu_cl/KernelGenerator.cc b/runtime/onert/backend/gpu_cl/KernelGenerator.cc
index a24c4f59c..31d3134e6 100644
--- a/runtime/onert/backend/gpu_cl/KernelGenerator.cc
+++ b/runtime/onert/backend/gpu_cl/KernelGenerator.cc
@@ -166,7 +166,7 @@ absl::Status KernelGenerator::readConstTensor(
absl::variant<tflite::gpu::Tensor<tflite::gpu::Linear, tflite::gpu::DataType::FLOAT32>,
tflite::gpu::Tensor<tflite::gpu::HWC, tflite::gpu::DataType::FLOAT32>> *alpha)
{
- const auto shape = _ctx.at(index).shape();
+ const auto &shape = _ctx.at(index).shape();
if (CheckIfLinearConvertible(&shape))
{
tflite::gpu::Tensor<tflite::gpu::Linear, tflite::gpu::DataType::FLOAT32> tensor;
@@ -304,7 +304,7 @@ void KernelGenerator::visit(const ir::operation::Conv2D &node)
auto kernel{node.getInputs().at(ir::operation::Conv2D::KERNEL)};
auto bias{node.getInputs().at(ir::operation::Conv2D::BIAS)};
- const auto param = node.param();
+ const auto &param = node.param();
tflite::gpu::OperationDef op_def;
op_def.precision = tflite::gpu::CalculationsPrecision::F32;
@@ -375,7 +375,7 @@ void KernelGenerator::visit(const ir::operation::Conv2D &node)
{
std::unique_ptr<tflite::gpu::GPUOperation> gpu_op_1;
tflite::gpu::OperationDef op_def_1;
- const auto shape = _ctx.at(output).shape();
+ const auto &shape = _ctx.at(output).shape();
auto new_ind = _tensor_reg->addNewClTensor(shape);
addClNode({input}, {new_ind}, std::move(gpu_op));
@@ -418,7 +418,7 @@ void KernelGenerator::visit(const ir::operation::DepthwiseConv2D &node)
const auto stride = node.param().stride;
const auto dilation = node.param().dilation;
- const auto padding = node.param().padding;
+ const auto &padding = node.param().padding;
const auto multiplier = node.param().multiplier;
diff --git a/runtime/onert/backend/gpu_cl/operand/ICLTensor.cc b/runtime/onert/backend/gpu_cl/operand/ICLTensor.cc
index ef71bbc13..1e61b9928 100644
--- a/runtime/onert/backend/gpu_cl/operand/ICLTensor.cc
+++ b/runtime/onert/backend/gpu_cl/operand/ICLTensor.cc
@@ -60,7 +60,7 @@ void ICLTensor::writeConvertInit(tflite::gpu::TensorObjectConverterBuilder *conv
TensorObjectDef permute_def = input_def;
permute_def.object_def.object_type = ToObjectType(handle()->GetStorageType());
- auto dims = permute_def.dimensions;
+ const auto &dims = permute_def.dimensions;
const BHWC shape(dims.b, dims.h, dims.w, dims.c);
const TensorDescriptor desc{
permute_def.object_def.data_type,
@@ -105,7 +105,7 @@ void ICLTensor::readConvertInit(tflite::gpu::TensorObjectConverterBuilder *conve
permute_def.object_def.data_type = DataType::FLOAT32;
permute_def.object_def.user_provided = true;
- auto dims = permute_def.dimensions;
+ const auto &dims = permute_def.dimensions;
const BHWC shape(dims.b, dims.h, dims.w, dims.c);
const TensorDescriptor desc{
permute_def.object_def.data_type,
diff --git a/runtime/onert/backend/ruy/KernelGenerator.cc b/runtime/onert/backend/ruy/KernelGenerator.cc
index ae7ec28fd..735a948f6 100644
--- a/runtime/onert/backend/ruy/KernelGenerator.cc
+++ b/runtime/onert/backend/ruy/KernelGenerator.cc
@@ -55,7 +55,7 @@ std::unique_ptr<exec::FunctionSequence> KernelGenerator::generate(ir::OperationI
assert(_return_fn); // _return_fn must have been generated
ret->append(std::move(_return_fn));
- for (auto &&ind : (op.getInputs() | ir::Remove::UNDEFINED) + op.getOutputs())
+ for (const auto &&ind : (op.getInputs() | ir::Remove::UNDEFINED) + op.getOutputs())
{
auto portable_tensor = _tensor_reg->getPortableTensor(ind);
if (portable_tensor)
@@ -101,7 +101,7 @@ void KernelGenerator::visit(const ir::operation::Conv2D &node)
const auto stride = node.param().stride;
const auto activation = node.param().activation;
- const auto param_padding = node.param().padding;
+ const auto &param_padding = node.param().padding;
const auto dilation = node.param().dilation;
auto fn = std::make_unique<ops::ConvolutionLayer>();
diff --git a/runtime/onert/backend/xnnpack/KernelGenerator.cc b/runtime/onert/backend/xnnpack/KernelGenerator.cc
index 25f3fd238..b72149131 100644
--- a/runtime/onert/backend/xnnpack/KernelGenerator.cc
+++ b/runtime/onert/backend/xnnpack/KernelGenerator.cc
@@ -102,7 +102,7 @@ void KernelGenerator::visit(const ir::operation::Conv2D &node)
const auto stride = node.param().stride;
const auto activation = node.param().activation;
- const auto param_padding = node.param().padding;
+ const auto &param_padding = node.param().padding;
const auto dilation = node.param().dilation;
auto fn = std::make_unique<ops::ConvolutionLayer>(_external_context);
@@ -142,7 +142,7 @@ void KernelGenerator::visit(const ir::operation::DepthwiseConv2D &node)
const auto ker_width = ker_shape.dim(2);
const auto dilation_width = node.param().dilation.width_factor;
const auto dilation_height = node.param().dilation.height_factor;
- const auto param_padding = node.param().padding;
+ const auto &param_padding = node.param().padding;
const auto padding = ir::calculatePadding(param_padding, ifm_shape, ofm_shape, stride, ker_width,
ker_height, dilation_width, dilation_height);
const auto multiplier = node.param().multiplier;
diff --git a/runtime/onert/core/include/backend/basic/BackendContextHelpers.h b/runtime/onert/core/include/backend/basic/BackendContextHelpers.h
index 9992ca140..7588d42f0 100644
--- a/runtime/onert/core/include/backend/basic/BackendContextHelpers.h
+++ b/runtime/onert/core/include/backend/basic/BackendContextHelpers.h
@@ -63,7 +63,7 @@ template <typename T_BackendContext> void planTensors(const T_BackendContext &ct
if (!tensor_builder->isRegistered(ind))
{
// These tensors do not exist in any (No use and def)
- const auto info = obj.info();
+ const auto &info = obj.info();
// NOTE Currently we only support NHWC tensors for cpu-common tensors.
// There is no way to get the layout info from the backend context for now.
// When we support NCHW tensors as well, we also need to change tensor info to be
diff --git a/runtime/onert/core/src/backend/basic/TensorBuilder.cc b/runtime/onert/core/src/backend/basic/TensorBuilder.cc
index f9d83875d..4912af1f5 100644
--- a/runtime/onert/core/src/backend/basic/TensorBuilder.cc
+++ b/runtime/onert/core/src/backend/basic/TensorBuilder.cc
@@ -62,7 +62,7 @@ void TensorBuilder::registerTensorInfo(const ir::OperandIndex &ind, const ir::Op
void TensorBuilder::notifyFirstUse(const ir::OperandIndex &ind)
{
assert(_tensor_info_map.find(ind) != _tensor_info_map.end());
- const auto tensor_info = _tensor_info_map.at(ind);
+ const auto &tensor_info = _tensor_info_map.at(ind);
if (!_tensor_reg->getNativeTensor(ind)->is_dynamic())
{
diff --git a/runtime/onert/core/src/backend/builtin/TensorBuilder.cc b/runtime/onert/core/src/backend/builtin/TensorBuilder.cc
index fefae40d8..a2f7af3ea 100644
--- a/runtime/onert/core/src/backend/builtin/TensorBuilder.cc
+++ b/runtime/onert/core/src/backend/builtin/TensorBuilder.cc
@@ -57,7 +57,7 @@ void TensorBuilder::notifyFirstUse(const ir::OperandIndex &ind)
if (_tensor_info_map.find(ind) == _tensor_info_map.end()) // Do not proceed for user tensors
return;
- const auto tensor_info = _tensor_info_map.at(ind);
+ const auto &tensor_info = _tensor_info_map.at(ind);
if (!nativeOwnTensorAt(ind)->is_dynamic())
{
diff --git a/runtime/onert/core/src/compiler/HEScheduler.cc b/runtime/onert/core/src/compiler/HEScheduler.cc
index f662ef5b9..56e2208d6 100644
--- a/runtime/onert/core/src/compiler/HEScheduler.cc
+++ b/runtime/onert/core/src/compiler/HEScheduler.cc
@@ -409,7 +409,7 @@ int64_t HEScheduler::DFSChildrenMaxRank(const ir::OperationIndex &index)
int64_t HEScheduler::backendAvailableTime(const backend::Backend *backend,
const int64_t &starting_time, const int64_t &time_amount)
{
- const auto backend_times = _backends_avail_time.at(backend);
+ const auto &backend_times = _backends_avail_time.at(backend);
// finishing and starting times of an op, that will come after current op
auto next_op_fst = backend_times.upper_bound(starting_time);
// finishing time of an op, that will come before current op
diff --git a/runtime/onert/core/src/compiler/ShapeValidator.cc b/runtime/onert/core/src/compiler/ShapeValidator.cc
index 3e940f037..5c25ea1d1 100644
--- a/runtime/onert/core/src/compiler/ShapeValidator.cc
+++ b/runtime/onert/core/src/compiler/ShapeValidator.cc
@@ -227,9 +227,9 @@ void ShapeValidator::visit(const ir::operation::Reduce &node)
if (operands.at(output_index).info().isDynamic())
return;
- const auto input_index{node.getInputs().at(ir::operation::Reduce::Input::INPUT)};
- const auto input_shape = operands.at(input_index).shape();
- const auto output_shape = operands.at(output_index).shape();
+ const auto &input_index{node.getInputs().at(ir::operation::Reduce::Input::INPUT)};
+ const auto &input_shape = operands.at(input_index).shape();
+ const auto &output_shape = operands.at(output_index).shape();
OP_REQUIRES(input_shape.rank() <= 4);
OP_REQUIRES(output_shape.rank() <= input_shape.rank());
@@ -516,9 +516,9 @@ void ShapeValidator::visit(const ir::operation::Gather &node)
const auto ifm_index{node.getInputs().at(ir::operation::Gather::Input::INPUT)};
const auto indices_index{node.getInputs().at(ir::operation::Gather::Input::INDICES)};
- const auto ifm_shape = operands.at(ifm_index).shape();
- const auto indices_shape = operands.at(indices_index).shape();
- const auto ofm_shape = operands.at(ofm_index).shape();
+ const auto &ifm_shape = operands.at(ifm_index).shape();
+ const auto &indices_shape = operands.at(indices_index).shape();
+ const auto &ofm_shape = operands.at(ofm_index).shape();
OP_REQUIRES(ifm_shape.rank() <= 4);
OP_REQUIRES(indices_shape.rank() <= 3);
@@ -566,7 +566,7 @@ void ShapeValidator::visit(const ir::operation::Pack &node)
const auto output_rank = static_cast<int32_t>(output_shape.rank());
const auto input1_index{node.getInputs().at(0)};
- const auto input_shape = operands.at(input1_index).shape();
+ const auto &input_shape = operands.at(input1_index).shape();
OP_REQUIRES(axis >= -output_rank && axis < output_rank);
for (const auto &index : node.getInputs())
diff --git a/runtime/onert/core/src/compiler/StaticShapeInferer.cc b/runtime/onert/core/src/compiler/StaticShapeInferer.cc
index a25b326f1..68cff7e3a 100644
--- a/runtime/onert/core/src/compiler/StaticShapeInferer.cc
+++ b/runtime/onert/core/src/compiler/StaticShapeInferer.cc
@@ -524,11 +524,11 @@ void StaticShapeInferer::visit(const ir::operation::Fill &op)
assert(dims_buf);
const auto &dims_shape = shape.info().shape();
- auto new_shape = ((dims_type == ir::DataType::INT32)
- ? shape_inference::inferFillShape<int32_t>(
- dims_shape, reinterpret_cast<const int32_t *>(dims_buf))
- : shape_inference::inferFillShape<int64_t>(
- dims_shape, reinterpret_cast<const int64_t *>(dims_buf)));
+ const auto &new_shape = ((dims_type == ir::DataType::INT32)
+ ? shape_inference::inferFillShape<int32_t>(
+ dims_shape, reinterpret_cast<const int32_t *>(dims_buf))
+ : shape_inference::inferFillShape<int64_t>(
+ dims_shape, reinterpret_cast<const int64_t *>(dims_buf)));
output.info().shape(new_shape);
}
@@ -1088,8 +1088,8 @@ void StaticShapeInferer::visit(const ir::operation::SpaceToBatchND &op)
const auto output_index = op.getOutputs().at(0);
const auto input_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::INPUT)};
- const auto block_shape_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::BLOCK_SIZE)};
- const auto padding_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::PADDINGS)};
+ const auto &block_shape_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::BLOCK_SIZE)};
+ const auto &padding_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::PADDINGS)};
ir::Operand &output = operands.at(output_index);
const auto &input = operands.at(input_idx);
@@ -1103,9 +1103,9 @@ void StaticShapeInferer::visit(const ir::operation::SpaceToBatchND &op)
return;
}
- auto input_shape = input.info().shape();
- auto block_shape_shape = block_shape.info().shape();
- auto padding_shape = padding.info().shape();
+ const auto &input_shape = input.info().shape();
+ const auto &block_shape_shape = block_shape.info().shape();
+ const auto &padding_shape = padding.info().shape();
auto block_shape_data = reinterpret_cast<const int32_t *>(block_shape.data()->base());
auto padding_data = reinterpret_cast<const int32_t *>(padding.data()->base());
@@ -1325,7 +1325,7 @@ void StaticShapeInferer::visit(const ir::operation::While &op)
auto body_input_observer = _subg_input_observers.at(op.param().body_subg_index).get();
auto cond_input_observer = _subg_input_observers.at(op.param().cond_subg_index).get();
// re-sizing input shapes of body subgraph
- const auto inputs = op.getInputs();
+ const auto &inputs = op.getInputs();
std::vector<ir::OperandInfo> inputs_info;
const auto &graph = _lowered_subg->graph();
for (size_t i = 0; i < inputs.size(); ++i)
@@ -1401,9 +1401,7 @@ void StaticShapeInferer::visit(const ir::operation::Bulk &op)
const auto output_idx = op.getOutputs().at(0);
ir::Operand &output = operands.at(output_idx);
- auto cur_input_shape = input.info().shape();
- auto origin_input_shape = op.param().origin_input_shapes[0];
- auto cur_output_shape = output.info().shape();
+ const auto &cur_input_shape = input.info().shape();
auto origin_output_shape = op.param().origin_output_shapes[0];
// TODO: more check for valid batch request
diff --git a/runtime/onert/core/src/compiler/pass/PermutationInsertionPass.cc b/runtime/onert/core/src/compiler/pass/PermutationInsertionPass.cc
index 39eb803f5..1657c0c8f 100644
--- a/runtime/onert/core/src/compiler/pass/PermutationInsertionPass.cc
+++ b/runtime/onert/core/src/compiler/pass/PermutationInsertionPass.cc
@@ -87,7 +87,7 @@ void PermutationInsertionPass::callback(const ir::OperandIndex &index, ir::Opera
const auto op_layout = op_li->layout();
const backend::Backend *backend = op_li->backend();
assert(backend);
- auto use_node_inputs = operation.getInputs();
+ const auto &use_node_inputs = operation.getInputs();
assert(use_node_inputs.contains(index));
auto new_index = factor_to_index.at({backend, op_layout});
diff --git a/runtime/onert/core/src/exec/DynamicShapeInferer.cc b/runtime/onert/core/src/exec/DynamicShapeInferer.cc
index 78b21cf49..4cbf2fe64 100644
--- a/runtime/onert/core/src/exec/DynamicShapeInferer.cc
+++ b/runtime/onert/core/src/exec/DynamicShapeInferer.cc
@@ -423,11 +423,11 @@ void DynamicShapeInferer::visit(const ir::operation::Fill &op)
assert(dims_buf);
const auto &dims_shape = shape->getShape();
- auto output_shape = ((dims_type == ir::DataType::INT32)
- ? shape_inference::inferFillShape<int32_t>(
- dims_shape, reinterpret_cast<const int32_t *>(dims_buf))
- : shape_inference::inferFillShape<int64_t>(
- dims_shape, reinterpret_cast<const int64_t *>(dims_buf)));
+ const auto &output_shape = ((dims_type == ir::DataType::INT32)
+ ? shape_inference::inferFillShape<int32_t>(
+ dims_shape, reinterpret_cast<const int32_t *>(dims_buf))
+ : shape_inference::inferFillShape<int64_t>(
+ dims_shape, reinterpret_cast<const int64_t *>(dims_buf)));
output->applyShape(output_shape);
assert(output->buffer() != nullptr);
diff --git a/runtime/onert/core/src/exec/Execution.cc b/runtime/onert/core/src/exec/Execution.cc
index 1384c9fdc..f51bed820 100644
--- a/runtime/onert/core/src/exec/Execution.cc
+++ b/runtime/onert/core/src/exec/Execution.cc
@@ -48,7 +48,7 @@ void Execution::changeInputShape(const ir::IOIndex &index, const ir::Shape &new_
void Execution::setInput(const ir::IOIndex &index, const void *buffer, size_t length,
ir::Layout layout)
{
- const auto info = _executors->inputInfo(index);
+ const auto &info = _executors->inputInfo(index);
// TODO handle when (!buffer && length != 0) : setting the input as an optional tensor
@@ -88,7 +88,7 @@ void Execution::setInput(const ir::IOIndex &index, const ir::TypeInfo &type, con
// TODO Remove default parameter
void Execution::setOutput(const ir::IOIndex &index, void *buffer, size_t length, ir::Layout layout)
{
- const auto info = _executors->outputInfo(index);
+ const auto &info = _executors->outputInfo(index);
if (length < info.total_size())
{
@@ -102,7 +102,7 @@ void Execution::setOutput(const ir::IOIndex &index, void *buffer, size_t length,
void Execution::setOutput(const ir::IOIndex &index, const ir::TypeInfo &type,
const ir::Shape &shape, void *buffer, size_t length, ir::Layout layout)
{
- auto info = ir::OperandInfo::createStaticInfo(shape, type);
+ const auto &info = ir::OperandInfo::createStaticInfo(shape, type);
if (length < info.total_size())
{
diff --git a/runtime/onert/core/src/exec/ExecutorBase.cc b/runtime/onert/core/src/exec/ExecutorBase.cc
index ad0073477..0bc088b02 100644
--- a/runtime/onert/core/src/exec/ExecutorBase.cc
+++ b/runtime/onert/core/src/exec/ExecutorBase.cc
@@ -66,8 +66,8 @@ void ExecutorBase::execute(const std::vector<backend::IPortableTensor *> &inputs
assert(input_tensor != nullptr);
if (input != nullptr)
{
- const auto orig_input_shape = input_tensor->orig_info().shape();
- const auto changed_input_shape =
+ const auto &orig_input_shape = input_tensor->orig_info().shape();
+ const auto &changed_input_shape =
convertShape(input->getShape(), input->layout(), input_tensor->orig_layout());
if (input_tensor->get_info().shape() != changed_input_shape)
{
diff --git a/runtime/onert/core/src/ir/Graph.cc b/runtime/onert/core/src/ir/Graph.cc
index ef0f988fa..306572c99 100644
--- a/runtime/onert/core/src/ir/Graph.cc
+++ b/runtime/onert/core/src/ir/Graph.cc
@@ -168,7 +168,7 @@ void Graph::verify(void) const
void Graph::initializeUseDef()
{
operations().iterate([&](const OperationIndex &index, const IOperation &node) -> void {
- auto outputs = node.getOutputs();
+ const auto &outputs = node.getOutputs();
for (auto &&output : outputs | ir::Remove::UNDEFINED)
{
operands().at(output).setDef(index);
diff --git a/runtime/onert/core/src/ir/OperationValidator.cc b/runtime/onert/core/src/ir/OperationValidator.cc
index cf7323d77..09f773cf0 100644
--- a/runtime/onert/core/src/ir/OperationValidator.cc
+++ b/runtime/onert/core/src/ir/OperationValidator.cc
@@ -213,7 +213,7 @@ void OperationValidator::visit(const operation::DepthToSpace &node)
void OperationValidator::visit(const operation::DetectionPostProcess &node)
{
- auto param = node.param();
+ const auto &param = node.param();
// FIXME: number of classes should be 1 for now.
OP_REQUIRES(param.num_classes == 1);
diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc
index 21c7cdd6f..2265e990f 100644
--- a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc
+++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc
@@ -64,7 +64,7 @@ bool ANeuralNetworksExecution::compareDataType(const ANeuralNetworksOperandType
{
try
{
- const auto operand_type = _execution->primary_subgraph().operands().at(index).typeInfo();
+ const auto &operand_type = _execution->primary_subgraph().operands().at(index).typeInfo();
const auto typeInfo = NNAPIConvert::getTypeInfo(type);
if (operand_type != typeInfo)
@@ -111,7 +111,7 @@ bool ANeuralNetworksExecution::IsOptionalInput(const onert::ir::OperandIndex ind
bool ANeuralNetworksExecution::hasUnspecifiedDims(const onert::ir::OperandIndex index) noexcept
{
- const auto operand_shape = _execution->primary_subgraph().operands().at(index).shape();
+ const auto &operand_shape = _execution->primary_subgraph().operands().at(index).shape();
return operand_shape.hasUnspecifiedDims();
}
@@ -138,7 +138,7 @@ bool ANeuralNetworksExecution::setInput(uint32_t index, const ANeuralNetworksOpe
onert::ir::IOIndex input_index{index};
const auto operand_index = getInputOperandIndex(index);
- const auto type_info = _execution->primary_subgraph().operands().at(operand_index).typeInfo();
+ const auto &type_info = _execution->primary_subgraph().operands().at(operand_index).typeInfo();
const auto shape = (type != nullptr)
? NNAPIConvert::getShape(type)
: _execution->primary_subgraph().operands().at(operand_index).shape();
@@ -171,7 +171,6 @@ bool ANeuralNetworksExecution::setOptionalInput(uint32_t index,
onert::ir::IOIndex input_index{index};
const auto operand_index = getInputOperandIndex(index);
- const auto type_info = _execution->primary_subgraph().operands().at(operand_index).typeInfo();
const auto shape = (type != nullptr)
? NNAPIConvert::getShape(type)
: _execution->primary_subgraph().operands().at(operand_index).shape();
@@ -206,7 +205,7 @@ bool ANeuralNetworksExecution::setOutput(uint32_t index, const ANeuralNetworksOp
onert::ir::IOIndex output_index{index};
const auto operand_index = getOutputOperandIndex(index);
- const auto type_info = _execution->primary_subgraph().operands().at(operand_index).typeInfo();
+ const auto &type_info = _execution->primary_subgraph().operands().at(operand_index).typeInfo();
const auto shape = (type != nullptr)
? NNAPIConvert::getShape(type)
: _execution->primary_subgraph().operands().at(operand_index).shape();