diff options
15 files changed, 24 insertions, 24 deletions
diff --git a/compiler/luci/export/src/CircleExportMetadata.cpp b/compiler/luci/export/src/CircleExportMetadata.cpp index 017002f5c..25d0168ec 100644 --- a/compiler/luci/export/src/CircleExportMetadata.cpp +++ b/compiler/luci/export/src/CircleExportMetadata.cpp @@ -56,7 +56,7 @@ const std::vector<uint8_t> CircleExportMetadata::encoded_execution_plan_table() const auto id = kv.first; write_u32(data, id); - const auto plan_vector = kv.second; + const auto &plan_vector = kv.second; const auto size = plan_vector.size(); write_u32(data, size); @@ -81,7 +81,7 @@ const std::vector<uint8_t> CircleExportMetadata::encoded_source_table(void) const auto id = kv.first; write_u32(data, id); - const auto origin_name = kv.second; + const auto &origin_name = kv.second; const auto length = origin_name.length(); write_u32(data, length + 1); // name + '\0 @@ -107,7 +107,7 @@ const std::vector<uint8_t> CircleExportMetadata::encoded_op_table(void) const auto id = kv.first; write_u32(data, id); - const auto origins = kv.second; + const auto &origins = kv.second; const auto node_num = origins.size(); write_u32(data, node_num); diff --git a/compiler/luci/import/src/CircleImportMetadata.cpp b/compiler/luci/import/src/CircleImportMetadata.cpp index 9c1fe7356..fbdea8a7c 100644 --- a/compiler/luci/import/src/CircleImportMetadata.cpp +++ b/compiler/luci/import/src/CircleImportMetadata.cpp @@ -236,7 +236,7 @@ const OriginTable CircleImportMetadata::origin_table(void) std::vector<std::shared_ptr<CircleNodeOrigin>> origins; for (auto source_id : source_ids) { - const auto source_name = _source_table.at(source_id); + const auto &source_name = _source_table.at(source_id); origins.push_back(single_origin(source_id, source_name)); } diff --git a/compiler/luci/partition/src/PartitionIRDump.cpp b/compiler/luci/partition/src/PartitionIRDump.cpp index 0fabfc416..5a78d99c0 100644 --- a/compiler/luci/partition/src/PartitionIRDump.cpp +++ b/compiler/luci/partition/src/PartitionIRDump.cpp @@ -56,7 +56,7 @@ 
void dump(std::ostream &os, const PGroups *pgroups) for (auto it = pgroups->node2group.begin(); it != pgroups->node2group.end(); ++it) { auto node = it->first; - auto group = it->second; + auto &group = it->second; os << " Node: " << node << "(" << luci::opcode_name(node) << "," << node->name() << "): " << group << std::endl; } diff --git a/compiler/luci/pass/src/CircleQuantizer.cpp b/compiler/luci/pass/src/CircleQuantizer.cpp index 9039a839f..6db26d179 100644 --- a/compiler/luci/pass/src/CircleQuantizer.cpp +++ b/compiler/luci/pass/src/CircleQuantizer.cpp @@ -352,7 +352,7 @@ void CircleQuantizer::quantize(loco::Graph *g) const // Check dtype/granularity of layer params for (auto layer_param : layer_params) { - auto name = layer_param->name; + const auto &name = layer_param->name; if (!in_array(to_lower_case(layer_param->dtype), fakeq_supported_output_model_dtype)) { throw std::runtime_error("Unsupported dtype in " + name + ". List of supported dtype: " + diff --git a/compiler/luci/pass/src/CopyQuantParamPass.cpp b/compiler/luci/pass/src/CopyQuantParamPass.cpp index 9b1bb0ea9..0984fe85c 100644 --- a/compiler/luci/pass/src/CopyQuantParamPass.cpp +++ b/compiler/luci/pass/src/CopyQuantParamPass.cpp @@ -59,8 +59,8 @@ bool CopyQuantParamPass::run(loco::Graph *g) for (uint32_t i = 0; i < _src_tensors.size(); i++) { - auto src = _src_tensors[i]; - auto dst = _dst_tensors[i]; + auto &src = _src_tensors[i]; + auto &dst = _dst_tensors[i]; auto nodes = get_src_dst(src, dst); if (not nodes.src) diff --git a/compiler/luci/pass/src/SubstituteSqueezeToReshapePass.cpp b/compiler/luci/pass/src/SubstituteSqueezeToReshapePass.cpp index df7266df9..9bc764f92 100644 --- a/compiler/luci/pass/src/SubstituteSqueezeToReshapePass.cpp +++ b/compiler/luci/pass/src/SubstituteSqueezeToReshapePass.cpp @@ -119,7 +119,7 @@ bool substitute_squeeze_to_reshape(luci::CircleSqueeze *squeeze) if (squeeze->shape_status() != luci::ShapeStatus::VALID) return false; - auto squeeze_dims = 
squeeze->squeeze_dims(); + auto &squeeze_dims = squeeze->squeeze_dims(); if (not is_valid_input(input, squeeze_dims)) throw std::runtime_error("Invalid values in squeeze_dims: " + squeeze->name()); diff --git a/compiler/luci/pass/src/helpers/LayerInfoMap.cpp b/compiler/luci/pass/src/helpers/LayerInfoMap.cpp index ac07f9ec9..37d8e18e9 100644 --- a/compiler/luci/pass/src/helpers/LayerInfoMap.cpp +++ b/compiler/luci/pass/src/helpers/LayerInfoMap.cpp @@ -145,7 +145,7 @@ LayerInfoMap layer_info_map(loco::Graph *g, std::vector<LayerInfo> &layers_info) for (auto &&info : layers_info) { - auto name = info.name; + auto &name = info.name; bool found = false; for (auto node : loco::active_nodes(loco::output_nodes(g))) { diff --git a/compiler/luci/service/src/ChangeOutputs.cpp b/compiler/luci/service/src/ChangeOutputs.cpp index 1f8000061..65175530c 100644 --- a/compiler/luci/service/src/ChangeOutputs.cpp +++ b/compiler/luci/service/src/ChangeOutputs.cpp @@ -72,7 +72,7 @@ void change_outputs(loco::Graph *graph, const std::vector<std::string> &new_outp auto output = luci::output_node(graph, out); // output is CircleOutput assert(output != nullptr); - auto node_name = new_outputs.at(out); + auto &node_name = new_outputs.at(out); auto node = named_nodes[node_name]; assert(node != nullptr); diff --git a/compiler/luci/service/src/ShapeInfer_StridedSlice.cpp b/compiler/luci/service/src/ShapeInfer_StridedSlice.cpp index 33490db19..5a22da319 100644 --- a/compiler/luci/service/src/ShapeInfer_StridedSlice.cpp +++ b/compiler/luci/service/src/ShapeInfer_StridedSlice.cpp @@ -398,7 +398,7 @@ loco::TensorShape infer_output_shape(const CircleStridedSlice *node) StridedSliceContext op_context(node); auto op_params = BuildStridedSliceParams(&op_context); - auto effective_input_shape = op_context.effective_input_shape; + auto &effective_input_shape = op_context.effective_input_shape; std::vector<int64_t> output_shape_vector; for (int32_t idx = effective_input_shape.rank() - 1; idx >= 0; --idx) 
diff --git a/compute/cker/include/cker/operation/Helper/MatmulBCast.h b/compute/cker/include/cker/operation/Helper/MatmulBCast.h index b80ccc0d0..b7d639433 100644 --- a/compute/cker/include/cker/operation/Helper/MatmulBCast.h +++ b/compute/cker/include/cker/operation/Helper/MatmulBCast.h @@ -62,13 +62,13 @@ public: if (!_batch_bcast->IsValid()) return; - auto x_reshaped = _batch_bcast->x_reshape(); - auto y_reshaped = _batch_bcast->y_reshape(); + const auto &x_reshaped = _batch_bcast->x_reshape(); + const auto &y_reshaped = _batch_bcast->y_reshape(); auto output_shape = _batch_bcast->output_shape(); _x_batch_size = std::accumulate(x_reshaped.cbegin(), x_reshaped.cend(), INT32_C(1), std::multiplies<int32_t>()); - _y_batch_size = std::accumulate(x_reshaped.cbegin(), x_reshaped.cend(), INT32_C(1), + _y_batch_size = std::accumulate(y_reshaped.cbegin(), y_reshaped.cend(), INT32_C(1), std::multiplies<int32_t>()); _output_shape.ReplaceWith(output_shape.size(), output_shape.data()); _output_batch_size = _output_shape.FlatSize(); diff --git a/runtime/libs/benchmark/src/Result.cpp b/runtime/libs/benchmark/src/Result.cpp index 04925f4d2..8c1e2d2ea 100644 --- a/runtime/libs/benchmark/src/Result.cpp +++ b/runtime/libs/benchmark/src/Result.cpp @@ -157,16 +157,16 @@ namespace benchmark Result::Result(const Phases &phases) { - const auto option = phases.option(); + const auto &option = phases.option(); { for (int i = PhaseEnum::MODEL_LOAD; i <= PhaseEnum::PREPARE; ++i) { - auto phase = phases.at(gPhaseStrings[i]); + const auto &phase = phases.at(gPhaseStrings[i]); time[i][FigureType::MEAN] = averageTimeMs(phase); } int i = PhaseEnum::EXECUTE; - auto exec_phase = phases.at(gPhaseStrings[i]); + const auto &exec_phase = phases.at(gPhaseStrings[i]); time[i][FigureType::MEAN] = averageTimeMs(exec_phase); time[i][FigureType::MAX] = maxTimeMs(exec_phase); time[i][FigureType::MIN] = minTimeMs(exec_phase); diff --git a/runtime/onert/backend/cpu/KernelGenerator.cc 
b/runtime/onert/backend/cpu/KernelGenerator.cc index d462daf5c..dff54c1de 100644 --- a/runtime/onert/backend/cpu/KernelGenerator.cc +++ b/runtime/onert/backend/cpu/KernelGenerator.cc @@ -307,7 +307,7 @@ void KernelGenerator::visit(const ir::operation::Conv2D &node) const auto stride = node.param().stride; const auto activation = node.param().activation; - const auto param_padding = node.param().padding; + const auto &param_padding = node.param().padding; const auto dilation = node.param().dilation; auto fn = std::make_unique<ops::ConvolutionLayer>(); @@ -629,7 +629,7 @@ void KernelGenerator::visit(const ir::operation::Einsum &node) for (const auto &ifm_idx : node.getInputs()) input_tensors.emplace_back(_tensor_reg->getPortableTensor(ifm_idx)); - const auto equation = node.param().equation; + const auto &equation = node.param().equation; auto fn = std::make_unique<ops::EinsumLayer>(); diff --git a/runtime/onert/backend/gpu_cl/KernelGenerator.cc b/runtime/onert/backend/gpu_cl/KernelGenerator.cc index 31d3134e6..de8d3b463 100644 --- a/runtime/onert/backend/gpu_cl/KernelGenerator.cc +++ b/runtime/onert/backend/gpu_cl/KernelGenerator.cc @@ -110,7 +110,7 @@ void KernelGenerator::get_operation(FunctionMap &Functions) absl::Status KernelGenerator::readConstTensor(const ir::OperandIndex &index, tflite::gpu::TensorOrScalar *param) { - const auto shape = _ctx.at(index).shape(); + const auto &shape = _ctx.at(index).shape(); if (shape.rank() == 0 && shape.num_elements() == 1) { tflite::gpu::Tensor<tflite::gpu::Scalar, tflite::gpu::DataType::FLOAT32> tensor; @@ -514,7 +514,7 @@ void KernelGenerator::visit(const ir::operation::DepthwiseConv2D &node) { std::unique_ptr<tflite::gpu::GPUOperation> gpu_op_1; tflite::gpu::OperationDef op_def_1; - const auto shape = _ctx.at(ofm_index).shape(); + const auto &shape = _ctx.at(ofm_index).shape(); auto new_ind = _tensor_reg->addNewClTensor(shape); addClNode({ifm_index}, {new_ind}, std::move(gpu_op)); diff --git 
a/runtime/onert/core/src/compiler/StaticShapeInferer.cc b/runtime/onert/core/src/compiler/StaticShapeInferer.cc index 68cff7e3a..ec5d2146b 100644 --- a/runtime/onert/core/src/compiler/StaticShapeInferer.cc +++ b/runtime/onert/core/src/compiler/StaticShapeInferer.cc @@ -803,7 +803,7 @@ void StaticShapeInferer::visit(const ir::operation::Permute &op) // However, it is not applied here, so input/output have the same layout of frontend. Because // "ExecutorFactory" would convert shape of input/output accoding to the layouts when registering // operand info to "TensorBuilder" after calling "StaticShapeInferer" - const auto new_shape = input.info().shape(); + const auto &new_shape = input.info().shape(); output.info().shape(new_shape); } diff --git a/runtime/onert/core/src/dumper/dot/OperandNode.cc b/runtime/onert/core/src/dumper/dot/OperandNode.cc index 88f5254f3..49319d595 100644 --- a/runtime/onert/core/src/dumper/dot/OperandNode.cc +++ b/runtime/onert/core/src/dumper/dot/OperandNode.cc @@ -35,7 +35,7 @@ Operand::Operand(const ir::OperandIndex &index, Type type) : Node{"operand" + std::to_string(index.value())} { { - auto type_to_shape = [](Type type) { + auto type_to_shape = [](Type type) -> const auto & { switch (type) { case Type::MODEL_INPUT: |