Diffstat (limited to 'compiler/nnc')
-rw-r--r--  compiler/nnc/backends/acl_soft_backend/AclCppGenerator.cpp    2
-rw-r--r--  compiler/nnc/backends/acl_soft_backend/AclCppOpGenerator.cpp  90
-rw-r--r--  compiler/nnc/backends/acl_soft_backend/ArtifactModel.cpp      4
-rw-r--r--  compiler/nnc/backends/acl_soft_backend/ArtifactModel.h        18
-rw-r--r--  compiler/nnc/backends/interpreter/InterpreterBackend.cpp      10
-rw-r--r--  compiler/nnc/backends/soft_backend/CPPGenerator.cpp           69
-rw-r--r--  compiler/nnc/backends/soft_backend/ModelAnalyzer.cpp          4
-rw-r--r--  compiler/nnc/backends/soft_backend/ModelAnalyzer.h            6
-rw-r--r--  compiler/nnc/backends/soft_backend/SequencedIR.h              6
-rw-r--r--  compiler/nnc/driver/Options.cpp                               22
-rw-r--r--  compiler/nnc/include/pass/PassData.h                          15
-rw-r--r--  compiler/nnc/include/passes/optimizations/CombineTransposes.h 1
-rw-r--r--  compiler/nnc/include/passes/optimizations/OptimizationUtils.h 10
-rw-r--r--  compiler/nnc/include/support/CommandLine.h                    6
-rw-r--r--  compiler/nnc/passes/optimizations/CombineTransposes.cpp       4
-rw-r--r--  compiler/nnc/passes/optimizations/DeadCodeElimination.cpp     4
-rw-r--r--  compiler/nnc/passes/optimizations/FuseArithmeticOps.cpp       4
-rw-r--r--  compiler/nnc/passes/transformations/DataFormatSwitcher.cpp    10
-rw-r--r--  compiler/nnc/passes/transformations/LowerConv2D.cpp           4
-rw-r--r--  compiler/nnc/tests/acl_soft_backend/AclCppOperations.cpp      2
-rw-r--r--  compiler/nnc/tests/acl_soft_backend/artifact_cmake/main.cpp   26
-rw-r--r--  compiler/nnc/tests/soft_backend/CompileCPP.cpp                2
-rw-r--r--  compiler/nnc/unittests/acl_backend/DOMToText.cpp              38
-rw-r--r--  compiler/nnc/unittests/acl_backend/MIRToDOM.cpp               30
-rw-r--r--  compiler/nnc/unittests/optimizations/SinkTest.cpp             4
-rw-r--r--  compiler/nnc/unittests/soft_backend/CPPOperations.cpp         45
-rw-r--r--  compiler/nnc/unittests/support/CommandLineTest.cpp            48
-rw-r--r--  compiler/nnc/unittests/transformations/Switcher.cpp           4
28 files changed, 251 insertions, 237 deletions
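Taken together, the hunks below are a pure reformatting pass: constructor initializer lists and wrapped call arguments move from a paren-aligned, 4-space continuation style to a uniform 2-space one, with no behavioral change. As a rough illustration only, a .clang-format delta along these lines would produce this kind of layout (hypothetical excerpt; the repository's actual style file is not part of this diff):

    # hypothetical .clang-format excerpt, inferred from the hunks below
    ContinuationIndentWidth: 2            # wrapped expressions indent by 2
    ConstructorInitializerIndentWidth: 2  # ": _member(...)" lines indent by 2
    AlignAfterOpenBracket: AlwaysBreak    # break after '(' instead of aligning under it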
diff --git a/compiler/nnc/backends/acl_soft_backend/AclCppGenerator.cpp b/compiler/nnc/backends/acl_soft_backend/AclCppGenerator.cpp
index 3a5b9ecaf..cad05cc1d 100644
--- a/compiler/nnc/backends/acl_soft_backend/AclCppGenerator.cpp
+++ b/compiler/nnc/backends/acl_soft_backend/AclCppGenerator.cpp
@@ -30,7 +30,7 @@ using namespace std;
namespace fs = boost::filesystem;
AclCppCodeGenerator::AclCppCodeGenerator(string output_dir, string artifact_name)
- : _output_dir(std::move(output_dir)), _artifact_name(std::move(artifact_name))
+ : _output_dir(std::move(output_dir)), _artifact_name(std::move(artifact_name))
{
}
diff --git a/compiler/nnc/backends/acl_soft_backend/AclCppOpGenerator.cpp b/compiler/nnc/backends/acl_soft_backend/AclCppOpGenerator.cpp
index b5e3734ae..0abe3ec72 100644
--- a/compiler/nnc/backends/acl_soft_backend/AclCppOpGenerator.cpp
+++ b/compiler/nnc/backends/acl_soft_backend/AclCppOpGenerator.cpp
@@ -33,8 +33,8 @@ using namespace std;
using namespace mir;
AclCppOpGenerator::AclCppOpGenerator(const string &name, ostream &par_out)
- : _parOut(par_out), _module(name), _constrBlock(nullptr), _infBlock(nullptr),
- _clScheduler(AF::id("arm_compute::CLScheduler"))
+ : _parOut(par_out), _module(name), _constrBlock(nullptr), _infBlock(nullptr),
+ _clScheduler(AF::id("arm_compute::CLScheduler"))
{
}
@@ -60,13 +60,14 @@ const ArtifactModule &AclCppOpGenerator::generate(mir::Graph *g)
_parInVar = _artifactClass->var(false, "std::ifstream", "_parIn");
_parIn = _parInVar->use();
string par_file_name = _module.name() + ".par";
- _constrBlock->call("open", {AF::lit("\"" + par_file_name + "\""),
- AF::lit("std::ios_base::in | std::ios_base::binary")},
- _parIn);
+ _constrBlock->call(
+ "open",
+ {AF::lit("\"" + par_file_name + "\""), AF::lit("std::ios_base::in | std::ios_base::binary")},
+ _parIn);
auto file_fail = _constrBlock->ifCond(AF::call("fail", {}, _parIn));
auto file_fail_block = file_fail->getBlock();
file_fail_block->addStatement(
- AF::lit("throw std::string(\"Failed to open file: " + par_file_name + " for reading\")"));
+ AF::lit("throw std::string(\"Failed to open file: " + par_file_name + " for reading\")"));
// Traverse the computational graph.
g->accept(this);
@@ -89,8 +90,8 @@ void AclCppOpGenerator::visit(ops::ConcatOp &op)
const auto *ir_output = op.getOutput(0);
static const char *axis_names[] = {
- "arm_compute::DataLayoutDimension::BATCHES", "arm_compute::DataLayoutDimension::CHANNEL",
- "arm_compute::DataLayoutDimension::HEIGHT", "arm_compute::DataLayoutDimension::WIDTH"};
+ "arm_compute::DataLayoutDimension::BATCHES", "arm_compute::DataLayoutDimension::CHANNEL",
+ "arm_compute::DataLayoutDimension::HEIGHT", "arm_compute::DataLayoutDimension::WIDTH"};
int axis = op.getAxis();
assert(axis >= 0 && axis < static_cast<int>(sizeof(axis_names) / sizeof(axis_names[0])) &&
@@ -105,8 +106,8 @@ void AclCppOpGenerator::visit(ops::ConcatOp &op)
for (const Operation::Output *ir_input : ir_inputs)
_constrBlock->call("push_back", {AF::ref(AF::id(tensorName(ir_input)))}, inputs);
- auto layer = genLayer("arm_compute::CLConcatenateLayer", prefix,
- {inputs, AF::ref(out), AF::lit(axis_name)});
+ auto layer =
+ genLayer("arm_compute::CLConcatenateLayer", prefix, {inputs, AF::ref(out), AF::lit(axis_name)});
addToPersistentTensors(out);
genLayerExecution(layer);
@@ -214,13 +215,13 @@ shared_ptr<ArtifactVariable> AclCppOpGenerator::genPadStrideInfo(const Op &op, c
string var_name = prefix + "_pad_stride_info";
list<std::shared_ptr<ArtifactExpr>> var_init_params = {
- AF::lit(to_string(strides.dim(1))),
- AF::lit(to_string(strides.dim(0))),
- AF::lit(to_string(padding_before.at(1))),
- AF::lit(to_string(padding_after.at(1))),
- AF::lit(to_string(padding_before.at(0))),
- AF::lit(to_string(padding_after.at(0))),
- AF::lit("arm_compute::DimensionRoundingType::FLOOR")};
+ AF::lit(to_string(strides.dim(1))),
+ AF::lit(to_string(strides.dim(0))),
+ AF::lit(to_string(padding_before.at(1))),
+ AF::lit(to_string(padding_after.at(1))),
+ AF::lit(to_string(padding_before.at(0))),
+ AF::lit(to_string(padding_after.at(0))),
+ AF::lit("arm_compute::DimensionRoundingType::FLOOR")};
auto pad_stride_info_var = block->var(type_name, var_name, {}, var_init_params);
@@ -316,7 +317,7 @@ static bool shouldSerializeConstant(const ops::ConstantOp &op)
// themselves,
// so we don't serialize them here, also we don't serialize tensors from dangling ConstantOp
static std::map<Operation::Type, std::size_t> self_serializing_ops_to_inputs{
- {Operation::Type::conv2D, 1}, {Operation::Type::fullyConnected, 1}};
+ {Operation::Type::conv2D, 1}, {Operation::Type::fullyConnected, 1}};
for (Operation::Use use : op.getOutput(0)->getUses())
{
@@ -420,8 +421,8 @@ void AclCppOpGenerator::visit(ops::PadOp &op)
for (int i = 0; i < ir_input->getShape().rank(); ++i)
{
auto pad_var = _constrBlock->var(
- "arm_compute::PaddingInfo", prefix + "_pad_" + to_string(i), {},
- {AF::lit(to_string(padding_before[i])), AF::lit(to_string(padding_after[i]))});
+ "arm_compute::PaddingInfo", prefix + "_pad_" + to_string(i), {},
+ {AF::lit(to_string(padding_before[i])), AF::lit(to_string(padding_after[i]))});
auto pad = pad_var->use();
_constrBlock->call("push_back", {pad}, pad_list);
}
@@ -430,7 +431,7 @@ void AclCppOpGenerator::visit(ops::PadOp &op)
// FIXME Set up the `constant_value` parameter.
assert(op.getPaddingValue() == 0.0f);
auto layer =
- genLayer("arm_compute::CLPadLayer", prefix, {AF::ref(input), AF::ref(out), pad_list});
+ genLayer("arm_compute::CLPadLayer", prefix, {AF::ref(input), AF::ref(out), pad_list});
genLayerExecution(layer);
}
@@ -449,7 +450,7 @@ void AclCppOpGenerator::genPooling(Op &op, const std::string &pooling_type, bool
// Transpose data from MIR format to format compatible with ACL
const string transposed_input_name = output_tensor_name + "transposed_input";
shared_ptr<ArtifactId> transposed_input =
- genTransposeMIRtoACL(transposed_input_name, ir_input->getShape(), in_id);
+ genTransposeMIRtoACL(transposed_input_name, ir_input->getShape(), in_id);
const string layer_name = output_tensor_name + "_pooling_layer";
@@ -459,31 +460,31 @@ void AclCppOpGenerator::genPooling(Op &op, const std::string &pooling_type, bool
// Create kernel window info
shared_ptr<ArtifactVariable> kernel_window_var = _constrBlock->var(
- "arm_compute::Size2D", layer_name + "_kernel_window", {},
- {AF::lit(to_string(op.getWindowSize()[1])), AF::lit(to_string(op.getWindowSize()[0]))});
+ "arm_compute::Size2D", layer_name + "_kernel_window", {},
+ {AF::lit(to_string(op.getWindowSize()[1])), AF::lit(to_string(op.getWindowSize()[0]))});
shared_ptr<ArtifactId> kernel_window = kernel_window_var->use();
// Create pooling info: pooling type, kernel info, strides, etc
shared_ptr<ArtifactVariable> pooling_info_var =
- _constrBlock->var("arm_compute::PoolingLayerInfo", layer_name + "_pooling_info", {},
- {AF::lit(pooling_type), kernel_window, pad_stride_info,
- AF::lit(exclude_padding ? "true" : "false")});
+ _constrBlock->var("arm_compute::PoolingLayerInfo", layer_name + "_pooling_info", {},
+ {AF::lit(pooling_type), kernel_window, pad_stride_info,
+ AF::lit(exclude_padding ? "true" : "false")});
shared_ptr<ArtifactId> pooling_info = pooling_info_var->use();
// Generate auxiliary tensor to hold transposed output of pool in NCHW format
Shape transposed_output_shape = transposeShape<0, 3, 1, 2>(ir_output->getShape());
shared_ptr<ArtifactId> transposed_output =
- genTensor(layer_name + "_out_transpose", transposed_output_shape);
+ genTensor(layer_name + "_out_transpose", transposed_output_shape);
// Actual layer creation
shared_ptr<ArtifactId> layer =
- genLayer("arm_compute::CLPoolingLayer", layer_name,
- {AF::ref(transposed_input), AF::ref(transposed_output), pooling_info});
+ genLayer("arm_compute::CLPoolingLayer", layer_name,
+ {AF::ref(transposed_input), AF::ref(transposed_output), pooling_info});
genTensorAllocation(_infBlock, transposed_output);
genLayerExecution(layer);
shared_ptr<ArtifactId> output =
- genTransposeACLtoMIR(output_tensor_name, transposed_output_shape, transposed_output);
+ genTransposeACLtoMIR(output_tensor_name, transposed_output_shape, transposed_output);
genTensorDeallocation(_infBlock, transposed_input);
genTensorDeallocation(_infBlock, transposed_output);
@@ -521,13 +522,13 @@ void AclCppOpGenerator::genConvolution(Op &op, const string &acl_func_name, cons
// Generate auxiliary tensor to hold transposed input of convolution in NCHW format
shared_ptr<ArtifactId> transposed_input =
- genTransposeMIRtoACL(output_tensor_name + "_transposed_input", ir_input->getShape(), input);
+ genTransposeMIRtoACL(output_tensor_name + "_transposed_input", ir_input->getShape(), input);
// Create the transposed output tensor in the DOM.
const string transposed_output_name = output_tensor_name + "_transposed_output";
Shape transposed_output_shape = transposeShape<0, 3, 1, 2>(ir_output->getShape());
shared_ptr<ArtifactId> transposed_output =
- genTensor(transposed_output_name, transposed_output_shape);
+ genTensor(transposed_output_name, transposed_output_shape);
string operation_name = output_tensor_name + suffix;
@@ -564,7 +565,7 @@ void AclCppOpGenerator::genConvolution(Op &op, const string &acl_func_name, cons
// Generate auxiliary tensor to hold transposed output of convolution in NHWC format
shared_ptr<ArtifactId> output =
- genTransposeACLtoMIR(output_tensor_name, transposed_output_shape, transposed_output);
+ genTransposeACLtoMIR(output_tensor_name, transposed_output_shape, transposed_output);
genTensorDeallocation(_infBlock, transposed_input);
genTensorDeallocation(_infBlock, transposed_output);
@@ -589,9 +590,9 @@ void AclCppOpGenerator::genActivation(const Operation &op, const std::string &ac
// constructor. This instance provides information about the concrete activation function,
// like: ReLU, Tanh etc. and two optional parameters (alpha and beta) needed by some activations.
auto activation_info_var = _constrBlock->var(
- "arm_compute::ActivationLayerInfo", prefix + "_activation_info", {},
- {AF::lit("arm_compute::ActivationLayerInfo::ActivationFunction::" + activation_name),
- AF::lit(to_string(a)), AF::lit(to_string(b))});
+ "arm_compute::ActivationLayerInfo", prefix + "_activation_info", {},
+ {AF::lit("arm_compute::ActivationLayerInfo::ActivationFunction::" + activation_name),
+ AF::lit(to_string(a)), AF::lit(to_string(b))});
auto activation_info = activation_info_var->use();
// Create an instance of the CLActivationLayer class as a member of the artifact class.
@@ -619,9 +620,10 @@ shared_ptr<ArtifactId> AclCppOpGenerator::genAddition(const string &prefix, size
auto arithmetic_add_layer = arithmetic_add_layer_var->use();
// Generate the call: arithmetic_add_layer.configure(&in1, &in2, &out);
- _constrBlock->call("configure", {AF::ref(in1), AF::ref(in2), AF::ref(out),
- AF::lit("arm_compute::ConvertPolicy::WRAP")},
- arithmetic_add_layer);
+ _constrBlock->call(
+ "configure",
+ {AF::ref(in1), AF::ref(in2), AF::ref(out), AF::lit("arm_compute::ConvertPolicy::WRAP")},
+ arithmetic_add_layer);
// Generate the call: arithmetic_add_layer.run();
_infBlock->call("run", {}, arithmetic_add_layer);
@@ -696,8 +698,8 @@ string AclCppOpGenerator::tensorName(const Operation::Output *ir_tensor) const
if (!tensor_name.empty())
{
tensor_name = "_" + tensor_name;
- replace_if(tensor_name.begin(), tensor_name.end(), [](char c) { return std::isalnum(c) == 0; },
- '_');
+ replace_if(
+ tensor_name.begin(), tensor_name.end(), [](char c) { return std::isalnum(c) == 0; }, '_');
}
else
{
@@ -740,7 +742,7 @@ shared_ptr<ArtifactId> AclCppOpGenerator::genTensor(const string &name, const Sh
const char *type_name = "arm_compute::TensorShape";
shared_ptr<ArtifactId> shape =
- genVectorInitializedVar(_constrBlock, type_name, name + "_shape", shape_vectorized);
+ genVectorInitializedVar(_constrBlock, type_name, name + "_shape", shape_vectorized);
_constrBlock->call("initializeTensor", {id, shape});
if (gen_accessor)
@@ -903,7 +905,7 @@ void AclCppOpGenerator::genTranspose(const std::shared_ptr<nnc::ArtifactId> &inp
// Create operation parameter containing permutation vector
shared_ptr<ArtifactId> perm_vector = genVectorInitializedVar(
- _constrBlock, "arm_compute::PermutationVector", out_name + "_perm_param", acl_perm);
+ _constrBlock, "arm_compute::PermutationVector", out_name + "_perm_param", acl_perm);
// Instantiate the CLPermute object.
string layer_name = out_name + "_transpose_layer";
diff --git a/compiler/nnc/backends/acl_soft_backend/ArtifactModel.cpp b/compiler/nnc/backends/acl_soft_backend/ArtifactModel.cpp
index 8888697e7..bbaa1f523 100644
--- a/compiler/nnc/backends/acl_soft_backend/ArtifactModel.cpp
+++ b/compiler/nnc/backends/acl_soft_backend/ArtifactModel.cpp
@@ -25,8 +25,8 @@ using namespace std;
ArtifactFunctionCall::ArtifactFunctionCall(string func_name,
list<shared_ptr<ArtifactExpr>> param_list,
shared_ptr<ArtifactExpr> on, ArtifactCallType call_type)
- : _funcName(std::move(func_name)), _callType(call_type), _on(std::move(on)),
- _paramList(std::move(param_list))
+ : _funcName(std::move(func_name)), _callType(call_type), _on(std::move(on)),
+ _paramList(std::move(param_list))
{
}
diff --git a/compiler/nnc/backends/acl_soft_backend/ArtifactModel.h b/compiler/nnc/backends/acl_soft_backend/ArtifactModel.h
index 106c9bec3..89d803021 100644
--- a/compiler/nnc/backends/acl_soft_backend/ArtifactModel.h
+++ b/compiler/nnc/backends/acl_soft_backend/ArtifactModel.h
@@ -204,7 +204,7 @@ class ArtifactUnaryExpr : public ArtifactExpr
{
public:
ArtifactUnaryExpr(ArtifactUnOp op, std::shared_ptr<ArtifactExpr> expr)
- : _op(op), _expr(std::move(expr))
+ : _op(op), _expr(std::move(expr))
{
}
@@ -248,7 +248,7 @@ class ArtifactBinaryExpr : public ArtifactExpr
public:
ArtifactBinaryExpr(ArtifactBinOp op, std::shared_ptr<ArtifactExpr> left,
std::shared_ptr<ArtifactExpr> right)
- : _op(op), _left(std::move(left)), _right(std::move(right))
+ : _op(op), _left(std::move(left)), _right(std::move(right))
{
}
@@ -271,7 +271,7 @@ class ArtifactIndex : public ArtifactExpr
{
public:
ArtifactIndex(std::shared_ptr<ArtifactExpr> expr, std::shared_ptr<ArtifactExpr> ind)
- : _expr(std::move(expr)), _ind(std::move(ind))
+ : _expr(std::move(expr)), _ind(std::move(ind))
{
}
@@ -328,8 +328,8 @@ public:
ArtifactVariable(std::string type_name, std::string var_name,
std::list<std::shared_ptr<ArtifactExpr>> dimensions = {},
std::list<std::shared_ptr<ArtifactExpr>> initializers = {})
- : _typeName(std::move(type_name)), _dimensions(std::move(dimensions)),
- _initializers(std::move(initializers)), ArtifactNamed(std::move(var_name))
+ : _typeName(std::move(type_name)), _dimensions(std::move(dimensions)),
+ _initializers(std::move(initializers)), ArtifactNamed(std::move(var_name))
{
}
@@ -469,7 +469,7 @@ public:
explicit ArtifactForLoop(std::shared_ptr<ArtifactVariable> init = nullptr,
std::shared_ptr<ArtifactExpr> cond = nullptr,
std::shared_ptr<ArtifactExpr> iter = nullptr)
- : _init(std::move(init)), _cond(std::move(cond)), _iter(std::move(iter))
+ : _init(std::move(init)), _cond(std::move(cond)), _iter(std::move(iter))
{
}
@@ -527,7 +527,7 @@ public:
*/
ArtifactFunction(std::string ret_type_name, const std::string &func_name,
std::list<std::shared_ptr<ArtifactVariable>> params = {})
- : ArtifactNamed(func_name), _params(std::move(params)), _retTypeName(std::move(ret_type_name))
+ : ArtifactNamed(func_name), _params(std::move(params)), _retTypeName(std::move(ret_type_name))
{
}
@@ -568,7 +568,7 @@ public:
const std::string &var_name,
const std::list<std::shared_ptr<ArtifactExpr>> &dimensions = {},
const std::list<std::shared_ptr<ArtifactExpr>> &initializers = {})
- : ArtifactClassMember(owner), ArtifactVariable(type_name, var_name, dimensions, initializers)
+ : ArtifactClassMember(owner), ArtifactVariable(type_name, var_name, dimensions, initializers)
{
}
@@ -584,7 +584,7 @@ public:
ArtifactClassFunction(const ArtifactClass *owner, const std::string &ret_type_name,
const std::string &func_name,
const std::list<std::shared_ptr<ArtifactVariable>> &params = {})
- : ArtifactClassMember(owner), ArtifactFunction(ret_type_name, func_name, params)
+ : ArtifactClassMember(owner), ArtifactFunction(ret_type_name, func_name, params)
{
}
diff --git a/compiler/nnc/backends/interpreter/InterpreterBackend.cpp b/compiler/nnc/backends/interpreter/InterpreterBackend.cpp
index 923a7cfc7..895daa115 100644
--- a/compiler/nnc/backends/interpreter/InterpreterBackend.cpp
+++ b/compiler/nnc/backends/interpreter/InterpreterBackend.cpp
@@ -104,7 +104,7 @@ static void writeTensorToHDF5File(const TensorVariant &tensor, std::string tenso
static TensorVariant readTensorFromFile(const std::string &filename, const TensorType &type)
{
const std::size_t input_data_size =
- type.getShape().numElements() * getDataTypeSize(type.getElementType());
+ type.getShape().numElements() * getDataTypeSize(type.getElementType());
std::ifstream stream(filename, std::ios::in | std::ios::binary);
if (stream.fail())
@@ -117,9 +117,9 @@ static TensorVariant readTensorFromFile(const std::string &filename, const Tenso
int64_t file_size = end - begin;
if (static_cast<std::size_t>(file_size) != input_data_size)
- throw std::runtime_error("File \"" + filename + "\" has incorrect size: " +
- std::to_string(file_size) + "(expected: " +
- std::to_string(input_data_size) + ").");
+ throw std::runtime_error("File \"" + filename +
+ "\" has incorrect size: " + std::to_string(file_size) +
+ "(expected: " + std::to_string(input_data_size) + ").");
std::unique_ptr<char[]> data(new char[input_data_size]);
stream.read(data.get(), input_data_size);
@@ -130,7 +130,7 @@ static TensorVariant readTensorFromFile(const std::string &filename, const Tenso
}
InterpreterBackend::InterpreterBackend(std::string input_dir, std::string output_dir)
- : _input_dir(std::move(input_dir)), _output_dir(std::move(output_dir))
+ : _input_dir(std::move(input_dir)), _output_dir(std::move(output_dir))
{
}
diff --git a/compiler/nnc/backends/soft_backend/CPPGenerator.cpp b/compiler/nnc/backends/soft_backend/CPPGenerator.cpp
index 236881b80..097122882 100644
--- a/compiler/nnc/backends/soft_backend/CPPGenerator.cpp
+++ b/compiler/nnc/backends/soft_backend/CPPGenerator.cpp
@@ -80,7 +80,7 @@ static unique_ptr<ofstream> getStream(const string &path)
}
CPPCodeGenerator::CPPCodeGenerator(std::string output_dir, std::string artifact_name)
- : _output_dir(std::move(output_dir)), _artifact_name(std::move(artifact_name))
+ : _output_dir(std::move(output_dir)), _artifact_name(std::move(artifact_name))
{
}
@@ -187,12 +187,14 @@ void CPPCodeGenerator::materializeHeader(ostream &out, const ModelAnalyzer &ma)
string class_name = ma.getModelName() + "Model";
out.write(cpp_header_types, sizeof(cpp_header_types));
- out << "class " << class_name << "\n"
- "{\n"
- "public:\n"
- " "
- << class_name << "(const std::string& parametersPath);\n"
- " ~"
+ out << "class " << class_name
+ << "\n"
+ "{\n"
+ "public:\n"
+ " "
+ << class_name
+ << "(const std::string& parametersPath);\n"
+ " ~"
<< class_name << "();\n";
// generate input setters
if (ma.getInputs().size() == 1)
@@ -215,10 +217,12 @@ void CPPCodeGenerator::materializeHeader(ostream &out, const ModelAnalyzer &ma)
out << " void doInference();\n\n"
"private:\n"
" "
- << class_name << "() = delete;\n"
- " "
- << class_name << "(const " << class_name << "& orig) = delete;\n"
- " "
+ << class_name
+ << "() = delete;\n"
+ " "
+ << class_name << "(const " << class_name
+ << "& orig) = delete;\n"
+ " "
<< class_name << "& operator=(const " << class_name << "& orig) = delete;\n";
// generate input/output tensors
for (const size_t in_tensor_id : ma.getInputs())
@@ -273,8 +277,9 @@ void CPPCodeGenerator::printSetter(ostream &out, const string &class_name,
{
const string &var_name = _formattedTensors[td.id];
- out << "bool " << class_name << "::set" << setter_name << "(const Tensor& t)\n"
- "{\n";
+ out << "bool " << class_name << "::set" << setter_name
+ << "(const Tensor& t)\n"
+ "{\n";
// need to insert input correctness check
const mir::Shape expected = td.shape;
int rank = expected.rank();
@@ -286,9 +291,10 @@ void CPPCodeGenerator::printSetter(ostream &out, const string &class_name,
out << " "
<< "if (t.getShape()[" << i << "] != " << expected.dim(i) << ") return false;\n";
}
- out << " " << var_name << " = t;\n"
- " return true;\n"
- "}\n\n";
+ out << " " << var_name
+ << " = t;\n"
+ " return true;\n"
+ "}\n\n";
}
void CPPCodeGenerator::printGetter(ostream &out, const string &class_name,
@@ -296,11 +302,13 @@ void CPPCodeGenerator::printGetter(ostream &out, const string &class_name,
{
const string &var_name = _formattedTensors[td.id];
- out << "shared_ptr<Tensor> " << class_name << "::get" << getter_name << "()\n"
- "{\n"
- " return "
- << var_name << ";\n"
- "}\n\n";
+ out << "shared_ptr<Tensor> " << class_name << "::get" << getter_name
+ << "()\n"
+ "{\n"
+ " return "
+ << var_name
+ << ";\n"
+ "}\n\n";
}
void CPPCodeGenerator::materializeCall(ostream &out, const ModelAnalyzer &ma,
@@ -435,13 +443,15 @@ void CPPCodeGenerator::materializeCode(ostream &out, const ModelAnalyzer &ma, co
<< "(const string& parametersPath)\n"
"{\n"
" readParameters(_parameters, _paramSize, parametersPath, "
- << s.getFormatVersion() << ", " << s.getModelHash() << ");\n"
- "}\n\n";
+ << s.getFormatVersion() << ", " << s.getModelHash()
+ << ");\n"
+ "}\n\n";
// gen NN destructor
- out << class_name << "::~" << class_name << "()\n"
- "{\n"
- " releaseParameters(_parameters, _paramSize);\n"
- "}\n\n";
+ out << class_name << "::~" << class_name
+ << "()\n"
+ "{\n"
+ " releaseParameters(_parameters, _paramSize);\n"
+ "}\n\n";
// generate input setters
// generate main setter if network has only one
const auto &inputs = ma.getInputs();
@@ -473,8 +483,9 @@ void CPPCodeGenerator::materializeCode(ostream &out, const ModelAnalyzer &ma, co
const TensorDescriptor &td = tensors[output_tensor_id];
printGetter(out, class_name, output_tensor_name, td);
}
- out << "void " << class_name << "::doInference()\n"
- "{\n";
+ out << "void " << class_name
+ << "::doInference()\n"
+ "{\n";
for (size_t output_tensor_id : ma.getPersistentTensors())
{
const string &output_tensor_name = _formattedTensors[output_tensor_id];
diff --git a/compiler/nnc/backends/soft_backend/ModelAnalyzer.cpp b/compiler/nnc/backends/soft_backend/ModelAnalyzer.cpp
index 82e62b531..2d555d0a9 100644
--- a/compiler/nnc/backends/soft_backend/ModelAnalyzer.cpp
+++ b/compiler/nnc/backends/soft_backend/ModelAnalyzer.cpp
@@ -62,7 +62,7 @@ void ModelAnalyzer::appendOperationToInference(Operation *op, const string &func
{
const auto &tensor_name = output.getName();
const auto tensor_id =
- tensor_name.empty() ? declareTemporaryTensor() : declarePersistentTensor(tensor_name);
+ tensor_name.empty() ? declareTemporaryTensor() : declarePersistentTensor(tensor_name);
node_output_tensors.push_back(tensor_id);
}
}
@@ -82,7 +82,7 @@ void ModelAnalyzer::appendOperationToInference(Operation *op, const string &func
std::copy(aux_args.begin(), aux_args.end(), std::back_inserter(node_input_tensors));
unique_ptr<Action> operation_call(new CallFunction(
- op, function_name, std::move(node_input_tensors), std::move(node_output_tensors)));
+ op, function_name, std::move(node_input_tensors), std::move(node_output_tensors)));
_inferenceSequence.push_back(std::move(operation_call));
_opToDescr[op] = _inferenceSequence.back().get();
}
diff --git a/compiler/nnc/backends/soft_backend/ModelAnalyzer.h b/compiler/nnc/backends/soft_backend/ModelAnalyzer.h
index 471c31011..6522bc655 100644
--- a/compiler/nnc/backends/soft_backend/ModelAnalyzer.h
+++ b/compiler/nnc/backends/soft_backend/ModelAnalyzer.h
@@ -42,9 +42,9 @@ class ModelAnalyzer : public mir::Visitor
{
public:
/**
- * @brief contructs inference sequence
- * @param g pointer to graph to linearize
- */
+ * @brief contructs inference sequence
+ * @param g pointer to graph to linearize
+ */
void analyze(const mir::Graph *g);
void visit(mir::ops::AbsOp &) override;
diff --git a/compiler/nnc/backends/soft_backend/SequencedIR.h b/compiler/nnc/backends/soft_backend/SequencedIR.h
index 9a761243e..ff062e043 100644
--- a/compiler/nnc/backends/soft_backend/SequencedIR.h
+++ b/compiler/nnc/backends/soft_backend/SequencedIR.h
@@ -91,7 +91,7 @@ struct TransposeTensor : public Action
{
TransposeTensor(size_t input, size_t output, std::vector<int32_t> &&perm)
- : Action(Type::transposeTensor), perm(std::move(perm)), input(input), output(output)
+ : Action(Type::transposeTensor), perm(std::move(perm)), input(input), output(output)
{
}
@@ -121,8 +121,8 @@ struct CallFunction : public Action
CallFunction(mir::Operation *op, std::string func_name, std::vector<size_t> &&inputs,
std::vector<size_t> &&outputs)
- : Action(Type::callFunction), mirOp(op), funcName(std::move(func_name)), inputs(inputs),
- outputs(outputs), paramStartOffset(0)
+ : Action(Type::callFunction), mirOp(op), funcName(std::move(func_name)), inputs(inputs),
+ outputs(outputs), paramStartOffset(0)
{
}
diff --git a/compiler/nnc/driver/Options.cpp b/compiler/nnc/driver/Options.cpp
index e22d01847..c1997fe6a 100644
--- a/compiler/nnc/driver/Options.cpp
+++ b/compiler/nnc/driver/Options.cpp
@@ -35,7 +35,7 @@ Option<bool> caffeFrontend(optname("--caffe"), overview("treat input file as Caf
#else
showopt(false)
#endif // NNC_FRONTEND_CAFFE_ENABLED
- );
+);
Option<bool> onnxFrontend(optname("--onnx"), overview("treat input file as ONNX model"), false,
optional(true), optvalues(""), nullptr, separators(""),
#ifdef NNC_FRONTEND_ONNX_ENABLED
@@ -43,7 +43,7 @@ Option<bool> onnxFrontend(optname("--onnx"), overview("treat input file as ONNX
#else
showopt(false)
#endif // NNC_FRONTEND_ONNX_ENABLED
- );
+);
Option<bool> caffe2Frontend(optname("--caffe2"),
overview("treat input file as Caffe2 model (predict_net.pb)"), false,
@@ -83,16 +83,16 @@ Option<bool> tflFrontend(optname("--tflite"),
#else
showopt(false)
#endif // NNC_FRONTEND_TFLITE_ENABLED
- );
+);
Option<std::string>
- target(optname("--target"),
- overview("select target language to emit for given architecture."
- "Valid values are '" NNC_TARGET_ARM_CPP "', '" NNC_TARGET_X86_CPP
- "', '" NNC_TARGET_ARM_GPU_CPP "', '" NNC_TARGET_INTERPRETER "'"),
- std::string(), optional(false),
- optvalues(NNC_TARGET_ARM_CPP "," NNC_TARGET_X86_CPP "," NNC_TARGET_ARM_GPU_CPP
- "," NNC_TARGET_INTERPRETER),
- nullptr, separators("="));
+ target(optname("--target"),
+ overview("select target language to emit for given architecture."
+ "Valid values are '" NNC_TARGET_ARM_CPP "', '" NNC_TARGET_X86_CPP
+ "', '" NNC_TARGET_ARM_GPU_CPP "', '" NNC_TARGET_INTERPRETER "'"),
+ std::string(), optional(false),
+ optvalues(NNC_TARGET_ARM_CPP "," NNC_TARGET_X86_CPP "," NNC_TARGET_ARM_GPU_CPP
+ "," NNC_TARGET_INTERPRETER),
+ nullptr, separators("="));
/**
* Options for *frontend*
diff --git a/compiler/nnc/include/pass/PassData.h b/compiler/nnc/include/pass/PassData.h
index e2c0b8129..1ff8af927 100644
--- a/compiler/nnc/include/pass/PassData.h
+++ b/compiler/nnc/include/pass/PassData.h
@@ -30,9 +30,8 @@ class PassData
{
public:
/* implicit */ PassData(std::nullptr_t data)
- : // NOLINT(google-explicit-constructor, hicpp-explicit-conversions)
- _dataContainer{.unknown = data},
- _dataType(PDT::UNKNOWN)
+ : // NOLINT(google-explicit-constructor, hicpp-explicit-conversions)
+ _dataContainer{.unknown = data}, _dataType(PDT::UNKNOWN)
{
}
@@ -40,9 +39,8 @@ public:
* @brief Implicit conversion from Graph* to PassData
*/
/* implicit */ PassData(mir::Graph *graph)
- : // NOLINT(google-explicit-constructor, hicpp-explicit-conversions)
- _dataContainer{.graph = graph},
- _dataType(PDT::GRAPH)
+ : // NOLINT(google-explicit-constructor, hicpp-explicit-conversions)
+ _dataContainer{.graph = graph}, _dataType(PDT::GRAPH)
{
}
@@ -60,9 +58,8 @@ public:
* @brief Implicit conversion from Graph* to PassData
*/
/* implicit */ PassData(mir::TensorVariant *tv)
- : // NOLINT(google-explicit-constructor, hicpp-explicit-conversions)
- _dataContainer{.tensorVariant = tv},
- _dataType(PDT::TENSOR_VARIANT)
+ : // NOLINT(google-explicit-constructor, hicpp-explicit-conversions)
+ _dataContainer{.tensorVariant = tv}, _dataType(PDT::TENSOR_VARIANT)
{
}
diff --git a/compiler/nnc/include/passes/optimizations/CombineTransposes.h b/compiler/nnc/include/passes/optimizations/CombineTransposes.h
index 7d227cd5d..a08676e47 100644
--- a/compiler/nnc/include/passes/optimizations/CombineTransposes.h
+++ b/compiler/nnc/include/passes/optimizations/CombineTransposes.h
@@ -33,6 +33,7 @@ public:
PassData run(PassData data) override;
std::string getName() override { return "opt_combine_transposes"; };
+
private:
};
diff --git a/compiler/nnc/include/passes/optimizations/OptimizationUtils.h b/compiler/nnc/include/passes/optimizations/OptimizationUtils.h
index 9a9212c12..83f455b2d 100644
--- a/compiler/nnc/include/passes/optimizations/OptimizationUtils.h
+++ b/compiler/nnc/include/passes/optimizations/OptimizationUtils.h
@@ -25,11 +25,11 @@ namespace nnc
namespace opt_util
{
/**
-* @brief Swap adjacent nodes in Graph. Creates new nodes and replaces the old ones with new.
-* @param g MIR Graph
-* @param top Node
-* @param bottom Node
-*/
+ * @brief Swap adjacent nodes in Graph. Creates new nodes and replaces the old ones with new.
+ * @param g MIR Graph
+ * @param top Node
+ * @param bottom Node
+ */
void swapAdjacent(mir::Graph *g, mir::Operation *top, mir::Operation *bottom);
// TODO: this function and its usages should be removed once DCE optimization is implemented
diff --git a/compiler/nnc/include/support/CommandLine.h b/compiler/nnc/include/support/CommandLine.h
index 40777ff46..66466276d 100644
--- a/compiler/nnc/include/support/CommandLine.h
+++ b/compiler/nnc/include/support/CommandLine.h
@@ -38,7 +38,7 @@ class BadOption : public std::logic_error
{
public:
explicit BadOption(const std::string &msg, std::string optname = "", std::string value = "")
- : std::logic_error(msg), _option_name(std::move(optname)), _option_value(std::move(value))
+ : std::logic_error(msg), _option_name(std::move(optname)), _option_value(std::move(value))
{
}
@@ -387,7 +387,7 @@ private:
std::map<std::string, IOption *> _options_name; // map of name -> option
std::vector<IOption *> _options; // options
std::map<IOption::Group, std::vector<IOption *>>
- _grouped_options; // map of groups: group -> vector of options
+ _grouped_options; // map of groups: group -> vector of options
std::string _prog_name; // name of program
int _args_num = 0; // number of command line arguments
};
@@ -530,7 +530,7 @@ Option<T>::Option(const std::vector<std::string> &optnames, const std::string &d
_group = group;
_can_have_several_vals =
- std::is_same<T, std::vector<std::string>>::value || std::is_same<T, std::vector<int>>::value;
+ std::is_same<T, std::vector<std::string>>::value || std::is_same<T, std::vector<int>>::value;
assert(!(_can_have_several_vals && !_seps.empty()) &&
"option with several values can't have separators");
diff --git a/compiler/nnc/passes/optimizations/CombineTransposes.cpp b/compiler/nnc/passes/optimizations/CombineTransposes.cpp
index e381a9cae..8a584d2d5 100644
--- a/compiler/nnc/passes/optimizations/CombineTransposes.cpp
+++ b/compiler/nnc/passes/optimizations/CombineTransposes.cpp
@@ -72,12 +72,12 @@ nnc::PassData nnc::CombineTransposes::run(nnc::PassData data)
};
auto *bottom_transpose = dynamic_cast<mir::ops::TransposeOp *>(match.second);
auto combined_axis_order =
- combineAxisOrders(top_transpose->getAxisOrder(), bottom_transpose->getAxisOrder());
+ combineAxisOrders(top_transpose->getAxisOrder(), bottom_transpose->getAxisOrder());
if (!isIdentityTranspose(combined_axis_order))
{
auto new_tr_op =
- g->create<mir::ops::TransposeOp>(top_transpose->getInput(0), combined_axis_order);
+ g->create<mir::ops::TransposeOp>(top_transpose->getInput(0), combined_axis_order);
g->replaceNode(bottom_transpose, new_tr_op);
}
diff --git a/compiler/nnc/passes/optimizations/DeadCodeElimination.cpp b/compiler/nnc/passes/optimizations/DeadCodeElimination.cpp
index b89dca1b7..371d9703f 100644
--- a/compiler/nnc/passes/optimizations/DeadCodeElimination.cpp
+++ b/compiler/nnc/passes/optimizations/DeadCodeElimination.cpp
@@ -33,8 +33,8 @@ nnc::PassData nnc::DeadCodeElimination::run(PassData data)
return;
bool has_no_uses =
- std::all_of(op->getOutputs().cbegin(), op->getOutputs().cend(),
- [](const Operation::Output &output) { return output.getUses().empty(); });
+ std::all_of(op->getOutputs().cbegin(), op->getOutputs().cend(),
+ [](const Operation::Output &output) { return output.getUses().empty(); });
if (has_no_uses)
{
diff --git a/compiler/nnc/passes/optimizations/FuseArithmeticOps.cpp b/compiler/nnc/passes/optimizations/FuseArithmeticOps.cpp
index 91686ef74..d69439fc3 100644
--- a/compiler/nnc/passes/optimizations/FuseArithmeticOps.cpp
+++ b/compiler/nnc/passes/optimizations/FuseArithmeticOps.cpp
@@ -215,10 +215,10 @@ bool sinkAddThroughMul(Graph *g)
// Create new operations
auto old_add_input = old_add_op->getInput(0);
auto new_mul_op =
- g->copyOpWithInputs(old_mul_op, {old_add_input, ols_mul_const_op->getOutput(0)});
+ g->copyOpWithInputs(old_mul_op, {old_add_input, ols_mul_const_op->getOutput(0)});
auto new_add_const_op = mergeConstantOps(g, old_add_const_op, ols_mul_const_op, OpType::mul);
auto new_add_op =
- g->copyOpWithInputs(old_add_op, {new_mul_op->getOutput(0), new_add_const_op->getOutput(0)});
+ g->copyOpWithInputs(old_add_op, {new_mul_op->getOutput(0), new_add_const_op->getOutput(0)});
// Replace old mul with new add and remove old nodes
g->replaceNode(old_mul_op, new_add_op);
diff --git a/compiler/nnc/passes/transformations/DataFormatSwitcher.cpp b/compiler/nnc/passes/transformations/DataFormatSwitcher.cpp
index 8ff842660..fcdbba878 100644
--- a/compiler/nnc/passes/transformations/DataFormatSwitcher.cpp
+++ b/compiler/nnc/passes/transformations/DataFormatSwitcher.cpp
@@ -27,7 +27,7 @@
namespace nnc
{
DataFormatSwitcher::DataFormatSwitcher(const mir::DataFormat target_format)
- : _target_format(target_format)
+ : _target_format(target_format)
{
}
@@ -89,10 +89,10 @@ mir::Operation::Output *DataFormatSwitcher::insertTransposeBefore(mir::Operation
mir::Operation::Output *new_out;
if (_target_format == mir::DataFormat::NHWC)
new_out = _graph->create<mir::ops::TransposeOp>(out, std::vector<std::size_t>{0, 2, 3, 1})
- ->getOutput(0); // NCHW -> NHWC
+ ->getOutput(0); // NCHW -> NHWC
else
new_out = _graph->create<mir::ops::TransposeOp>(out, std::vector<std::size_t>{0, 3, 1, 2})
- ->getOutput(0); // NHWC -> NCHW
+ ->getOutput(0); // NHWC -> NCHW
if (out->getType().isQuantized())
new_out->setQuantization(out->getType().getQuantization());
return new_out;
@@ -103,10 +103,10 @@ mir::Operation::Output *DataFormatSwitcher::insertTransposeAfter(mir::Operation:
mir::Operation::Output *new_out;
if (_target_format == mir::DataFormat::NHWC)
new_out = _graph->create<mir::ops::TransposeOp>(out, std::vector<std::size_t>{0, 3, 1, 2})
- ->getOutput(0); // NHWC -> NCHW
+ ->getOutput(0); // NHWC -> NCHW
else
new_out = _graph->create<mir::ops::TransposeOp>(out, std::vector<std::size_t>{0, 2, 3, 1})
- ->getOutput(0); // NCHW -> NHWC
+ ->getOutput(0); // NCHW -> NHWC
if (out->getType().isQuantized())
new_out->setQuantization(out->getType().getQuantization());
return new_out;
diff --git a/compiler/nnc/passes/transformations/LowerConv2D.cpp b/compiler/nnc/passes/transformations/LowerConv2D.cpp
index 9e32978bc..9ae20527d 100644
--- a/compiler/nnc/passes/transformations/LowerConv2D.cpp
+++ b/compiler/nnc/passes/transformations/LowerConv2D.cpp
@@ -36,11 +36,11 @@ static void lowerConv2D(mir::Graph *graph, mir::ops::Conv2DOp *op)
// [O, H, W, I / M] == [M, H, W, 1] -> [H, W, M, 1]
std::vector<std::size_t> perm{1, 2, 0, 3};
mir::Operation::Output *new_kernel =
- graph->create<mir::ops::TransposeOp>(kernel, perm)->getOutput(0);
+ graph->create<mir::ops::TransposeOp>(kernel, perm)->getOutput(0);
mir::Conv2DOpAttributes attributes = op->getAttributes();
attributes.num_groups = 1;
mir::Operation::Output *new_result =
- graph->create<mir::ops::DepthwiseConv2DOp>(input, new_kernel, attributes)->getOutput(0);
+ graph->create<mir::ops::DepthwiseConv2DOp>(input, new_kernel, attributes)->getOutput(0);
graph->replaceNode(op, new_result->getNode());
}
}
diff --git a/compiler/nnc/tests/acl_soft_backend/AclCppOperations.cpp b/compiler/nnc/tests/acl_soft_backend/AclCppOperations.cpp
index 4ae020355..d39c9dcb5 100644
--- a/compiler/nnc/tests/acl_soft_backend/AclCppOperations.cpp
+++ b/compiler/nnc/tests/acl_soft_backend/AclCppOperations.cpp
@@ -157,7 +157,7 @@ static void runAclSystemTest(const string &name)
// Copy the model input HDF5 file to the remote device.
ASSERT_TRUE(
- copyToOdroid(binDir + "/" + name + "/in_" + name + "_caffe.hdf5", dir_name + "/in.hdf5"));
+ copyToOdroid(binDir + "/" + name + "/in_" + name + "_caffe.hdf5", dir_name + "/in.hdf5"));
// Switch to the artifact directory on the remote device and run the artifact.
ASSERT_TRUE(runOnOdroid("cd " + dir_name + "; ./nnc_test"));
diff --git a/compiler/nnc/tests/acl_soft_backend/artifact_cmake/main.cpp b/compiler/nnc/tests/acl_soft_backend/artifact_cmake/main.cpp
index c326b390b..ea4bddac8 100644
--- a/compiler/nnc/tests/acl_soft_backend/artifact_cmake/main.cpp
+++ b/compiler/nnc/tests/acl_soft_backend/artifact_cmake/main.cpp
@@ -31,12 +31,13 @@ static unique_ptr<char[]> getTensorData(CLTensor &tensor)
Iterator i(&tensor, window);
char *ptr = &buf[0];
- execute_window_loop(window,
- [&i, &ptr](const Coordinates &) {
- memcpy(ptr, i.ptr(), sizeof(float));
- ptr += sizeof(float);
- },
- i);
+ execute_window_loop(
+ window,
+ [&i, &ptr](const Coordinates &) {
+ memcpy(ptr, i.ptr(), sizeof(float));
+ ptr += sizeof(float);
+ },
+ i);
tensor.unmap();
return buf;
@@ -52,12 +53,13 @@ static void readTensor(CLTensor &tensor, H5::DataSet &dataset)
Iterator i(&tensor, window);
char *ptr = &buf[0];
- execute_window_loop(window,
- [&i, &ptr](const Coordinates &) {
- memcpy(i.ptr(), ptr, sizeof(float));
- ptr += sizeof(float);
- },
- i);
+ execute_window_loop(
+ window,
+ [&i, &ptr](const Coordinates &) {
+ memcpy(i.ptr(), ptr, sizeof(float));
+ ptr += sizeof(float);
+ },
+ i);
tensor.unmap();
}
diff --git a/compiler/nnc/tests/soft_backend/CompileCPP.cpp b/compiler/nnc/tests/soft_backend/CompileCPP.cpp
index 63aeb4a1b..4ede0cf05 100644
--- a/compiler/nnc/tests/soft_backend/CompileCPP.cpp
+++ b/compiler/nnc/tests/soft_backend/CompileCPP.cpp
@@ -101,7 +101,7 @@ int main()
string target_compiler = "g++ -Wall --std=c++11";
string compiler_command =
- target_compiler + " -I" + output_dir + " " + main_path + " " + code_path;
+ target_compiler + " -I" + output_dir + " " + main_path + " " + code_path;
// call compiler
int res = system(compiler_command.c_str());
diff --git a/compiler/nnc/unittests/acl_backend/DOMToText.cpp b/compiler/nnc/unittests/acl_backend/DOMToText.cpp
index be0e6713c..aaf0c2055 100644
--- a/compiler/nnc/unittests/acl_backend/DOMToText.cpp
+++ b/compiler/nnc/unittests/acl_backend/DOMToText.cpp
@@ -148,9 +148,9 @@ TEST(acl_backend_dom_to_text, ArtifactUnaryExpr)
const char *var_name = "id";
shared_ptr<ArtifactId> var = AF::id(var_name);
pair<ArtifactUnOp, const char *> test_cases[] = {
- {ArtifactUnOp::preIncr, "++id"}, {ArtifactUnOp::preDecr, "--id"},
- {ArtifactUnOp::heapNew, "new id"}, {ArtifactUnOp::heapFree, "delete id"},
- {ArtifactUnOp::postIncr, "id++"}, {ArtifactUnOp::postDecr, "id--"}};
+ {ArtifactUnOp::preIncr, "++id"}, {ArtifactUnOp::preDecr, "--id"},
+ {ArtifactUnOp::heapNew, "new id"}, {ArtifactUnOp::heapFree, "delete id"},
+ {ArtifactUnOp::postIncr, "id++"}, {ArtifactUnOp::postDecr, "id--"}};
for (auto test : test_cases)
{
@@ -181,14 +181,14 @@ TEST(acl_backend_dom_to_text, ArtifactBinaryExpr)
shared_ptr<ArtifactId> op2 = AF::id(op2_name);
pair<ArtifactBinOp, const char *> test_cases[] = {
- {ArtifactBinOp::eq, "a == b"}, {ArtifactBinOp::notEq, "a != b"},
- {ArtifactBinOp::less, "a < b"}, {ArtifactBinOp::lessOrEq, "a <= b"},
- {ArtifactBinOp::great, "a > b"}, {ArtifactBinOp::greatOrEq, "a >= b"},
- {ArtifactBinOp::assign, "a = b"}, {ArtifactBinOp::plus, "a + b"},
- {ArtifactBinOp::minus, "a - b"}, {ArtifactBinOp::mult, "a * b"},
- {ArtifactBinOp::div, "a / b"}, {ArtifactBinOp::plusAssign, "a += b"},
- {ArtifactBinOp::minusAssign, "a -= b"}, {ArtifactBinOp::multAssign, "a *= b"},
- {ArtifactBinOp::divAssign, "a /= b"}};
+ {ArtifactBinOp::eq, "a == b"}, {ArtifactBinOp::notEq, "a != b"},
+ {ArtifactBinOp::less, "a < b"}, {ArtifactBinOp::lessOrEq, "a <= b"},
+ {ArtifactBinOp::great, "a > b"}, {ArtifactBinOp::greatOrEq, "a >= b"},
+ {ArtifactBinOp::assign, "a = b"}, {ArtifactBinOp::plus, "a + b"},
+ {ArtifactBinOp::minus, "a - b"}, {ArtifactBinOp::mult, "a * b"},
+ {ArtifactBinOp::div, "a / b"}, {ArtifactBinOp::plusAssign, "a += b"},
+ {ArtifactBinOp::minusAssign, "a -= b"}, {ArtifactBinOp::multAssign, "a *= b"},
+ {ArtifactBinOp::divAssign, "a /= b"}};
for (auto test : test_cases)
{
@@ -286,12 +286,12 @@ TEST(acl_backend_dom_to_text, ArtifactForLoop)
shared_ptr<ArtifactVariable> iter = AF::var(var_type, var_name, {}, {AF::lit("0")});
shared_ptr<ArtifactExpr> step =
- AF::bin(ArtifactBinOp::plusAssign, AF::id(var_name), AF::lit("1"));
+ AF::bin(ArtifactBinOp::plusAssign, AF::id(var_name), AF::lit("1"));
shared_ptr<ArtifactExpr> cond =
- AF::bin(ArtifactBinOp::lessOrEq, AF::id(var_name), AF::lit("123"));
+ AF::bin(ArtifactBinOp::lessOrEq, AF::id(var_name), AF::lit("123"));
shared_ptr<ArtifactBinaryExpr> expr =
- AF::bin(ArtifactBinOp::plusAssign, AF::id("hello"), AF::id("world"));
+ AF::bin(ArtifactBinOp::plusAssign, AF::id("hello"), AF::id("world"));
ArtifactForLoop loop(iter, cond, step);
@@ -308,10 +308,10 @@ TEST(acl_backend_dom_to_text, ArtifactIf)
const char *var_name = "i";
shared_ptr<ArtifactExpr> cond =
- AF::bin(ArtifactBinOp::lessOrEq, AF::id(var_name), AF::lit("123"));
+ AF::bin(ArtifactBinOp::lessOrEq, AF::id(var_name), AF::lit("123"));
shared_ptr<ArtifactBinaryExpr> expr =
- AF::bin(ArtifactBinOp::plusAssign, AF::id("hello"), AF::id("world"));
+ AF::bin(ArtifactBinOp::plusAssign, AF::id("hello"), AF::id("world"));
ArtifactIf if_stmt(cond);
@@ -415,7 +415,7 @@ static shared_ptr<ArtifactClassVariable> createClsVariable(ArtifactClass &cls, c
list<shared_ptr<ArtifactExpr>> dims{dim1, dim2};
list<shared_ptr<ArtifactExpr>> initializers{AF::lit("123")};
shared_ptr<ArtifactClassVariable> var_decl =
- cls.var(is_public, var_type, var_name, dims, initializers);
+ cls.var(is_public, var_type, var_name, dims, initializers);
return var_decl;
}
@@ -483,8 +483,8 @@ TEST(acl_backend_dom_to_text, ArtifactModule)
const char *code_prefix = "#include \"module.h\"\n\n#include <list>\n\n#include \"bar.h\"\n\n";
const char *code_suffix = "\nClass::Class() {\n}\n\n";
- string ref_data = string(code_prefix) +
- string(AclArtifactUtilities, sizeof(AclArtifactUtilities)) + code_suffix;
+ string ref_data =
+ string(code_prefix) + string(AclArtifactUtilities, sizeof(AclArtifactUtilities)) + code_suffix;
m.accept(&code_gen);
ASSERT_EQ(code_out.str(), ref_data);
diff --git a/compiler/nnc/unittests/acl_backend/MIRToDOM.cpp b/compiler/nnc/unittests/acl_backend/MIRToDOM.cpp
index a9b36a145..f411fde42 100644
--- a/compiler/nnc/unittests/acl_backend/MIRToDOM.cpp
+++ b/compiler/nnc/unittests/acl_backend/MIRToDOM.cpp
@@ -117,12 +117,12 @@ void checkDomIncludes(const ArtifactModule &m)
// check ordinary includes, like '#include "artifact_data.h"'
checkHeadersSetsEqual(
- m.headerIncludes(),
- {"arm_compute/core/Types.h", "arm_compute/runtime/BlobLifetimeManager.h",
- "arm_compute/runtime/CL/CLBufferAllocator.h", "arm_compute/runtime/CL/CLFunctions.h",
- "arm_compute/runtime/CL/CLScheduler.h", "arm_compute/runtime/MemoryManagerOnDemand.h",
- "arm_compute/runtime/PoolManager.h"},
- "system header includes diverged");
+ m.headerIncludes(),
+ {"arm_compute/core/Types.h", "arm_compute/runtime/BlobLifetimeManager.h",
+ "arm_compute/runtime/CL/CLBufferAllocator.h", "arm_compute/runtime/CL/CLFunctions.h",
+ "arm_compute/runtime/CL/CLScheduler.h", "arm_compute/runtime/MemoryManagerOnDemand.h",
+ "arm_compute/runtime/PoolManager.h"},
+ "system header includes diverged");
checkHeadersSetsEqual(m.sourceSysIncludes(), {}, "system source includes diverged");
}
@@ -287,10 +287,10 @@ TEST(acl_backend_mir_to_dom, conv2d)
Graph g;
OpConstructor op_generator =
- [kernel_tensor](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
- auto kernel = g.create<mir::ops::ConstantOp>(kernel_tensor)->getOutput(0);
- return g.create<mir::ops::Conv2DOp>(inputs[0], kernel, mir::Conv2DOpAttributes());
- };
+ [kernel_tensor](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ auto kernel = g.create<mir::ops::ConstantOp>(kernel_tensor)->getOutput(0);
+ return g.create<mir::ops::Conv2DOp>(inputs[0], kernel, mir::Conv2DOpAttributes());
+ };
vector<Shape> input_shapes{{1, 10, 10, channels}};
@@ -312,11 +312,11 @@ TEST(acl_backend_mir_to_dom, depthwise_conv)
Graph g;
OpConstructor op_generator =
- [kernel_tensor](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
- Conv2DOpAttributes attributes;
- auto kernel = g.create<mir::ops::ConstantOp>(kernel_tensor)->getOutput(0);
- return g.create<mir::ops::DepthwiseConv2DOp>(inputs[0], kernel, attributes);
- };
+ [kernel_tensor](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ Conv2DOpAttributes attributes;
+ auto kernel = g.create<mir::ops::ConstantOp>(kernel_tensor)->getOutput(0);
+ return g.create<mir::ops::DepthwiseConv2DOp>(inputs[0], kernel, attributes);
+ };
vector<Shape> input_shapes{{1, 10, 10, channels}};
diff --git a/compiler/nnc/unittests/optimizations/SinkTest.cpp b/compiler/nnc/unittests/optimizations/SinkTest.cpp
index 8c5b2767e..be171d1cb 100644
--- a/compiler/nnc/unittests/optimizations/SinkTest.cpp
+++ b/compiler/nnc/unittests/optimizations/SinkTest.cpp
@@ -103,7 +103,7 @@ TEST(OptPass, sinkTrConcat)
Operation *tr1 = g.create<ops::TransposeOp>(in1->getOutput(0), vector<size_t>{0, 3, 1, 2});
Operation *tr2 = g.create<ops::TransposeOp>(in2->getOutput(0), vector<size_t>{0, 3, 1, 2});
Operation *conc =
- g.create<ops::ConcatOp>(vector<Operation::Output *>{tr1->getOutput(0), tr2->getOutput(0)}, 1);
+ g.create<ops::ConcatOp>(vector<Operation::Output *>{tr1->getOutput(0), tr2->getOutput(0)}, 1);
Operation *tanh = g.create<ops::TanhOp>(conc->getOutput(0));
Operation *out = g.create<ops::OutputOp>(tanh->getOutput(0));
(void)out;
@@ -141,7 +141,7 @@ TEST(OptPass, sinkReluConcat)
Operation *relu1 = g.create<ops::ReluOp>(in1->getOutput(0));
Operation *relu2 = g.create<ops::ReluOp>(in2->getOutput(0));
Operation *conc = g.create<ops::ConcatOp>(
- vector<Operation::Output *>{relu1->getOutput(0), relu2->getOutput(0)}, 1);
+ vector<Operation::Output *>{relu1->getOutput(0), relu2->getOutput(0)}, 1);
Operation *tanh = g.create<ops::TanhOp>(conc->getOutput(0));
Operation *out = g.create<ops::OutputOp>(tanh->getOutput(0));
(void)out;
diff --git a/compiler/nnc/unittests/soft_backend/CPPOperations.cpp b/compiler/nnc/unittests/soft_backend/CPPOperations.cpp
index 508ee954d..e593333fa 100644
--- a/compiler/nnc/unittests/soft_backend/CPPOperations.cpp
+++ b/compiler/nnc/unittests/soft_backend/CPPOperations.cpp
@@ -120,11 +120,10 @@ namespace
* @brief Creates graph with one operation generated by opGen function and returns this operation
* node
*/
-mir::Operation *
-fillGraph(mir::Graph &g,
- const function<mir::Operation *(mir::Graph &g, vector<mir::Operation::Output *> &inputs)>
- &op_gen,
- const vector<unique_ptr<mir::TensorVariant>> &input_ntensors)
+mir::Operation *fillGraph(
+ mir::Graph &g,
+ const function<mir::Operation *(mir::Graph &g, vector<mir::Operation::Output *> &inputs)> &op_gen,
+ const vector<unique_ptr<mir::TensorVariant>> &input_ntensors)
{
// Create operation inputs.
vector<mir::Operation::Output *> inputs;
@@ -295,8 +294,8 @@ void compareResults(const mir::TensorVariant &ref_nnc_tensor, const Tensor &test
float ref_data = mir::Tensor<float>(ref_nnc_tensor).at(nnc_idx);
float test_data = test_art_tensor.at(artifact_idx);
ASSERT_TRUE(areFloatsNear(ref_data, test_data, 32, 1e-5))
- << "Tensor element " << nnc_idx << " diverged, reference: " << ref_data
- << " test result: " << test_data;
+ << "Tensor element " << nnc_idx << " diverged, reference: " << ref_data
+ << " test result: " << test_data;
}
}
@@ -306,10 +305,10 @@ void compareResults(const mir::TensorVariant &ref_nnc_tensor, const Tensor &test
*/
template <typename TestFunc, typename... Args>
void createAndRunTestGraph(
- function<mir::Operation *(mir::Graph &, const std::vector<mir::Operation::Output *> &inputs)>
- op_generator,
- TestFunc artifactOperation, const vector<unique_ptr<mir::TensorVariant>> &input_ntensors,
- Args &... input_atensors)
+ function<mir::Operation *(mir::Graph &, const std::vector<mir::Operation::Output *> &inputs)>
+ op_generator,
+ TestFunc artifactOperation, const vector<unique_ptr<mir::TensorVariant>> &input_ntensors,
+ Args &... input_atensors)
{
mir::Graph g;
mir::Operation *actual_operation = fillGraph(g, op_generator, input_ntensors);
@@ -657,7 +656,7 @@ TEST(cpp_operations_test, resize_NN_test)
auto op_generator = [&res_shape](mir::Graph &g,
const std::vector<mir::Operation::Output *> &inputs) {
return g.create<mir::ops::ResizeOp>(
- inputs[0], mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, res_shape);
+ inputs[0], mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, res_shape);
};
createAndRunTestGraph(op_generator, resize, input_ntensors, input_atensor);
@@ -668,7 +667,7 @@ TEST(cpp_operations_test, resize_NN_test_scales)
{
cout << "\n";
std::vector<float> test_scales[] = {
- {1, 2, 2, 1}, {1, 2, 3, 1}, {1, 3, 2, 1}, {1, 2.5, 2, 1}, {1, 3, 9, 1}};
+ {1, 2, 2, 1}, {1, 2, 3, 1}, {1, 3, 2, 1}, {1, 2.5, 2, 1}, {1, 3, 9, 1}};
for (const std::vector<float> &scales : test_scales)
{
vector<int> input_shape_data{1, 4, 4, 1};
@@ -678,7 +677,7 @@ TEST(cpp_operations_test, resize_NN_test_scales)
auto op_generator = [&scales](mir::Graph &g,
const std::vector<mir::Operation::Output *> &inputs) {
return g.create<mir::ops::ResizeOp>(
- inputs[0], mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales);
+ inputs[0], mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales);
};
createAndRunTestGraph(op_generator, resize, input_ntensors, input_atensor);
}
@@ -711,10 +710,10 @@ TEST(cpp_operations_test, avgpool)
for (const auto include_pad : {false, true})
{
attributes.include_pad = include_pad;
- auto op_generator = [&attributes](
- mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
- return g.create<mir::ops::AvgPool2DOp>(inputs[0], attributes);
- };
+ auto op_generator =
+ [&attributes](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::AvgPool2DOp>(inputs[0], attributes);
+ };
createAndRunTestGraph(op_generator, avgPool, input_ntensors, input_atensor);
}
@@ -742,8 +741,9 @@ TEST(cpp_operations_test, maxpool)
vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
- auto op_generator = [&window_size, &strides](
- mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ auto op_generator = [&window_size,
+ &strides](mir::Graph &g,
+ const std::vector<mir::Operation::Output *> &inputs) {
mir::MaxPool2DOpAttributes attributes;
attributes.window = window_size;
attributes.strides = strides;
@@ -838,7 +838,7 @@ TEST(cpp_operations_test, reduceMeanTst)
vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
fillTensors(input_ntensors[0], input_atensor, input_shape_data, 1.0f);
auto op_generator = [&axis_list, keep_dims](
- mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
auto op = g.create<mir::ops::ReduceMeanOp>(inputs[0], axis_list, keep_dims);
return op;
};
@@ -873,7 +873,8 @@ TEST(cpp_operations_test, slice4d)
vector<int> shape_data{5, 30, 40, 12};
vector<int> starts[] = {{0, 0, 0, 0}, {1, 1, 1, 1}, {1, 0, 1, 0}, {0, 1, 1, 0}};
vector<int> sizes[] = {
- {-1, -1, -1, -1}, {4, -1, 10, -1},
+ {-1, -1, -1, -1},
+ {4, -1, 10, -1},
};
for (auto st : starts)
{
diff --git a/compiler/nnc/unittests/support/CommandLineTest.cpp b/compiler/nnc/unittests/support/CommandLineTest.cpp
index 73f77aa20..993c4086f 100644
--- a/compiler/nnc/unittests/support/CommandLineTest.cpp
+++ b/compiler/nnc/unittests/support/CommandLineTest.cpp
@@ -69,8 +69,8 @@ Option<int32_t> NNegOpt(optname("-neg_val"),
// test option with default negative value
Option<int32_t>
- NDefaultNegOpt(optname("-default_neg_val"),
- overview("description of integer option with default negative value"), -33);
+ NDefaultNegOpt(optname("-default_neg_val"),
+ overview("description of integer option with default negative value"), -33);
// test option with positive values
Option<uint32_t> NPosOpt(optname("-pos_val"),
overview("description of integer option with positive value"), 1,
@@ -124,28 +124,28 @@ TEST(SUPPORT_NNC, verify_cl_options)
{
// create command line
const char *argv[] = {
- "CLTest", // program name
- // string options
- "-m", "multiopt_value", // second name for option with several names
- "--single", "single_value", // option with single name
- "-several_separators:SOME_VALUE1,SOME_VALUE2", // test option with several separators
- "--one_separarot=AAA_VALUE", // test option whit one separator
- "-default_val_opt", // test option with default value
- "--optional_opt", "/home/guest/tmp", // test optional option
- "-valid_opt", "value2", // test options with defined values
- // integer options
- "-neg_val", "-42", // test negative value for integer option
- "-default_neg_val", // test integer option with default value
- "-pos_val", "33", // test positive value for integer option
- // char options
- "-char-opt", "b", "-dash_opt", "-",
- // bool options
- "-bool_opt=false", "-bool-opt2",
- // vector of strings options
- "-vec_opt1", "1", "c", "222", "ABC", "857", "-vec_opt2", "--vec_opt_with_vals", "abc", "123",
- "xxx", "abc", "xxx",
- // grouped options
- "-group_opt1", "-group_opt2", "abc", "-group_opt3", "11", nullptr};
+ "CLTest", // program name
+ // string options
+ "-m", "multiopt_value", // second name for option with several names
+ "--single", "single_value", // option with single name
+ "-several_separators:SOME_VALUE1,SOME_VALUE2", // test option with several separators
+ "--one_separarot=AAA_VALUE", // test option whit one separator
+ "-default_val_opt", // test option with default value
+ "--optional_opt", "/home/guest/tmp", // test optional option
+ "-valid_opt", "value2", // test options with defined values
+ // integer options
+ "-neg_val", "-42", // test negative value for integer option
+ "-default_neg_val", // test integer option with default value
+ "-pos_val", "33", // test positive value for integer option
+ // char options
+ "-char-opt", "b", "-dash_opt", "-",
+ // bool options
+ "-bool_opt=false", "-bool-opt2",
+ // vector of strings options
+ "-vec_opt1", "1", "c", "222", "ABC", "857", "-vec_opt2", "--vec_opt_with_vals", "abc", "123",
+ "xxx", "abc", "xxx",
+ // grouped options
+ "-group_opt1", "-group_opt2", "abc", "-group_opt3", "11", nullptr};
int argc = (sizeof(argv) / sizeof(argv[0])) - 1;
// It must be failed if option is not passed and other options are in the same group
diff --git a/compiler/nnc/unittests/transformations/Switcher.cpp b/compiler/nnc/unittests/transformations/Switcher.cpp
index 049ac44cd..2f4793369 100644
--- a/compiler/nnc/unittests/transformations/Switcher.cpp
+++ b/compiler/nnc/unittests/transformations/Switcher.cpp
@@ -88,7 +88,7 @@ TEST(TRANSFORMATIONS, Switcher_DWConv2D_NHWC2NCHW)
attributes.padding_before = {67, 123};
attributes.padding_after = {32, 356};
auto *dw_conv =
- g.create<mir::ops::DepthwiseConv2DOp>(input->getOutput(0), kernel->getOutput(0), attributes);
+ g.create<mir::ops::DepthwiseConv2DOp>(input->getOutput(0), kernel->getOutput(0), attributes);
auto *output = g.create<mir::ops::OutputOp>(dw_conv->getOutput(0));
@@ -138,7 +138,7 @@ TEST(TRANSFORMATIONS, Switcher_DeConv2D_NHWC2NCHW)
attributes.padding_before = {31, 72};
attributes.padding_after = {32, 71};
auto *deconv =
- g.create<mir::ops::DeConv2DOp>(input->getOutput(0), kernel->getOutput(0), attributes);
+ g.create<mir::ops::DeConv2DOp>(input->getOutput(0), kernel->getOutput(0), attributes);
auto *output = g.create<mir::ops::OutputOp>(deconv->getOutput(0));