Diffstat (limited to 'compiler/mir')
-rw-r--r--  compiler/mir/include/mir/Graph.h | 4
-rw-r--r--  compiler/mir/include/mir/Quantization.h | 2
-rw-r--r--  compiler/mir/include/mir/ShapeRange.h | 2
-rw-r--r--  compiler/mir/include/mir/TensorType.h | 2
-rw-r--r--  compiler/mir/include/mir/ops/AvgPool2DOp.h | 2
-rw-r--r--  compiler/mir/include/mir/ops/ConcatOp.h | 2
-rw-r--r--  compiler/mir/include/mir/ops/Conv2DOp.h | 4
-rw-r--r--  compiler/mir/include/mir/ops/Deconv2DOp.h | 4
-rw-r--r--  compiler/mir/include/mir/ops/DepthwiseConv2DOp.h | 4
-rw-r--r--  compiler/mir/include/mir/ops/FullyConnectedOp.h | 4
-rw-r--r--  compiler/mir/include/mir/ops/GatherOp.h | 2
-rw-r--r--  compiler/mir/include/mir/ops/MaxPool2DOp.h | 2
-rw-r--r--  compiler/mir/include/mir/ops/PadOp.h | 2
-rw-r--r--  compiler/mir/include/mir/ops/ReduceMeanOp.h | 2
-rw-r--r--  compiler/mir/include/mir/ops/ReduceOp.h | 2
-rw-r--r--  compiler/mir/include/mir/ops/ResizeOp.h | 4
-rw-r--r--  compiler/mir/include/mir/ops/SliceOp.h | 2
-rw-r--r--  compiler/mir/include/mir/ops/SqueezeOp.h | 2
-rw-r--r--  compiler/mir/src/Graph.cpp | 13
-rw-r--r--  compiler/mir/src/Operation.cpp | 2
-rw-r--r--  compiler/mir/src/Shape.cpp | 4
-rw-r--r--  compiler/mir/src/TensorVariant.cpp | 8
-rw-r--r--  compiler/mir/src/mir_caffe2_importer/caffe2_importer.cpp | 42
-rw-r--r--  compiler/mir/src/mir_caffe2_importer/caffe2_op_creator.cpp | 21
-rw-r--r--  compiler/mir/src/mir_caffe_importer/caffe_importer.cpp | 120
-rw-r--r--  compiler/mir/src/mir_caffe_importer/caffe_op_creator.cpp | 6
-rw-r--r--  compiler/mir/src/mir_onnx_importer/AttributeHelpers.h | 4
-rw-r--r--  compiler/mir/src/mir_onnx_importer/CMakeLists.txt | 4
-rw-r--r--  compiler/mir/src/mir_onnx_importer/ConvPoolHelpers.cpp | 2
-rw-r--r--  compiler/mir/src/mir_onnx_importer/ONNXHelpers.cpp | 6
-rw-r--r--  compiler/mir/src/mir_onnx_importer/ONNXImporterImpl.cpp | 6
-rw-r--r--  compiler/mir/src/mir_onnx_importer/ONNXNodeConverterRegistry.cpp | 4
-rw-r--r--  compiler/mir/src/mir_onnx_importer/Op/AveragePool.cpp | 2
-rw-r--r--  compiler/mir/src/mir_onnx_importer/Op/BatchNormalization.cpp | 2
-rw-r--r--  compiler/mir/src/mir_onnx_importer/Op/Conv.cpp | 2
-rw-r--r--  compiler/mir/src/mir_onnx_importer/Op/ConvTranspose.cpp | 20
-rw-r--r--  compiler/mir/src/mir_onnx_importer/Op/MaxPool.cpp | 2
-rw-r--r--  compiler/mir/src/mir_onnx_importer/Op/ReduceMean.cpp | 2
-rw-r--r--  compiler/mir/src/mir_onnx_importer/Op/Upsample.cpp | 20
-rw-r--r--  compiler/mir/src/mir_tflite_importer/CMakeLists.txt | 2
-rw-r--r--  compiler/mir/src/mir_tflite_importer/tflite_importer.cpp | 76
-rw-r--r--  compiler/mir/src/mir_tflite_importer/tflite_op_creator.cpp | 8
-rw-r--r--  compiler/mir/src/ops/AvgPool2DOp.cpp | 2
-rw-r--r--  compiler/mir/src/ops/Conv2DOp.cpp | 2
-rw-r--r--  compiler/mir/src/ops/DeConv2DOp.cpp | 8
-rw-r--r--  compiler/mir/src/ops/DepthwiseConv2DOp.cpp | 2
-rw-r--r--  compiler/mir/src/ops/MaxPool2DOp.cpp | 2
-rw-r--r--  compiler/mir/src/ops/PadOp.cpp | 2
-rw-r--r--  compiler/mir/src/ops/TransposeOp.cpp | 4
-rw-r--r--  compiler/mir/unittests/ShapeInference.cpp | 4
-rw-r--r--  compiler/mir/unittests/ShapeRange.cpp | 4
51 files changed, 235 insertions, 221 deletions
diff --git a/compiler/mir/include/mir/Graph.h b/compiler/mir/include/mir/Graph.h
index bf94cfb14..37bfdb361 100644
--- a/compiler/mir/include/mir/Graph.h
+++ b/compiler/mir/include/mir/Graph.h
@@ -103,6 +103,10 @@ private:
/**
* @brief Returns nodes of the graph sorted topologically.
+ * @note Sorting order priority
+ * 1) Graph input node (input index order)
+ * 2) Constant node (unordered - cannot predict order)
+ * 3) Ready node (unordered - cannot predict order)
*/
std::vector<Operation *> getSortedNodes(Graph *graph);
diff --git a/compiler/mir/include/mir/Quantization.h b/compiler/mir/include/mir/Quantization.h
index d266ee00d..901915a74 100644
--- a/compiler/mir/include/mir/Quantization.h
+++ b/compiler/mir/include/mir/Quantization.h
@@ -26,7 +26,7 @@ public:
AffineQuantization() = default;
AffineQuantization(float scale, int zero_point)
- : _scale(scale), _zero_point(zero_point), _empty(false)
+ : _scale(scale), _zero_point(zero_point), _empty(false)
{
}
diff --git a/compiler/mir/include/mir/ShapeRange.h b/compiler/mir/include/mir/ShapeRange.h
index a450bf090..70b29715f 100644
--- a/compiler/mir/include/mir/ShapeRange.h
+++ b/compiler/mir/include/mir/ShapeRange.h
@@ -26,7 +26,7 @@ namespace mir
{
class ShapeIter
- : public std::iterator<std::forward_iterator_tag, Index, std::size_t, Index *, Index &>
+ : public std::iterator<std::forward_iterator_tag, Index, std::size_t, Index *, Index &>
{
public:
ShapeIter &operator++()
diff --git a/compiler/mir/include/mir/TensorType.h b/compiler/mir/include/mir/TensorType.h
index 98797d687..b94a26eeb 100644
--- a/compiler/mir/include/mir/TensorType.h
+++ b/compiler/mir/include/mir/TensorType.h
@@ -34,7 +34,7 @@ public:
}
TensorType(DataType element_type, const Shape &shape, const AffineQuantization &quant)
- : _element_type(element_type), _shape(shape), _quantization(quant)
+ : _element_type(element_type), _shape(shape), _quantization(quant)
{
}
diff --git a/compiler/mir/include/mir/ops/AvgPool2DOp.h b/compiler/mir/include/mir/ops/AvgPool2DOp.h
index 47fe058ee..37fb66437 100644
--- a/compiler/mir/include/mir/ops/AvgPool2DOp.h
+++ b/compiler/mir/include/mir/ops/AvgPool2DOp.h
@@ -32,7 +32,7 @@ class AvgPool2DOp : public Operation
{
public:
AvgPool2DOp(Output *arg, const AvgPool2DOpAttributes &attributes)
- : Operation(Type::avgPool2D, {arg}), _attributes(attributes)
+ : Operation(Type::avgPool2D, {arg}), _attributes(attributes)
{
inferOutputTypes();
}
diff --git a/compiler/mir/include/mir/ops/ConcatOp.h b/compiler/mir/include/mir/ops/ConcatOp.h
index 4f46d4449..d1f9142fa 100644
--- a/compiler/mir/include/mir/ops/ConcatOp.h
+++ b/compiler/mir/include/mir/ops/ConcatOp.h
@@ -31,7 +31,7 @@ class ConcatOp : public Operation
{
public:
ConcatOp(const std::vector<Output *> &args, int32_t axis)
- : Operation(Type::concat, args), _axis(axis)
+ : Operation(Type::concat, args), _axis(axis)
{
inferOutputTypes();
}
diff --git a/compiler/mir/include/mir/ops/Conv2DOp.h b/compiler/mir/include/mir/ops/Conv2DOp.h
index ec818dae5..f8590a947 100644
--- a/compiler/mir/include/mir/ops/Conv2DOp.h
+++ b/compiler/mir/include/mir/ops/Conv2DOp.h
@@ -30,13 +30,13 @@ class Conv2DOp : public Operation
{
public:
Conv2DOp(Output *input, Output *kernel, const Conv2DOpAttributes &attributes)
- : Operation(Type::conv2D, {input, kernel}), _attributes(attributes)
+ : Operation(Type::conv2D, {input, kernel}), _attributes(attributes)
{
inferOutputTypes();
}
Conv2DOp(Output *input, Output *kernel, Output *bias, const Conv2DOpAttributes &attributes)
- : Operation(Type::conv2D, {input, kernel, bias}), _attributes(attributes)
+ : Operation(Type::conv2D, {input, kernel, bias}), _attributes(attributes)
{
inferOutputTypes();
}
diff --git a/compiler/mir/include/mir/ops/Deconv2DOp.h b/compiler/mir/include/mir/ops/Deconv2DOp.h
index a7b548028..9565eeb37 100644
--- a/compiler/mir/include/mir/ops/Deconv2DOp.h
+++ b/compiler/mir/include/mir/ops/Deconv2DOp.h
@@ -33,14 +33,14 @@ class DeConv2DOp : public Operation
{
public:
DeConv2DOp(Output *input, Output *kernel, const Deconv2DOpAttributes &attributes)
- : Operation(Type::deConv2D, {input, kernel}), _attributes(attributes)
+ : Operation(Type::deConv2D, {input, kernel}), _attributes(attributes)
{
inferOutputTypes();
}
DeConv2DOp(Output *input, Output *kernel, const Deconv2DOpAttributes &attributes,
const Shape &output_shape)
- : Operation(Type::deConv2D, {input, kernel}), _attributes(attributes)
+ : Operation(Type::deConv2D, {input, kernel}), _attributes(attributes)
{
assert(input->getElementType() == kernel->getElementType());
setOutputType(0, {input->getElementType(), output_shape});
diff --git a/compiler/mir/include/mir/ops/DepthwiseConv2DOp.h b/compiler/mir/include/mir/ops/DepthwiseConv2DOp.h
index 347b8e94f..558d60a4a 100644
--- a/compiler/mir/include/mir/ops/DepthwiseConv2DOp.h
+++ b/compiler/mir/include/mir/ops/DepthwiseConv2DOp.h
@@ -30,14 +30,14 @@ class DepthwiseConv2DOp : public Operation
{
public:
DepthwiseConv2DOp(Output *input, Output *kernel, const Conv2DOpAttributes &attributes)
- : Operation(Type::depthwiseConv, {input, kernel}), _attributes(attributes)
+ : Operation(Type::depthwiseConv, {input, kernel}), _attributes(attributes)
{
inferOutputTypes();
}
DepthwiseConv2DOp(Output *input, Output *kernel, Output *bias,
const Conv2DOpAttributes &attributes)
- : Operation(Type::depthwiseConv, {input, kernel, bias}), _attributes(attributes)
+ : Operation(Type::depthwiseConv, {input, kernel, bias}), _attributes(attributes)
{
inferOutputTypes();
}
diff --git a/compiler/mir/include/mir/ops/FullyConnectedOp.h b/compiler/mir/include/mir/ops/FullyConnectedOp.h
index 589c42df9..f937df539 100644
--- a/compiler/mir/include/mir/ops/FullyConnectedOp.h
+++ b/compiler/mir/include/mir/ops/FullyConnectedOp.h
@@ -29,13 +29,13 @@ class FullyConnectedOp : public Operation
{
public:
FullyConnectedOp(Output *input, Output *weights)
- : Operation(Type::fullyConnected, {input, weights})
+ : Operation(Type::fullyConnected, {input, weights})
{
inferOutputTypes();
}
FullyConnectedOp(Output *input, Output *weights, Output *bias)
- : Operation(Type::fullyConnected, {input, weights, bias})
+ : Operation(Type::fullyConnected, {input, weights, bias})
{
inferOutputTypes();
}
diff --git a/compiler/mir/include/mir/ops/GatherOp.h b/compiler/mir/include/mir/ops/GatherOp.h
index 899c9f169..58ea04074 100644
--- a/compiler/mir/include/mir/ops/GatherOp.h
+++ b/compiler/mir/include/mir/ops/GatherOp.h
@@ -33,7 +33,7 @@ class GatherOp : public Operation
{
public:
GatherOp(Output *data, Output *indices, int32_t axis)
- : Operation(Type::gather, {data, indices}), _axis(axis)
+ : Operation(Type::gather, {data, indices}), _axis(axis)
{
inferOutputTypes();
}
diff --git a/compiler/mir/include/mir/ops/MaxPool2DOp.h b/compiler/mir/include/mir/ops/MaxPool2DOp.h
index 7c5df4a53..4345cfc18 100644
--- a/compiler/mir/include/mir/ops/MaxPool2DOp.h
+++ b/compiler/mir/include/mir/ops/MaxPool2DOp.h
@@ -32,7 +32,7 @@ class MaxPool2DOp : public Operation
{
public:
MaxPool2DOp(Output *arg, const MaxPool2DOpAttributes &attributes)
- : Operation(Type::maxPool2D, {arg}), _attributes(attributes)
+ : Operation(Type::maxPool2D, {arg}), _attributes(attributes)
{
inferOutputTypes();
}
diff --git a/compiler/mir/include/mir/ops/PadOp.h b/compiler/mir/include/mir/ops/PadOp.h
index 76453acec..d229a97bd 100644
--- a/compiler/mir/include/mir/ops/PadOp.h
+++ b/compiler/mir/include/mir/ops/PadOp.h
@@ -29,7 +29,7 @@ class PadOp : public Operation
{
public:
PadOp(Output *arg, const PadOpAttributes &attributes)
- : Operation(Type::pad, {arg}), _attributes(attributes)
+ : Operation(Type::pad, {arg}), _attributes(attributes)
{
assert(_attributes.padding_before.size() == _attributes.padding_after.size());
inferOutputTypes();
diff --git a/compiler/mir/include/mir/ops/ReduceMeanOp.h b/compiler/mir/include/mir/ops/ReduceMeanOp.h
index add47ac75..5759b845e 100644
--- a/compiler/mir/include/mir/ops/ReduceMeanOp.h
+++ b/compiler/mir/include/mir/ops/ReduceMeanOp.h
@@ -29,7 +29,7 @@ class ReduceMeanOp : public ReduceOp
{
public:
ReduceMeanOp(Output *arg, const std::vector<int> &reduction_dims, bool keep_dims)
- : ReduceOp(Type::reduceMean, arg, reduction_dims, keep_dims)
+ : ReduceOp(Type::reduceMean, arg, reduction_dims, keep_dims)
{
}
diff --git a/compiler/mir/include/mir/ops/ReduceOp.h b/compiler/mir/include/mir/ops/ReduceOp.h
index 0f46a4596..5204a0903 100644
--- a/compiler/mir/include/mir/ops/ReduceOp.h
+++ b/compiler/mir/include/mir/ops/ReduceOp.h
@@ -29,7 +29,7 @@ class ReduceOp : public Operation
{
protected:
ReduceOp(Type type, Output *arg, const std::vector<int> &reduction_dims, bool keep_dims)
- : Operation(type, {arg}), _reduction_dims(reduction_dims), _keep_dims(keep_dims)
+ : Operation(type, {arg}), _reduction_dims(reduction_dims), _keep_dims(keep_dims)
{
inferOutputTypes();
}
diff --git a/compiler/mir/include/mir/ops/ResizeOp.h b/compiler/mir/include/mir/ops/ResizeOp.h
index 51e1b0b76..62743e396 100644
--- a/compiler/mir/include/mir/ops/ResizeOp.h
+++ b/compiler/mir/include/mir/ops/ResizeOp.h
@@ -40,7 +40,7 @@ public:
};
ResizeOp(Output *arg, ResizeMethod mode, const std::vector<float> &scales)
- : Operation(Type::resizeIm, {arg}), _mode(mode), _scales(scales)
+ : Operation(Type::resizeIm, {arg}), _mode(mode), _scales(scales)
{
// Infer output shape based on given scales.
auto &input_shape = getInputShape(0);
@@ -61,7 +61,7 @@ public:
}
ResizeOp(Output *arg, ResizeMethod mode, const Shape &output_shape)
- : Operation(Type::resizeIm, {arg}), _mode(mode)
+ : Operation(Type::resizeIm, {arg}), _mode(mode)
{
// Calculate scales based on given shape.
auto &input_shape = getInputShape(0);
diff --git a/compiler/mir/include/mir/ops/SliceOp.h b/compiler/mir/include/mir/ops/SliceOp.h
index 6370de4fa..1627d4b82 100644
--- a/compiler/mir/include/mir/ops/SliceOp.h
+++ b/compiler/mir/include/mir/ops/SliceOp.h
@@ -28,7 +28,7 @@ class SliceOp : public Operation
{
public:
SliceOp(Output *arg, const Shape &starts, const Shape &sizes)
- : Operation(Type::slice, {arg}), _starts(starts), _sizes(sizes)
+ : Operation(Type::slice, {arg}), _starts(starts), _sizes(sizes)
{
inferOutputTypes();
}
diff --git a/compiler/mir/include/mir/ops/SqueezeOp.h b/compiler/mir/include/mir/ops/SqueezeOp.h
index 8ef2a78bb..735b7d86d 100644
--- a/compiler/mir/include/mir/ops/SqueezeOp.h
+++ b/compiler/mir/include/mir/ops/SqueezeOp.h
@@ -29,7 +29,7 @@ class SqueezeOp : public Operation
{
public:
SqueezeOp(Output *arg, const std::vector<std::int32_t> &dims_to_squeeze)
- : Operation(Type::squeeze, {arg}), _dims_to_squeeze(dims_to_squeeze)
+ : Operation(Type::squeeze, {arg}), _dims_to_squeeze(dims_to_squeeze)
{
// Infer output shape.
inferOutputTypes();
diff --git a/compiler/mir/src/Graph.cpp b/compiler/mir/src/Graph.cpp
index 0eccdac2b..05d6dc9bd 100644
--- a/compiler/mir/src/Graph.cpp
+++ b/compiler/mir/src/Graph.cpp
@@ -44,9 +44,16 @@ std::vector<Operation *> getSortedNodes(Graph *graph)
std::deque<Operation *> ready_nodes;
std::unordered_map<Operation *, std::size_t> num_visited_input_edges;
+ // Use input vector first to maintain correct input order
+ for (Operation *op : graph->getInputs())
+ {
+ ready_nodes.push_back(op);
+ }
+
for (Operation *op : graph->getNodes())
{
- if (op->getNumInputs() == 0)
+ // Skip already pushed input node
+ if ((op->getNumInputs() == 0) && (op->getType() != Operation::Type::input))
{
ready_nodes.push_back(op);
}
@@ -123,11 +130,11 @@ void Graph::removeNode(Operation *op)
if (op->getType() == Operation::Type::input)
_inputs.erase(
- std::remove(_inputs.begin(), _inputs.end(), op)); // NOLINT(bugprone-inaccurate-erase)
+ std::remove(_inputs.begin(), _inputs.end(), op)); // NOLINT(bugprone-inaccurate-erase)
if (op->getType() == Operation::Type::output)
_outputs.erase(
- std::remove(_outputs.begin(), _outputs.end(), op)); // NOLINT(bugprone-inaccurate-erase)
+ std::remove(_outputs.begin(), _outputs.end(), op)); // NOLINT(bugprone-inaccurate-erase)
_ops.erase(op);
delete op;
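The Graph.cpp change above reorders how the topological sort is seeded: graph inputs are enqueued first, in input-index order, and only afterwards the remaining zero-input nodes (constants), whose relative order stays unspecified. A minimal standalone sketch of that seeding plus the usual Kahn-style traversal, using a simplified Node type rather than mir::Operation:

  #include <cstddef>
  #include <deque>
  #include <unordered_map>
  #include <vector>

  // Simplified stand-in for mir::Operation; only what the sketch needs.
  struct Node
  {
    bool is_input = false;      // graph input?
    std::vector<Node *> inputs; // producers of this node's operands
    std::vector<Node *> users;  // consumers of this node's results
  };

  // Kahn-style topological sort that seeds the queue with graph inputs first
  // (in input-index order), then every other node that has no inputs.
  std::vector<Node *> getSortedNodes(const std::vector<Node *> &graph_inputs,
                                     const std::vector<Node *> &all_nodes)
  {
    std::deque<Node *> ready;
    std::unordered_map<Node *, std::size_t> visited_edges;

    for (Node *in : graph_inputs) // 1) inputs, in input-index order
      ready.push_back(in);

    for (Node *n : all_nodes) // 2) constants / other zero-input, non-input nodes
      if (n->inputs.empty() && !n->is_input)
        ready.push_back(n);

    std::vector<Node *> sorted;
    while (!ready.empty())
    {
      Node *n = ready.front();
      ready.pop_front();
      sorted.push_back(n);
      for (Node *user : n->users) // 3) a user becomes ready once all of its
        if (++visited_edges[user] == user->inputs.size()) // operands are sorted
          ready.push_back(user);
    }
    return sorted;
  }

Seeding the queue with the inputs first is what makes the sorted order begin with input 0, input 1, and so on, regardless of how constants happen to be stored in the node set.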
diff --git a/compiler/mir/src/Operation.cpp b/compiler/mir/src/Operation.cpp
index 6f72acbf6..9ba395f94 100644
--- a/compiler/mir/src/Operation.cpp
+++ b/compiler/mir/src/Operation.cpp
@@ -40,7 +40,7 @@ void Operation::Output::replaceAllUsesWith(mir::Operation::Output *new_def)
}
Operation::Operation(Type type, const std::vector<Output *> &inputs, std::size_t num_outputs)
- : _type(type)
+ : _type(type)
{
for (std::size_t i = 0; i < inputs.size(); ++i)
{
diff --git a/compiler/mir/src/Shape.cpp b/compiler/mir/src/Shape.cpp
index 825420cd6..06dae0c54 100644
--- a/compiler/mir/src/Shape.cpp
+++ b/compiler/mir/src/Shape.cpp
@@ -48,9 +48,9 @@ Shape broadcastShapes(const Shape &lhs_shape, const Shape &rhs_shape)
for (int i = 0; i < num_dims; ++i)
{
const std::int32_t lhs_dim =
- (i >= num_dims - lhs_shape.rank()) ? lhs_shape.dim(i - (num_dims - lhs_shape.rank())) : 1;
+ (i >= num_dims - lhs_shape.rank()) ? lhs_shape.dim(i - (num_dims - lhs_shape.rank())) : 1;
const std::int32_t rhs_dim =
- (i >= num_dims - rhs_shape.rank()) ? rhs_shape.dim(i - (num_dims - rhs_shape.rank())) : 1;
+ (i >= num_dims - rhs_shape.rank()) ? rhs_shape.dim(i - (num_dims - rhs_shape.rank())) : 1;
if (lhs_dim == 1)
{
result_shape.dim(i) = rhs_dim;
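For context, broadcastShapes aligns both shapes at their trailing dimensions, treats missing leading dimensions as 1, and lets a size-1 dimension stretch to match the other operand; the reflowed lines above compute exactly those per-axis lhs_dim/rhs_dim values. A compact sketch of the rule with plain vectors instead of mir::Shape (error handling simplified to a throw):

  #include <algorithm>
  #include <cstddef>
  #include <cstdint>
  #include <stdexcept>
  #include <vector>

  // Right-aligned broadcasting of two shapes, e.g. {3, 1, 5} and {4, 5} -> {3, 4, 5}.
  std::vector<std::int32_t> broadcastShapes(const std::vector<std::int32_t> &lhs,
                                            const std::vector<std::int32_t> &rhs)
  {
    const std::size_t rank = std::max(lhs.size(), rhs.size());
    std::vector<std::int32_t> result(rank);
    for (std::size_t i = 0; i < rank; ++i)
    {
      // Missing leading dimensions are treated as 1.
      const std::int32_t l = (i >= rank - lhs.size()) ? lhs[i - (rank - lhs.size())] : 1;
      const std::int32_t r = (i >= rank - rhs.size()) ? rhs[i - (rank - rhs.size())] : 1;
      if (l != r && l != 1 && r != 1)
        throw std::runtime_error("shapes are not broadcast-compatible");
      result[i] = (l == 1) ? r : l;
    }
    return result;
  }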
diff --git a/compiler/mir/src/TensorVariant.cpp b/compiler/mir/src/TensorVariant.cpp
index 9e57dbaf0..516c0df73 100644
--- a/compiler/mir/src/TensorVariant.cpp
+++ b/compiler/mir/src/TensorVariant.cpp
@@ -35,7 +35,7 @@ TensorVariant::TensorVariant(const TensorType &type) : _type(type), _strides(typ
}
TensorVariant::TensorVariant(DataType element_type, const Shape &shape)
- : TensorVariant(TensorType(element_type, shape))
+ : TensorVariant(TensorType(element_type, shape))
{
}
@@ -46,7 +46,7 @@ TensorVariant::TensorVariant(const TensorType &type, const void *data) : TensorV
}
TensorVariant::TensorVariant(DataType element_type, const Shape &shape, const void *data)
- : TensorVariant(TensorType(element_type, shape), data)
+ : TensorVariant(TensorType(element_type, shape), data)
{
}
@@ -57,8 +57,8 @@ TensorVariant::TensorVariant(DataType element_type, const Shape &shape, const vo
* @param shape shape to broadcast to
*/
TensorVariant::TensorVariant(const TensorVariant &t_old, const Shape &shape)
- : _type(t_old.getType().getElementType(), shape), _data(t_old._data),
- _strides(static_cast<size_t>(shape.rank())), _element_size(t_old._element_size)
+ : _type(t_old.getType().getElementType(), shape), _data(t_old._data),
+ _strides(static_cast<size_t>(shape.rank())), _element_size(t_old._element_size)
{
int axis_old = t_old.getShape().rank() - 1;
for (int d = shape.rank() - 1; d >= 0; d--)
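The TensorVariant constructor touched above is the broadcasting view: it shares the original buffer and recomputes _strides for the target shape while walking both shapes from their last dimension. A plausible reading of that scheme is that broadcast dimensions simply get stride 0, so every index along them maps to the same element; the sketch below implements that interpretation with plain vectors and is not the mir implementation itself:

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  // Element strides for viewing a tensor of shape `old_shape` as the
  // (right-aligned) broadcast shape `new_shape`: broadcast dimensions get
  // stride 0 so every index along them reads the same element.
  std::vector<std::size_t> broadcastStrides(const std::vector<std::int32_t> &old_shape,
                                            const std::vector<std::int32_t> &new_shape)
  {
    std::vector<std::size_t> strides(new_shape.size(), 0);
    int axis_old = static_cast<int>(old_shape.size()) - 1;
    std::size_t stride = 1;
    for (int d = static_cast<int>(new_shape.size()) - 1; d >= 0; --d)
    {
      if (axis_old >= 0 && old_shape[axis_old] == new_shape[d])
      {
        strides[d] = stride; // real dimension: keep a dense stride
        stride *= static_cast<std::size_t>(old_shape[axis_old]);
        --axis_old;
      }
      else if (axis_old >= 0 && old_shape[axis_old] == 1)
      {
        --axis_old; // size-1 dimension broadcast away, stride stays 0
      }
      // otherwise the dimension exists only in new_shape; stride stays 0
    }
    return strides;
  }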
diff --git a/compiler/mir/src/mir_caffe2_importer/caffe2_importer.cpp b/compiler/mir/src/mir_caffe2_importer/caffe2_importer.cpp
index 812fcc5cc..abecfc88a 100644
--- a/compiler/mir/src/mir_caffe2_importer/caffe2_importer.cpp
+++ b/compiler/mir/src/mir_caffe2_importer/caffe2_importer.cpp
@@ -99,7 +99,7 @@ using mir::Shape;
Caffe2Importer::Caffe2Importer(std::string predict_net, std::string init_net,
const std::vector<std::vector<int>> &input_shapes)
- : _predictNet(std::move(predict_net)), _initNet(std::move(init_net))
+ : _predictNet(std::move(predict_net)), _initNet(std::move(init_net))
{
for (auto &shape : input_shapes)
_inputShapes.emplace_back(shape);
@@ -308,27 +308,27 @@ void Caffe2Importer::setGraphOutputs()
}
const std::map<std::string, SupportedCaffe2OpType> Caffe2Importer::_operatorTypes = {
- {"Add", SupportedCaffe2OpType::add},
- {"AveragePool", SupportedCaffe2OpType::averagePool},
- {"Conv", SupportedCaffe2OpType::conv},
- {"Concat", SupportedCaffe2OpType::concat},
- {"ConstantFill", SupportedCaffe2OpType::constantFill},
- {"Dropout", SupportedCaffe2OpType::dropout},
- {"FC", SupportedCaffe2OpType::FC},
- {"GivenTensorFill", SupportedCaffe2OpType::givenTensorFill},
- {"MaxPool", SupportedCaffe2OpType::maxPool},
- {"Mul", SupportedCaffe2OpType::mul},
- {"Relu", SupportedCaffe2OpType::relu},
- {"ResizeNearest", SupportedCaffe2OpType::resizeNearest},
- {"Sigmoid", SupportedCaffe2OpType::sigmoid},
- {"Softmax", SupportedCaffe2OpType::softmax},
- {"SpatialBN", SupportedCaffe2OpType::spatialBN},
- {"Sum", SupportedCaffe2OpType::sum},
- {"Clip", SupportedCaffe2OpType::clip},
- {"Reshape", SupportedCaffe2OpType::reshape},
- {"GivenTensorInt64Fill", SupportedCaffe2OpType::givenTensorInt64Fill},
+ {"Add", SupportedCaffe2OpType::add},
+ {"AveragePool", SupportedCaffe2OpType::averagePool},
+ {"Conv", SupportedCaffe2OpType::conv},
+ {"Concat", SupportedCaffe2OpType::concat},
+ {"ConstantFill", SupportedCaffe2OpType::constantFill},
+ {"Dropout", SupportedCaffe2OpType::dropout},
+ {"FC", SupportedCaffe2OpType::FC},
+ {"GivenTensorFill", SupportedCaffe2OpType::givenTensorFill},
+ {"MaxPool", SupportedCaffe2OpType::maxPool},
+ {"Mul", SupportedCaffe2OpType::mul},
+ {"Relu", SupportedCaffe2OpType::relu},
+ {"ResizeNearest", SupportedCaffe2OpType::resizeNearest},
+ {"Sigmoid", SupportedCaffe2OpType::sigmoid},
+ {"Softmax", SupportedCaffe2OpType::softmax},
+ {"SpatialBN", SupportedCaffe2OpType::spatialBN},
+ {"Sum", SupportedCaffe2OpType::sum},
+ {"Clip", SupportedCaffe2OpType::clip},
+ {"Reshape", SupportedCaffe2OpType::reshape},
+ {"GivenTensorInt64Fill", SupportedCaffe2OpType::givenTensorInt64Fill},
};
-}
+} // namespace
namespace mir_caffe2
{
diff --git a/compiler/mir/src/mir_caffe2_importer/caffe2_op_creator.cpp b/compiler/mir/src/mir_caffe2_importer/caffe2_op_creator.cpp
index 3390f4482..de0762dfa 100644
--- a/compiler/mir/src/mir_caffe2_importer/caffe2_op_creator.cpp
+++ b/compiler/mir/src/mir_caffe2_importer/caffe2_op_creator.cpp
@@ -125,7 +125,7 @@ static std::vector<std::int32_t> getWindowSize(const ::caffe2::OperatorDef &op,
{
int is_global_pooling = getSingleArgument(op, "global_pooling", 0);
bool has_custom_kernel_size =
- hasArgument(op.arg(), "kernel_h") || hasArgument(op.arg(), "kernel_w");
+ hasArgument(op.arg(), "kernel_h") || hasArgument(op.arg(), "kernel_w");
bool has_custom_kernels_size = hasArgument(op.arg(), "kernels");
int kernel_h(0), kernel_w(0);
@@ -186,14 +186,13 @@ static void checkConvLikeOp(const ::caffe2::OperatorDef &op)
if (has_custom_pad && hasArgument(op.arg(), "pad"))
throw std::runtime_error("Custom pad can't be combined with overall pad");
- if (has_custom_pad &&
- !(hasArgument(op.arg(), "pad_l") && hasArgument(op.arg(), "pad_r") &&
- hasArgument(op.arg(), "pad_t") && hasArgument(op.arg(), "pad_b")))
+ if (has_custom_pad && !(hasArgument(op.arg(), "pad_l") && hasArgument(op.arg(), "pad_r") &&
+ hasArgument(op.arg(), "pad_t") && hasArgument(op.arg(), "pad_b")))
throw std::runtime_error("If one custom pad specified - all custom pads must be specified");
// Kernel size
bool has_custom_kernel_size =
- hasArgument(op.arg(), "kernel_h") || hasArgument(op.arg(), "kernel_w");
+ hasArgument(op.arg(), "kernel_h") || hasArgument(op.arg(), "kernel_w");
if (has_custom_kernel_size && hasArgument(op.arg(), "kernel"))
throw std::runtime_error("Custom kernel size can't be combined with overall kernel size");
@@ -201,7 +200,7 @@ static void checkConvLikeOp(const ::caffe2::OperatorDef &op)
if (has_custom_kernel_size &&
!(hasArgument(op.arg(), "kernel_h") && hasArgument(op.arg(), "kernel_w")))
throw std::runtime_error(
- "If one custom kernel size specified - all custom kernel sizes must be specified");
+ "If one custom kernel size specified - all custom kernel sizes must be specified");
}
static mir::TensorVariant createTensor(const OperatorDef &op)
@@ -356,7 +355,7 @@ Caffe2OpCreator::convertFC(const std::vector<mir::Operation::Output *> &inputs,
auto reshape = createOp<ops::ReshapeOp>(inputs[0], shape)->getOutput(0);
auto weights =
- createOp<ops::TransposeOp>(inputs[1], std::vector<std::size_t>{1, 0})->getOutput(0);
+ createOp<ops::TransposeOp>(inputs[1], std::vector<std::size_t>{1, 0})->getOutput(0);
auto result = createOp<ops::FullyConnectedOp>(reshape, weights)->getOutput(0);
result = createOp<ops::AddOp>(result, inputs[2])->getOutput(0);
@@ -420,8 +419,8 @@ Caffe2OpCreator::convertResizeNearest(const std::vector<mir::Operation::Output *
scales[2] = getSingleArgument(op, "height_scale", 1.0f);
scales[3] = getSingleArgument(op, "width_scale", 1.0f);
auto result =
- createOp<ops::ResizeOp>(inputs[0], ops::ResizeOp::ResizeMethod::nearestNeighbor, scales)
- ->getOutput(0);
+ createOp<ops::ResizeOp>(inputs[0], ops::ResizeOp::ResizeMethod::nearestNeighbor, scales)
+ ->getOutput(0);
return {result};
}
@@ -450,7 +449,7 @@ Caffe2OpCreator::convertSpatialBN(const std::vector<mir::Operation::Output *> &i
// Sanity checks
if (op.input_size() != 5)
throw std::runtime_error(
- "SpatialBN must have exactly 5 inputs ('sums' and 'sumsq' are not supported yet)");
+ "SpatialBN must have exactly 5 inputs ('sums' and 'sumsq' are not supported yet)");
if (getSingleArgument(op, "is_test", 1) != 1)
throw std::runtime_error("SpatialBN: only test mode supported");
@@ -462,7 +461,7 @@ Caffe2OpCreator::convertSpatialBN(const std::vector<mir::Operation::Output *> &i
auto var_op = dynamic_cast<mir::ops::ConstantOp *>(inputs[4]->getNode());
if (scale_op == nullptr || bias_op == nullptr || mean_op == nullptr || var_op == nullptr)
throw std::runtime_error(
- "SpatialBN: non-constant 'scale', 'bias', 'mean' and 'var' inputs are not supported yet.");
+ "SpatialBN: non-constant 'scale', 'bias', 'mean' and 'var' inputs are not supported yet.");
const auto &scale_tensor = scale_op->getValue();
const auto &bias_tensor = bias_op->getValue();
diff --git a/compiler/mir/src/mir_caffe_importer/caffe_importer.cpp b/compiler/mir/src/mir_caffe_importer/caffe_importer.cpp
index 49f13fbd8..c74658299 100644
--- a/compiler/mir/src/mir_caffe_importer/caffe_importer.cpp
+++ b/compiler/mir/src/mir_caffe_importer/caffe_importer.cpp
@@ -357,66 +357,66 @@ void CaffeImporter::setGraphOutputs(mir::Graph *graph)
}
const std::map<std::string, CaffeOpType> CaffeImporter::_operatorTypes = {
- {"AbsVal", CaffeOpType::absVal},
- {"Accuracy", CaffeOpType::accuracy},
- {"ArgMax", CaffeOpType::argMax},
- {"BatchNorm", CaffeOpType::batchNorm},
- {"BatchReindex", CaffeOpType::batchReindex},
- {"Bias", CaffeOpType::bias},
- {"BNLL", CaffeOpType::BNLL},
- {"Clip", CaffeOpType::clip},
- {"Concat", CaffeOpType::concat},
- {"ContrastiveLoss", CaffeOpType::contrastiveLoss},
- {"Convolution", CaffeOpType::convolution},
- {"Crop", CaffeOpType::crop},
- {"Data", CaffeOpType::data},
- {"Deconvolution", CaffeOpType::deconvolution},
- {"Dropout", CaffeOpType::dropout},
- {"DummyData", CaffeOpType::dummyData},
- {"Eltwise", CaffeOpType::eltwise},
- {"ELU", CaffeOpType::ELU},
- {"Embed", CaffeOpType::embed},
- {"EuclidianLoss", CaffeOpType::euclidianLoss},
- {"Exp", CaffeOpType::exp},
- {"Filter", CaffeOpType::filter},
- {"Flatten", CaffeOpType::flatten},
- {"HDF5Data", CaffeOpType::HDF5Data},
- {"HDF5Output", CaffeOpType::HDF5Output},
- {"HingeLoss", CaffeOpType::hingeLoss},
- {"Im2Col", CaffeOpType::im2Col},
- {"ImageData", CaffeOpType::imageData},
- {"InfogainLoss", CaffeOpType::infogainLoss},
- {"InnerProduct", CaffeOpType::innerProduct},
- {"Input", CaffeOpType::input},
- {"Log", CaffeOpType::log},
- {"LRN", CaffeOpType::LRN},
- {"LSTM", CaffeOpType::LSTM},
- {"MemoryData", CaffeOpType::memoryData},
- {"MultinomialLogisticLoss", CaffeOpType::multinomialLogisticLoss},
- {"MVN", CaffeOpType::MVN},
- {"Parameter", CaffeOpType::parameter},
- {"Pooling", CaffeOpType::pooling},
- {"Power", CaffeOpType::power},
- {"PReLU", CaffeOpType::PReLU},
- {"Python", CaffeOpType::python},
- {"Recurrent", CaffeOpType::recurrent},
- {"Reduction", CaffeOpType::reduction},
- {"ReLU", CaffeOpType::ReLU},
- {"Reshape", CaffeOpType::reshape},
- {"RNN", CaffeOpType::RNN},
- {"Scale", CaffeOpType::scale},
- {"SigmoidCrossEntropyLoss", CaffeOpType::sigmoidCrossEntropyLoss},
- {"Sigmoid", CaffeOpType::sigmoid},
- {"Silence", CaffeOpType::silence},
- {"Softmax", CaffeOpType::softmax},
- {"SoftmaxWithLoss", CaffeOpType::softmaxWithLoss},
- {"SPP", CaffeOpType::SPP},
- {"Split", CaffeOpType::split},
- {"Slice", CaffeOpType::slice},
- {"TanH", CaffeOpType::tanh},
- {"Threshold", CaffeOpType::threshold},
- {"Tile", CaffeOpType::tile},
- {"WindowData", CaffeOpType::windowData}};
+ {"AbsVal", CaffeOpType::absVal},
+ {"Accuracy", CaffeOpType::accuracy},
+ {"ArgMax", CaffeOpType::argMax},
+ {"BatchNorm", CaffeOpType::batchNorm},
+ {"BatchReindex", CaffeOpType::batchReindex},
+ {"Bias", CaffeOpType::bias},
+ {"BNLL", CaffeOpType::BNLL},
+ {"Clip", CaffeOpType::clip},
+ {"Concat", CaffeOpType::concat},
+ {"ContrastiveLoss", CaffeOpType::contrastiveLoss},
+ {"Convolution", CaffeOpType::convolution},
+ {"Crop", CaffeOpType::crop},
+ {"Data", CaffeOpType::data},
+ {"Deconvolution", CaffeOpType::deconvolution},
+ {"Dropout", CaffeOpType::dropout},
+ {"DummyData", CaffeOpType::dummyData},
+ {"Eltwise", CaffeOpType::eltwise},
+ {"ELU", CaffeOpType::ELU},
+ {"Embed", CaffeOpType::embed},
+ {"EuclidianLoss", CaffeOpType::euclidianLoss},
+ {"Exp", CaffeOpType::exp},
+ {"Filter", CaffeOpType::filter},
+ {"Flatten", CaffeOpType::flatten},
+ {"HDF5Data", CaffeOpType::HDF5Data},
+ {"HDF5Output", CaffeOpType::HDF5Output},
+ {"HingeLoss", CaffeOpType::hingeLoss},
+ {"Im2Col", CaffeOpType::im2Col},
+ {"ImageData", CaffeOpType::imageData},
+ {"InfogainLoss", CaffeOpType::infogainLoss},
+ {"InnerProduct", CaffeOpType::innerProduct},
+ {"Input", CaffeOpType::input},
+ {"Log", CaffeOpType::log},
+ {"LRN", CaffeOpType::LRN},
+ {"LSTM", CaffeOpType::LSTM},
+ {"MemoryData", CaffeOpType::memoryData},
+ {"MultinomialLogisticLoss", CaffeOpType::multinomialLogisticLoss},
+ {"MVN", CaffeOpType::MVN},
+ {"Parameter", CaffeOpType::parameter},
+ {"Pooling", CaffeOpType::pooling},
+ {"Power", CaffeOpType::power},
+ {"PReLU", CaffeOpType::PReLU},
+ {"Python", CaffeOpType::python},
+ {"Recurrent", CaffeOpType::recurrent},
+ {"Reduction", CaffeOpType::reduction},
+ {"ReLU", CaffeOpType::ReLU},
+ {"Reshape", CaffeOpType::reshape},
+ {"RNN", CaffeOpType::RNN},
+ {"Scale", CaffeOpType::scale},
+ {"SigmoidCrossEntropyLoss", CaffeOpType::sigmoidCrossEntropyLoss},
+ {"Sigmoid", CaffeOpType::sigmoid},
+ {"Silence", CaffeOpType::silence},
+ {"Softmax", CaffeOpType::softmax},
+ {"SoftmaxWithLoss", CaffeOpType::softmaxWithLoss},
+ {"SPP", CaffeOpType::SPP},
+ {"Split", CaffeOpType::split},
+ {"Slice", CaffeOpType::slice},
+ {"TanH", CaffeOpType::tanh},
+ {"Threshold", CaffeOpType::threshold},
+ {"Tile", CaffeOpType::tile},
+ {"WindowData", CaffeOpType::windowData}};
} // namespace
std::unique_ptr<mir::Graph> importModelFromBinaryFile(const std::string &filename)
diff --git a/compiler/mir/src/mir_caffe_importer/caffe_op_creator.cpp b/compiler/mir/src/mir_caffe_importer/caffe_op_creator.cpp
index 37edc69c4..a2c881b82 100644
--- a/compiler/mir/src/mir_caffe_importer/caffe_op_creator.cpp
+++ b/compiler/mir/src/mir_caffe_importer/caffe_op_creator.cpp
@@ -374,7 +374,7 @@ static void convertPoolingParam(const caffe::PoolingParameter &params,
{
// Assuming NCHW format.
const std::int32_t padded_input =
- input_shape.dim(2 + i) + attributes.padding_before[i] + attributes.padding_after[i];
+ input_shape.dim(2 + i) + attributes.padding_before[i] + attributes.padding_after[i];
if ((padded_input - attributes.window[i]) % attributes.strides[i] != 0)
++attributes.padding_after[i];
}
@@ -449,7 +449,7 @@ CaffeOpCreator::convertSoftmax(const caffe::LayerParameter &layer,
auto input = createOp<ops::TransposeOp>(inputs[0], std::vector<std::size_t>{0, 2, 3, 1});
auto softmax = createOp<ops::SoftmaxOp>(input->getOutput(0), axis);
auto result =
- createOp<ops::TransposeOp>(softmax->getOutput(0), std::vector<std::size_t>{0, 3, 1, 2});
+ createOp<ops::TransposeOp>(softmax->getOutput(0), std::vector<std::size_t>{0, 3, 1, 2});
return {result->getOutput(0)};
}
@@ -823,7 +823,7 @@ CaffeOpCreator::convertLSTM(const caffe::LayerParameter &layer,
c_t = createOp<ops::AddOp>(createOp<ops::MulOp>(c_cont_t, f_t)->getOutput(0),
createOp<ops::MulOp>(i_t, g_t)->getOutput(0))
- ->getOutput(0);
+ ->getOutput(0);
h_t = createOp<ops::MulOp>(createOp<ops::TanhOp>(c_t)->getOutput(0), o_t)->getOutput(0);
h_slices[t] = h_t;
diff --git a/compiler/mir/src/mir_onnx_importer/AttributeHelpers.h b/compiler/mir/src/mir_onnx_importer/AttributeHelpers.h
index 9a93b5b7d..ac1c3cfad 100644
--- a/compiler/mir/src/mir_onnx_importer/AttributeHelpers.h
+++ b/compiler/mir/src/mir_onnx_importer/AttributeHelpers.h
@@ -76,8 +76,8 @@ inline const onnx::AttributeProto *findAttribute(const onnx::NodeProto &node,
{
const auto &attributes = node.attribute();
const auto it = std::find_if(
- attributes.cbegin(), attributes.cend(),
- [&name](const onnx::AttributeProto &attribute) { return attribute.name() == name; });
+ attributes.cbegin(), attributes.cend(),
+ [&name](const onnx::AttributeProto &attribute) { return attribute.name() == name; });
if (it == attributes.cend())
return nullptr;
return &*it;
diff --git a/compiler/mir/src/mir_onnx_importer/CMakeLists.txt b/compiler/mir/src/mir_onnx_importer/CMakeLists.txt
index e6eb13b93..04c22055e 100644
--- a/compiler/mir/src/mir_onnx_importer/CMakeLists.txt
+++ b/compiler/mir/src/mir_onnx_importer/CMakeLists.txt
@@ -112,6 +112,10 @@ target_include_directories(mir_onnx_importer PUBLIC ../../include/mir_onnx_impor
target_include_directories(mir_onnx_importer PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
target_link_libraries(mir_onnx_importer PUBLIC mir mir_onnx_proto PRIVATE mir_interpreter nncc_common)
+if(NOT ENABLE_TEST)
+ return()
+endif(NOT ENABLE_TEST)
+
nnas_find_package(GTest REQUIRED)
file(GLOB_RECURSE TEST_SOURCES "*.test.cpp")
diff --git a/compiler/mir/src/mir_onnx_importer/ConvPoolHelpers.cpp b/compiler/mir/src/mir_onnx_importer/ConvPoolHelpers.cpp
index d98e6deae..2091968d8 100644
--- a/compiler/mir/src/mir_onnx_importer/ConvPoolHelpers.cpp
+++ b/compiler/mir/src/mir_onnx_importer/ConvPoolHelpers.cpp
@@ -55,7 +55,7 @@ void inferAutoPadding(const std::string &pad_type, const mir::Shape &input_shape
// Assuming input has NCHW format.
const std::int32_t residual = input_shape.dim(2 + i) % strides[i];
const std::int32_t total_pad = std::max(
- INT32_C(0), residual == 0 ? eff_window_size - strides[i] : eff_window_size - residual);
+ INT32_C(0), residual == 0 ? eff_window_size - strides[i] : eff_window_size - residual);
if (pad_type == "SAME_UPPER")
{
padding_before[i] = total_pad / 2;
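inferAutoPadding, reflowed above, computes "SAME" padding: per spatial axis, enough total padding that the effective (dilated) window stepped by the stride covers the input, with SAME_UPPER putting the odd leftover pixel after the data and SAME_LOWER putting it before. A self-contained sketch of that rule (function name and signature are illustrative):

  #include <algorithm>
  #include <cstdint>
  #include <utility>

  // Total "SAME" padding for one spatial axis, split into {before, after}.
  std::pair<std::int32_t, std::int32_t> samePadding(std::int32_t in_size, std::int32_t window,
                                                    std::int32_t stride, std::int32_t dilation,
                                                    bool upper)
  {
    const std::int32_t eff_window = (window - 1) * dilation + 1;
    const std::int32_t residual = in_size % stride;
    const std::int32_t total =
      std::max(0, residual == 0 ? eff_window - stride : eff_window - residual);
    const std::int32_t before = upper ? total / 2 : total - total / 2;
    return {before, total - before};
  }

  // Example: in_size 8, window 3, stride 2, dilation 1 -> total pad 1,
  // so SAME_UPPER yields {0, 1} and SAME_LOWER yields {1, 0}.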
diff --git a/compiler/mir/src/mir_onnx_importer/ONNXHelpers.cpp b/compiler/mir/src/mir_onnx_importer/ONNXHelpers.cpp
index f3a9d182d..77656cf48 100644
--- a/compiler/mir/src/mir_onnx_importer/ONNXHelpers.cpp
+++ b/compiler/mir/src/mir_onnx_importer/ONNXHelpers.cpp
@@ -166,9 +166,9 @@ mir::Operation *foldConstants(mir::Graph *graph, mir::Operation *op)
}
bool is_foldable =
- std::all_of(op->getInputs().begin(), op->getInputs().end(), [](mir::Operation::Output *out) {
- return out->getNode()->getType() == mir::Operation::Type::constant;
- });
+ std::all_of(op->getInputs().begin(), op->getInputs().end(), [](mir::Operation::Output *out) {
+ return out->getNode()->getType() == mir::Operation::Type::constant;
+ });
if (!is_foldable)
return op;
diff --git a/compiler/mir/src/mir_onnx_importer/ONNXImporterImpl.cpp b/compiler/mir/src/mir_onnx_importer/ONNXImporterImpl.cpp
index 8b996244f..6379b6c87 100644
--- a/compiler/mir/src/mir_onnx_importer/ONNXImporterImpl.cpp
+++ b/compiler/mir/src/mir_onnx_importer/ONNXImporterImpl.cpp
@@ -134,7 +134,7 @@ void ONNXImporterImpl::collectUnsupportedOps()
auto opset = _modelCtx->getDomainOpsetVersion(onnx_node.domain());
NodeConverterRegistry::ConverterFunc converter =
- NodeConverterRegistry::getInstance().lookup(op_type, opset);
+ NodeConverterRegistry::getInstance().lookup(op_type, opset);
if (converter == nullptr)
problems_op_set.emplace(op_type, opset);
@@ -176,7 +176,7 @@ void ONNXImporterImpl::createGraphInputs()
}
auto elem_type = onnxDataTypeToMirDataType(
- (onnx::TensorProto_DataType)input.type().tensor_type().elem_type());
+ (onnx::TensorProto_DataType)input.type().tensor_type().elem_type());
mir::TensorType type{elem_type, shape};
auto *op = _graph->create<mir::ops::InputOp>(type);
_converterCtx->setOutput(input.name(), op->getOutput(0));
@@ -199,7 +199,7 @@ std::unique_ptr<mir::Graph> ONNXImporterImpl::createIR()
auto opset = _modelCtx->getDomainOpsetVersion(onnx_node.domain());
// Get converter
NodeConverterRegistry::ConverterFunc converter =
- NodeConverterRegistry::getInstance().lookup(op_type, opset);
+ NodeConverterRegistry::getInstance().lookup(op_type, opset);
assert(converter != nullptr);
converter(onnx_node, _converterCtx.get());
}
diff --git a/compiler/mir/src/mir_onnx_importer/ONNXNodeConverterRegistry.cpp b/compiler/mir/src/mir_onnx_importer/ONNXNodeConverterRegistry.cpp
index a11b18e89..573b41468 100644
--- a/compiler/mir/src/mir_onnx_importer/ONNXNodeConverterRegistry.cpp
+++ b/compiler/mir/src/mir_onnx_importer/ONNXNodeConverterRegistry.cpp
@@ -117,8 +117,8 @@ NodeConverterRegistry::ConverterFunc NodeConverterRegistry::lookup(const std::st
const VersionMap &conv_map = it->second;
auto res = std::lower_bound(
- conv_map.crbegin(), conv_map.crend(), opset,
- [](const VersionMap::value_type &pair, int64_t opset) { return pair.first > opset; });
+ conv_map.crbegin(), conv_map.crend(), opset,
+ [](const VersionMap::value_type &pair, int64_t opset) { return pair.first > opset; });
if (res == conv_map.crend())
{
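The lookup being reformatted selects, for an op type, the converter registered for the largest opset version not exceeding the model's opset; searching the version map in reverse with lower_bound and a "greater than" predicate does exactly that. The same selection rule, extracted into a self-contained sketch with a placeholder converter signature:

  #include <algorithm>
  #include <cstdint>
  #include <map>

  using ConverterFunc = void (*)();                         // placeholder signature
  using VersionMap = std::map<std::int64_t, ConverterFunc>; // opset -> converter

  // Return the converter registered for the largest opset <= `opset`,
  // or nullptr if every registered version is newer than the model's.
  ConverterFunc lookup(const VersionMap &conv_map, std::int64_t opset)
  {
    auto res = std::lower_bound(
      conv_map.crbegin(), conv_map.crend(), opset,
      [](const VersionMap::value_type &pair, std::int64_t v) { return pair.first > v; });
    return res == conv_map.crend() ? nullptr : res->second;
  }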
diff --git a/compiler/mir/src/mir_onnx_importer/Op/AveragePool.cpp b/compiler/mir/src/mir_onnx_importer/Op/AveragePool.cpp
index 503feffc8..1ee136ea6 100644
--- a/compiler/mir/src/mir_onnx_importer/Op/AveragePool.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/AveragePool.cpp
@@ -40,7 +40,7 @@ void convertAveragePoolV1(const onnx::NodeProto &onnx_node, ConverterContext *co
constexpr int num_spatial_dims = 2;
const auto strides =
- getAttributeValue(onnx_node, "strides", std::vector<std::int32_t>(num_spatial_dims, 1));
+ getAttributeValue(onnx_node, "strides", std::vector<std::int32_t>(num_spatial_dims, 1));
if (strides.size() != num_spatial_dims)
throw std::runtime_error("AveragePool: attribute 'strides' has incorrect size.");
diff --git a/compiler/mir/src/mir_onnx_importer/Op/BatchNormalization.cpp b/compiler/mir/src/mir_onnx_importer/Op/BatchNormalization.cpp
index 8a6d8cc51..c743ee9e0 100644
--- a/compiler/mir/src/mir_onnx_importer/Op/BatchNormalization.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/BatchNormalization.cpp
@@ -81,7 +81,7 @@ void convertBatchNormalizationV9(const onnx::NodeProto &onnx_node, ConverterCont
if (scale_op == nullptr || mean_op == nullptr || var_op == nullptr)
throw std::runtime_error(
- "BatchNormalization: only constant 'scale', 'mean' and 'variance' inputs are supported.");
+ "BatchNormalization: only constant 'scale', 'mean' and 'variance' inputs are supported.");
mir::Tensor<float> scale_accessor(scale_op->getValue());
mir::Tensor<float> mean_accessor(mean_op->getValue());
diff --git a/compiler/mir/src/mir_onnx_importer/Op/Conv.cpp b/compiler/mir/src/mir_onnx_importer/Op/Conv.cpp
index 7dc6ce818..7d78826a6 100644
--- a/compiler/mir/src/mir_onnx_importer/Op/Conv.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Conv.cpp
@@ -139,7 +139,7 @@ void convertConvV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
{
auto bias = inputs[2];
bias = createOp<mir::ops::ReshapeOp>(graph, bias, mir::Shape{1, bias->getShape().dim(0), 1, 1})
- ->getOutput(0);
+ ->getOutput(0);
result = createOp<mir::ops::AddOp>(graph, result, bias)->getOutput(0);
}
diff --git a/compiler/mir/src/mir_onnx_importer/Op/ConvTranspose.cpp b/compiler/mir/src/mir_onnx_importer/Op/ConvTranspose.cpp
index 3078a1959..ea0b6fa5e 100644
--- a/compiler/mir/src/mir_onnx_importer/Op/ConvTranspose.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/ConvTranspose.cpp
@@ -49,19 +49,19 @@ void convertConvTransposeV1(const onnx::NodeProto &onnx_node, ConverterContext *
constexpr int num_spatial_dims = 2;
const auto dilations =
- getAttributeValue(onnx_node, "dilations", std::vector<std::int32_t>(num_spatial_dims, 1));
+ getAttributeValue(onnx_node, "dilations", std::vector<std::int32_t>(num_spatial_dims, 1));
if (dilations.size() != num_spatial_dims)
throw std::runtime_error("ConvTranspose: attribute 'dilations' has incorrect size.");
if (!std::all_of(dilations.cbegin(), dilations.cend(), [](std::int32_t x) { return x == 1; }))
throw std::runtime_error("ConvTranspose: attribute 'dilations' has unsupported value.");
const auto strides =
- getAttributeValue(onnx_node, "strides", std::vector<std::int32_t>(num_spatial_dims, 1));
+ getAttributeValue(onnx_node, "strides", std::vector<std::int32_t>(num_spatial_dims, 1));
if (strides.size() != num_spatial_dims)
throw std::runtime_error("ConvTranspose: attribute 'strides' has incorrect size.");
- const auto output_padding = getAttributeValue(onnx_node, "output_padding",
- std::vector<std::int32_t>(num_spatial_dims, 0));
+ const auto output_padding =
+ getAttributeValue(onnx_node, "output_padding", std::vector<std::int32_t>(num_spatial_dims, 0));
if (output_padding.size() != num_spatial_dims)
throw std::runtime_error("ConvTranspose: attribute 'output_padding' has incorrect size.");
if (!std::all_of(output_padding.cbegin(), output_padding.cend(),
@@ -71,8 +71,8 @@ void convertConvTransposeV1(const onnx::NodeProto &onnx_node, ConverterContext *
// Assuming kernel has IOHW format.
assert(kernel->getShape().rank() == 4);
const auto kernel_size = getAttributeValue(
- onnx_node, "kernel_shape",
- std::vector<std::int32_t>{kernel->getShape().dim(2), kernel->getShape().dim(3)});
+ onnx_node, "kernel_shape",
+ std::vector<std::int32_t>{kernel->getShape().dim(2), kernel->getShape().dim(3)});
if (kernel_size.size() != num_spatial_dims)
throw std::runtime_error("ConvTranspose: attribute 'kernel_shape' has incorrect size.");
@@ -92,14 +92,14 @@ void convertConvTransposeV1(const onnx::NodeProto &onnx_node, ConverterContext *
attributes.strides = strides;
attributes.data_format = mir::DataFormat::NCHW;
attributes.padding_type = mir::ops::PaddingType::SameUpper;
- result = createOp<mir::ops::DeConv2DOp>(graph, input, kernel, attributes, output_shape)
- ->getOutput(0);
+ result =
+ createOp<mir::ops::DeConv2DOp>(graph, input, kernel, attributes, output_shape)->getOutput(0);
}
else
{
// TODO This code was not tested.
throw std::runtime_error(
- "ConvTranspose: absence of attribute 'output_shape' is not supported.");
+ "ConvTranspose: absence of attribute 'output_shape' is not supported.");
std::vector<std::int32_t> padding_before(num_spatial_dims, 0);
std::vector<std::int32_t> padding_after(num_spatial_dims, 0);
if (const auto *pads_attr = findAttribute(onnx_node, "pads"))
@@ -128,7 +128,7 @@ void convertConvTransposeV1(const onnx::NodeProto &onnx_node, ConverterContext *
{
auto bias = inputs[2];
bias = createOp<mir::ops::ReshapeOp>(graph, bias, mir::Shape{1, bias->getShape().dim(0), 1, 1})
- ->getOutput(0);
+ ->getOutput(0);
result = createOp<mir::ops::AddOp>(graph, result, bias)->getOutput(0);
}
diff --git a/compiler/mir/src/mir_onnx_importer/Op/MaxPool.cpp b/compiler/mir/src/mir_onnx_importer/Op/MaxPool.cpp
index 53e6e1556..6c9ef6621 100644
--- a/compiler/mir/src/mir_onnx_importer/Op/MaxPool.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/MaxPool.cpp
@@ -40,7 +40,7 @@ void convertMaxPoolV1(const onnx::NodeProto &onnx_node, ConverterContext *contex
constexpr int num_spatial_dims = 2;
const auto strides =
- getAttributeValue(onnx_node, "strides", std::vector<std::int32_t>(num_spatial_dims, 1));
+ getAttributeValue(onnx_node, "strides", std::vector<std::int32_t>(num_spatial_dims, 1));
if (strides.size() != num_spatial_dims)
throw std::runtime_error("MaxPool: attribute 'strides' has incorrect size.");
diff --git a/compiler/mir/src/mir_onnx_importer/Op/ReduceMean.cpp b/compiler/mir/src/mir_onnx_importer/Op/ReduceMean.cpp
index ec43bffb4..9bfe16282 100644
--- a/compiler/mir/src/mir_onnx_importer/Op/ReduceMean.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/ReduceMean.cpp
@@ -52,7 +52,7 @@ void convertReduceMeanV1(const onnx::NodeProto &onnx_node, ConverterContext *con
mir::Graph *graph = context->getGraph();
auto result =
- createOp<mir::ops::ReduceMeanOp>(graph, inputs[0], reduce_dims, keep_dims)->getOutput(0);
+ createOp<mir::ops::ReduceMeanOp>(graph, inputs[0], reduce_dims, keep_dims)->getOutput(0);
context->setNodeOutputs(onnx_node, {result});
}
diff --git a/compiler/mir/src/mir_onnx_importer/Op/Upsample.cpp b/compiler/mir/src/mir_onnx_importer/Op/Upsample.cpp
index 346e22cc2..881ec89d3 100644
--- a/compiler/mir/src/mir_onnx_importer/Op/Upsample.cpp
+++ b/compiler/mir/src/mir_onnx_importer/Op/Upsample.cpp
@@ -52,9 +52,9 @@ void convertUpsampleV1(const onnx::NodeProto &onnx_node, ConverterContext *conte
scales_vector.at(3) = w_scale;
auto result =
- createOp<mir::ops::ResizeOp>(graph, inputs[0],
- mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
- ->getOutput(0);
+ createOp<mir::ops::ResizeOp>(graph, inputs[0],
+ mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
+ ->getOutput(0);
context->setNodeOutputs(onnx_node, {result});
}
@@ -74,7 +74,7 @@ void convertUpsampleV7(const onnx::NodeProto &onnx_node, ConverterContext *conte
if (scales_attr->floats_size() != inputs[0]->getShape().rank())
throw std::runtime_error(
- "Number of elements of scales should be the same as the rank of input");
+ "Number of elements of scales should be the same as the rank of input");
assert(inputs[0]->getShape().rank() == 4 && "Only rank 4 is supported");
std::vector<float> scales_vector(4);
@@ -85,9 +85,9 @@ void convertUpsampleV7(const onnx::NodeProto &onnx_node, ConverterContext *conte
scales_vector.at(3) = scales_attr->floats(3);
auto result =
- createOp<mir::ops::ResizeOp>(graph, inputs[0],
- mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
- ->getOutput(0);
+ createOp<mir::ops::ResizeOp>(graph, inputs[0],
+ mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
+ ->getOutput(0);
context->setNodeOutputs(onnx_node, {result});
}
@@ -117,9 +117,9 @@ void convertUpsampleV9(const onnx::NodeProto &onnx_node, ConverterContext *conte
scales_vector[i] = scales_tensor.atOffset(i);
auto result =
- createOp<mir::ops::ResizeOp>(graph, inputs[0],
- mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
- ->getOutput(0);
+ createOp<mir::ops::ResizeOp>(graph, inputs[0],
+ mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
+ ->getOutput(0);
context->setNodeOutputs(onnx_node, {result});
}
diff --git a/compiler/mir/src/mir_tflite_importer/CMakeLists.txt b/compiler/mir/src/mir_tflite_importer/CMakeLists.txt
index 952857c86..6c6c28a32 100644
--- a/compiler/mir/src/mir_tflite_importer/CMakeLists.txt
+++ b/compiler/mir/src/mir_tflite_importer/CMakeLists.txt
@@ -1,4 +1,4 @@
-nnas_find_package(FlatBuffers REQUIRED)
+nnas_find_package(FlatBuffers EXACT 2.0 REQUIRED)
if (NOT FlatBuffers_FOUND)
return()
diff --git a/compiler/mir/src/mir_tflite_importer/tflite_importer.cpp b/compiler/mir/src/mir_tflite_importer/tflite_importer.cpp
index 3f245d2d4..7b91bf0ba 100644
--- a/compiler/mir/src/mir_tflite_importer/tflite_importer.cpp
+++ b/compiler/mir/src/mir_tflite_importer/tflite_importer.cpp
@@ -105,37 +105,37 @@ void TfliteImporter::import()
}
static const std::set<tflite::BuiltinOperator> supportedOperators = {
- tflite::BuiltinOperator_ADD,
- tflite::BuiltinOperator_AVERAGE_POOL_2D,
- tflite::BuiltinOperator_CONCATENATION,
- tflite::BuiltinOperator_CONV_2D,
- tflite::BuiltinOperator_DEPTHWISE_CONV_2D,
- tflite::BuiltinOperator_DIV,
- tflite::BuiltinOperator_FULLY_CONNECTED,
- tflite::BuiltinOperator_HARD_SWISH,
- tflite::BuiltinOperator_LEAKY_RELU,
- tflite::BuiltinOperator_LOGISTIC,
- tflite::BuiltinOperator_MAX_POOL_2D,
- tflite::BuiltinOperator_MAXIMUM,
- tflite::BuiltinOperator_MEAN,
- tflite::BuiltinOperator_MUL,
- tflite::BuiltinOperator_PAD,
- tflite::BuiltinOperator_RELU,
- tflite::BuiltinOperator_RELU6,
- tflite::BuiltinOperator_RESHAPE,
- tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
- tflite::BuiltinOperator_RSQRT,
- tflite::BuiltinOperator_SHAPE,
- tflite::BuiltinOperator_SLICE,
- tflite::BuiltinOperator_SOFTMAX,
- tflite::BuiltinOperator_SQRT,
- tflite::BuiltinOperator_SQUARED_DIFFERENCE,
- tflite::BuiltinOperator_SQUEEZE,
- tflite::BuiltinOperator_STRIDED_SLICE,
- tflite::BuiltinOperator_SUB,
- tflite::BuiltinOperator_TANH,
- tflite::BuiltinOperator_TRANSPOSE,
- tflite::BuiltinOperator_TRANSPOSE_CONV,
+ tflite::BuiltinOperator_ADD,
+ tflite::BuiltinOperator_AVERAGE_POOL_2D,
+ tflite::BuiltinOperator_CONCATENATION,
+ tflite::BuiltinOperator_CONV_2D,
+ tflite::BuiltinOperator_DEPTHWISE_CONV_2D,
+ tflite::BuiltinOperator_DIV,
+ tflite::BuiltinOperator_FULLY_CONNECTED,
+ tflite::BuiltinOperator_HARD_SWISH,
+ tflite::BuiltinOperator_LEAKY_RELU,
+ tflite::BuiltinOperator_LOGISTIC,
+ tflite::BuiltinOperator_MAX_POOL_2D,
+ tflite::BuiltinOperator_MAXIMUM,
+ tflite::BuiltinOperator_MEAN,
+ tflite::BuiltinOperator_MUL,
+ tflite::BuiltinOperator_PAD,
+ tflite::BuiltinOperator_RELU,
+ tflite::BuiltinOperator_RELU6,
+ tflite::BuiltinOperator_RESHAPE,
+ tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
+ tflite::BuiltinOperator_RSQRT,
+ tflite::BuiltinOperator_SHAPE,
+ tflite::BuiltinOperator_SLICE,
+ tflite::BuiltinOperator_SOFTMAX,
+ tflite::BuiltinOperator_SQRT,
+ tflite::BuiltinOperator_SQUARED_DIFFERENCE,
+ tflite::BuiltinOperator_SQUEEZE,
+ tflite::BuiltinOperator_STRIDED_SLICE,
+ tflite::BuiltinOperator_SUB,
+ tflite::BuiltinOperator_TANH,
+ tflite::BuiltinOperator_TRANSPOSE,
+ tflite::BuiltinOperator_TRANSPOSE_CONV,
};
void TfliteImporter::collectUnsupportedOps()
@@ -268,8 +268,8 @@ void TfliteImporter::walkOperator(const tflite::SubGraphT *subgraph, const tflit
outputs = _opCreator->convertConv2D(op->builtin_options.AsConv2DOptions(), inputs);
break;
case tflite::BuiltinOperator_DEPTHWISE_CONV_2D:
- outputs = _opCreator->convertDepthwiseConv2D(op->builtin_options.AsDepthwiseConv2DOptions(),
- inputs);
+ outputs =
+ _opCreator->convertDepthwiseConv2D(op->builtin_options.AsDepthwiseConv2DOptions(), inputs);
break;
case tflite::BuiltinOperator_MAX_POOL_2D:
outputs = _opCreator->convertMaxPool2D(op->builtin_options.AsPool2DOptions(), inputs);
@@ -279,21 +279,21 @@ void TfliteImporter::walkOperator(const tflite::SubGraphT *subgraph, const tflit
break;
case tflite::BuiltinOperator_CONCATENATION:
outputs =
- _opCreator->convertConcatenation(op->builtin_options.AsConcatenationOptions(), inputs);
+ _opCreator->convertConcatenation(op->builtin_options.AsConcatenationOptions(), inputs);
break;
case tflite::BuiltinOperator_RESHAPE:
outputs = _opCreator->convertReshape(op->builtin_options.AsReshapeOptions(), inputs);
break;
case tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR:
outputs = _opCreator->convertResizeNearestNeighbor(
- op->builtin_options.AsResizeNearestNeighborOptions(), inputs);
+ op->builtin_options.AsResizeNearestNeighborOptions(), inputs);
break;
case tflite::BuiltinOperator_MEAN:
outputs = _opCreator->convertMean(op->builtin_options.AsReducerOptions(), inputs);
break;
case tflite::BuiltinOperator_FULLY_CONNECTED:
outputs =
- _opCreator->convertFullyConnected(op->builtin_options.AsFullyConnectedOptions(), inputs);
+ _opCreator->convertFullyConnected(op->builtin_options.AsFullyConnectedOptions(), inputs);
break;
case tflite::BuiltinOperator_SOFTMAX:
outputs = _opCreator->convertSoftmax(op->builtin_options.AsSoftmaxOptions(), inputs);
@@ -333,7 +333,7 @@ void TfliteImporter::walkOperator(const tflite::SubGraphT *subgraph, const tflit
break;
case tflite::BuiltinOperator_TRANSPOSE_CONV:
outputs =
- _opCreator->convertTransposeConv(op->builtin_options.AsTransposeConvOptions(), inputs);
+ _opCreator->convertTransposeConv(op->builtin_options.AsTransposeConvOptions(), inputs);
break;
case tflite::BuiltinOperator_PAD:
outputs = _opCreator->convertPad(op->builtin_options.AsPadOptions(), inputs);
@@ -352,7 +352,7 @@ void TfliteImporter::walkOperator(const tflite::SubGraphT *subgraph, const tflit
break;
case tflite::BuiltinOperator_STRIDED_SLICE:
outputs =
- _opCreator->convertStridedSlice(op->builtin_options.AsStridedSliceOptions(), inputs);
+ _opCreator->convertStridedSlice(op->builtin_options.AsStridedSliceOptions(), inputs);
break;
case tflite::BuiltinOperator_LEAKY_RELU:
outputs = _opCreator->convertLeakyReLU(op->builtin_options.AsLeakyReluOptions(), inputs);
diff --git a/compiler/mir/src/mir_tflite_importer/tflite_op_creator.cpp b/compiler/mir/src/mir_tflite_importer/tflite_op_creator.cpp
index d9f98da55..58425e9a9 100644
--- a/compiler/mir/src/mir_tflite_importer/tflite_op_creator.cpp
+++ b/compiler/mir/src/mir_tflite_importer/tflite_op_creator.cpp
@@ -92,9 +92,9 @@ static void calculatePadding(mir::ops::PaddingType padding_type, const mir::Shap
{
// Assuming NHWC format.
const std::int32_t total_padding =
- (input_shape.dim(1 + i) % strides[i] == 0)
- ? std::max(0, window_size[i] - strides[i])
- : std::max(0, window_size[i] - input_shape.dim(1 + i) % strides[i]);
+ (input_shape.dim(1 + i) % strides[i] == 0)
+ ? std::max(0, window_size[i] - strides[i])
+ : std::max(0, window_size[i] - input_shape.dim(1 + i) % strides[i]);
padding_before[i] = total_padding / 2;
padding_after[i] = total_padding - padding_before[i];
}
@@ -332,7 +332,7 @@ TFLiteOpCreator::convertResizeNearestNeighbor(const tflite::ResizeNearestNeighbo
Shape res_shape{input_shape.dim(0), size_tensor.at(mir::Index{0}), size_tensor.at(mir::Index{1}),
input_shape.dim(3)};
auto result =
- createOp<ops::ResizeOp>(input, ops::ResizeOp::ResizeMethod::nearestNeighbor, res_shape);
+ createOp<ops::ResizeOp>(input, ops::ResizeOp::ResizeMethod::nearestNeighbor, res_shape);
return {result->getOutput(0)};
}
diff --git a/compiler/mir/src/ops/AvgPool2DOp.cpp b/compiler/mir/src/ops/AvgPool2DOp.cpp
index 52b67303f..945917208 100644
--- a/compiler/mir/src/ops/AvgPool2DOp.cpp
+++ b/compiler/mir/src/ops/AvgPool2DOp.cpp
@@ -50,7 +50,7 @@ void AvgPool2DOp::inferOutputTypes()
// (in_size - window_size + 1 + stride - 1) / stride =
// (in_size - window_size) / stride + 1
output_shape.dim(spatial_dim_index) =
- (padded_input - _attributes.window[i]) / _attributes.strides[i] + 1;
+ (padded_input - _attributes.window[i]) / _attributes.strides[i] + 1;
}
setOutputType(0, {getInput(0)->getElementType(), output_shape});
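The pooling and convolution hunks in this commit only re-indent the shared output-extent formula, out = (in + pad_before + pad_after - window) / stride + 1, which the comment above derives from ceil division. A tiny numeric check of that identity:

  #include <cassert>
  #include <cstdint>

  // Output extent of a pooling / convolution window along one axis.
  std::int32_t outSize(std::int32_t in, std::int32_t pad_before, std::int32_t pad_after,
                       std::int32_t window, std::int32_t stride)
  {
    const std::int32_t padded = in + pad_before + pad_after;
    // ceil((padded - window + 1) / stride) == (padded - window) / stride + 1
    return (padded - window) / stride + 1;
  }

  int main()
  {
    assert(outSize(32, 0, 0, 2, 2) == 16); // 2x2, stride-2 pooling halves the extent
    assert(outSize(7, 1, 1, 3, 2) == 4);   // SAME-style padding keeps ceil(7 / 2)
    return 0;
  }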
diff --git a/compiler/mir/src/ops/Conv2DOp.cpp b/compiler/mir/src/ops/Conv2DOp.cpp
index 1addc5734..1de73b62d 100644
--- a/compiler/mir/src/ops/Conv2DOp.cpp
+++ b/compiler/mir/src/ops/Conv2DOp.cpp
@@ -54,7 +54,7 @@ void Conv2DOp::inferOutputTypes()
// (in_size - kernel_size + 1 + stride - 1) / stride =
// (in_size - kernel_size) / stride + 1
output_shape.dim(spatial_dim_index) =
- (padded_input - kernel_shape.dim(1 + i)) / _attributes.strides[i] + 1;
+ (padded_input - kernel_shape.dim(1 + i)) / _attributes.strides[i] + 1;
}
auto dt = getInput(0)->getElementType();
diff --git a/compiler/mir/src/ops/DeConv2DOp.cpp b/compiler/mir/src/ops/DeConv2DOp.cpp
index 35b111bc0..08829d327 100644
--- a/compiler/mir/src/ops/DeConv2DOp.cpp
+++ b/compiler/mir/src/ops/DeConv2DOp.cpp
@@ -36,8 +36,8 @@ void DeConv2DOp::inferPaddings()
{
const int spatial_dim_index = getDataSpatialDimIndex(_attributes.data_format, i);
const std::int32_t total_padding =
- (input_shape.dim(spatial_dim_index) - 1) * _attributes.strides[i] + kernel_shape.dim(i) -
- output_shape.dim(spatial_dim_index);
+ (input_shape.dim(spatial_dim_index) - 1) * _attributes.strides[i] + kernel_shape.dim(i) -
+ output_shape.dim(spatial_dim_index);
switch (_attributes.padding_type)
{
@@ -85,8 +85,8 @@ void DeConv2DOp::inferOutputTypes()
{
const int spatial_dim_index = getDataSpatialDimIndex(_attributes.data_format, i);
output_shape.dim(spatial_dim_index) =
- (input_shape.dim(spatial_dim_index) - 1) * _attributes.strides[i] + kernel_shape.dim(i) -
- (_attributes.padding_before.at(i) + _attributes.padding_after.at(i));
+ (input_shape.dim(spatial_dim_index) - 1) * _attributes.strides[i] + kernel_shape.dim(i) -
+ (_attributes.padding_before.at(i) + _attributes.padding_after.at(i));
}
setOutputType(0, {getInput(0)->getElementType(), output_shape});
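DeConv2DOp's reflowed lines carry the transposed-convolution shape math: the output extent inverts the forward formula, out = (in - 1) * stride + kernel - (pad_before + pad_after). A short check that the two formulas are inverses for an ordinary stride-2 case:

  #include <cassert>
  #include <cstdint>

  // Spatial extent produced by a transposed convolution along one axis.
  std::int32_t deconvOutSize(std::int32_t in, std::int32_t stride, std::int32_t kernel,
                             std::int32_t pad_before, std::int32_t pad_after)
  {
    return (in - 1) * stride + kernel - (pad_before + pad_after);
  }

  // Forward convolution extent, for checking that the deconvolution inverts it.
  std::int32_t convOutSize(std::int32_t in, std::int32_t stride, std::int32_t kernel,
                           std::int32_t pad_before, std::int32_t pad_after)
  {
    return (in + pad_before + pad_after - kernel) / stride + 1;
  }

  int main()
  {
    // A 3x3, stride-2 convolution maps 9 -> 4; the matching deconvolution maps 4 -> 9.
    assert(convOutSize(9, 2, 3, 0, 0) == 4);
    assert(deconvOutSize(4, 2, 3, 0, 0) == 9);
    return 0;
  }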
diff --git a/compiler/mir/src/ops/DepthwiseConv2DOp.cpp b/compiler/mir/src/ops/DepthwiseConv2DOp.cpp
index 0154bcd09..521d2eb49 100644
--- a/compiler/mir/src/ops/DepthwiseConv2DOp.cpp
+++ b/compiler/mir/src/ops/DepthwiseConv2DOp.cpp
@@ -50,7 +50,7 @@ void DepthwiseConv2DOp::inferOutputTypes()
// (in_size - kernel_size + 1 + stride - 1) / stride =
// (in_size - kernel_size) / stride + 1
output_shape.dim(spatial_dim_index) =
- (padded_input - kernel_shape.dim(i)) / _attributes.strides[i] + 1;
+ (padded_input - kernel_shape.dim(i)) / _attributes.strides[i] + 1;
}
setOutputType(0, {getInput(0)->getElementType(), output_shape});
diff --git a/compiler/mir/src/ops/MaxPool2DOp.cpp b/compiler/mir/src/ops/MaxPool2DOp.cpp
index 38e72424e..0cb3aa93c 100644
--- a/compiler/mir/src/ops/MaxPool2DOp.cpp
+++ b/compiler/mir/src/ops/MaxPool2DOp.cpp
@@ -50,7 +50,7 @@ void MaxPool2DOp::inferOutputTypes()
// (in_size - window_size + 1 + stride - 1) / stride =
// (in_size - window_size) / stride + 1
output_shape.dim(spatial_dim_index) =
- (padded_input - _attributes.window[i]) / _attributes.strides[i] + 1;
+ (padded_input - _attributes.window[i]) / _attributes.strides[i] + 1;
}
setOutputType(0, {getInput(0)->getElementType(), output_shape});
diff --git a/compiler/mir/src/ops/PadOp.cpp b/compiler/mir/src/ops/PadOp.cpp
index 465856d92..38feaccdc 100644
--- a/compiler/mir/src/ops/PadOp.cpp
+++ b/compiler/mir/src/ops/PadOp.cpp
@@ -30,7 +30,7 @@ void PadOp::inferOutputTypes()
for (int32_t dim = 0; dim < num_dims; ++dim)
{
out_shape.dim(dim) =
- _attributes.padding_before[dim] + input_shape.dim(dim) + _attributes.padding_after[dim];
+ _attributes.padding_before[dim] + input_shape.dim(dim) + _attributes.padding_after[dim];
}
setOutputType(0, {getInput(0)->getElementType(), out_shape});
diff --git a/compiler/mir/src/ops/TransposeOp.cpp b/compiler/mir/src/ops/TransposeOp.cpp
index 92282e17d..d04cdb4f2 100644
--- a/compiler/mir/src/ops/TransposeOp.cpp
+++ b/compiler/mir/src/ops/TransposeOp.cpp
@@ -22,7 +22,7 @@ namespace ops
{
TransposeOp::TransposeOp(Output *arg, const std::vector<std::size_t> &axis_order)
- : Operation(Type::transpose, {arg}), _axis_order(axis_order)
+ : Operation(Type::transpose, {arg}), _axis_order(axis_order)
{
assert(_axis_order.size() == static_cast<std::size_t>(getInputShape(0).rank()));
inferOutputTypes();
@@ -34,7 +34,7 @@ void TransposeOp::inferOutputTypes()
Shape output_shape(input_shape.rank());
for (std::size_t i = 0; i < _axis_order.size(); ++i)
output_shape.dim(static_cast<std::int64_t>(i)) =
- input_shape.dim(static_cast<int32_t>(_axis_order.at(i)));
+ input_shape.dim(static_cast<int32_t>(_axis_order.at(i)));
setOutputType(0, {getInput(0)->getElementType(), output_shape});
}
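TransposeOp's re-indented assignment is a plain permutation: output dimension i takes the size of input dimension axis_order[i]. Sketched with vectors instead of mir::Shape:

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  // Permute a shape by axis_order, e.g. NHWC {1, 224, 224, 3} with order
  // {0, 3, 1, 2} becomes NCHW {1, 3, 224, 224}.
  std::vector<std::int32_t> transposeShape(const std::vector<std::int32_t> &input_shape,
                                           const std::vector<std::size_t> &axis_order)
  {
    std::vector<std::int32_t> output_shape(input_shape.size());
    for (std::size_t i = 0; i < axis_order.size(); ++i)
      output_shape[i] = input_shape[axis_order[i]];
    return output_shape;
  }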
diff --git a/compiler/mir/unittests/ShapeInference.cpp b/compiler/mir/unittests/ShapeInference.cpp
index bae4ec5e2..c902b1e12 100644
--- a/compiler/mir/unittests/ShapeInference.cpp
+++ b/compiler/mir/unittests/ShapeInference.cpp
@@ -80,8 +80,8 @@ TEST(ShapeInferenceTest, ResizeWithScale)
auto input = g.create<ops::InputOp>(input_type);
auto op =
- g.create<ops::ResizeOp>(input->getOutput(0), ops::ResizeOp::ResizeMethod::nearestNeighbor,
- std::vector<float>{1, 6, 2, 1});
+ g.create<ops::ResizeOp>(input->getOutput(0), ops::ResizeOp::ResizeMethod::nearestNeighbor,
+ std::vector<float>{1, 6, 2, 1});
ASSERT_EQ(result_shape, op->getOutputShape(0));
}
diff --git a/compiler/mir/unittests/ShapeRange.cpp b/compiler/mir/unittests/ShapeRange.cpp
index 3b32d0c61..91b1be744 100644
--- a/compiler/mir/unittests/ShapeRange.cpp
+++ b/compiler/mir/unittests/ShapeRange.cpp
@@ -29,7 +29,7 @@ struct ParamType
template <typename... Args>
explicit ParamType(int32_t actual_len, Args &&... args)
- : actual_length(actual_len), shape({static_cast<int32_t>(args)...})
+ : actual_length(actual_len), shape({static_cast<int32_t>(args)...})
{
}
};
@@ -56,7 +56,7 @@ TEST_P(ShapeIteratorTest, ElementCount)
std::vector<ParamType> test_data{ParamType{6, 1, 2, 3}, ParamType{16, 2, 2, 4},
ParamType{1, 1, 1, 1, 1, 1}, ParamType{5, 5, 1, 1, 1, 1, 1}};
-INSTANTIATE_TEST_CASE_P(SimpleInput, ShapeIteratorTest, ::testing::ValuesIn(test_data));
+INSTANTIATE_TEST_SUITE_P(SimpleInput, ShapeIteratorTest, ::testing::ValuesIn(test_data));
TEST(ShapeRange, Contains)
{