Diffstat (limited to 'compiler/nnc/unittests/soft_backend/CPPOperations.cpp')
-rw-r--r--  compiler/nnc/unittests/soft_backend/CPPOperations.cpp | 1007
1 file changed, 1007 insertions(+), 0 deletions(-)
diff --git a/compiler/nnc/unittests/soft_backend/CPPOperations.cpp b/compiler/nnc/unittests/soft_backend/CPPOperations.cpp
new file mode 100644
index 000000000..508ee954d
--- /dev/null
+++ b/compiler/nnc/unittests/soft_backend/CPPOperations.cpp
@@ -0,0 +1,1007 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <memory>
+#include <vector>
+#include <functional>
+#include <cmath>
+#include <cstring>
+
+// artifact part
+#include "CommonData.def"
+
+#include "code_snippets/eigen.def"
+
+#include "code_snippets/cpp_header_types.def"
+#include "code_snippets/cpp_common_funcs.def"
+
+#include "code_snippets/cpp_broadcast.def"
+#include "code_snippets/cpp_capped_relu.def"
+#include "code_snippets/cpp_concat.def"
+#include "code_snippets/cpp_conv.def"
+#include "code_snippets/cpp_conv_transpose.def"
+#include "code_snippets/cpp_depthwise_conv.def"
+#include "code_snippets/cpp_elementwise.def"
+#include "code_snippets/cpp_elu.def"
+#include "code_snippets/cpp_fully_connected.def"
+#include "code_snippets/cpp_gather.def"
+#include "code_snippets/cpp_sigmoid.def"
+#include "code_snippets/cpp_pad.def"
+#include "code_snippets/cpp_pool.def"
+#include "code_snippets/cpp_reduce.def"
+#include "code_snippets/cpp_relu.def"
+#include "code_snippets/cpp_resize.def"
+#include "code_snippets/cpp_softmax.def"
+#include "code_snippets/cpp_sqrt.def"
+#include "code_snippets/cpp_slice.def"
+#include "code_snippets/cpp_tanh.def"
+#include "code_snippets/cpp_transpose.def"
+
+#include "code_snippets/cpp_operations.def"
+#include "code_snippets/cpp_leaky_relu.def"
+
+// soft backend part
+
+#include "ModelAnalyzer.h"
+#include "SBSerializer.h"
+
+// operations part
+#include "mir/ops/AbsOp.h"
+#include "mir/ops/AddOp.h"
+#include "mir/ops/AvgPool2DOp.h"
+#include "mir/ops/BroadcastOp.h"
+#include "mir/ops/CappedReluOp.h"
+#include "mir/ops/ConcatOp.h"
+#include "mir/ops/ConstantOp.h"
+#include "mir/ops/Conv2DOp.h"
+#include "mir/ops/Deconv2DOp.h"
+#include "mir/ops/DepthwiseConv2DOp.h"
+#include "mir/ops/DivOp.h"
+#include "mir/ops/EluOp.h"
+#include "mir/ops/FullyConnectedOp.h"
+#include "mir/ops/LeakyReluOp.h"
+#include "mir/ops/MaxOp.h"
+#include "mir/ops/MaxPool2DOp.h"
+#include "mir/ops/MulOp.h"
+#include "mir/ops/PadOp.h"
+#include "mir/ops/ReduceMeanOp.h"
+#include "mir/ops/ReluOp.h"
+#include "mir/ops/ReshapeOp.h"
+#include "mir/ops/ResizeOp.h"
+#include "mir/ops/SigmoidOp.h"
+#include "mir/ops/SliceOp.h"
+#include "mir/ops/SoftmaxOp.h"
+#include "mir/ops/SqrtOp.h"
+#include "mir/ops/SubOp.h"
+#include "mir/ops/TanhOp.h"
+#include "mir/ops/TransposeOp.h"
+
+// various headers
+#include "mir/TensorVariant.h"
+#include "mir/Tensor.h"
+#include "mir/Graph.h"
+#include "mir/ShapeRange.h"
+
+#include "MirInterpreter.h"
+
+#include "gtest/gtest.h"
+
+using namespace std;
+
+using namespace nnc;
+
+namespace irOps = mir::ops;
+
+/*
+  This test suite operates on both artifact and NNC tensors:
+    NNC data has an `n` prefix in its name,
+    artifact data has an `a` prefix.
+  For example: nShape, aShape, nTensor, aTensor.
+
+  Artifact data types are: Tensor, Shape
+  NNC data types are: mir::TensorVariant, mir::Shape, mir::Tensor<float>
+*/
+
+namespace
+{
+
+/**
+ * @brief Creates a graph with one operation generated by the op_gen function and returns
+ * this operation node
+ */
+mir::Operation *
+fillGraph(mir::Graph &g,
+ const function<mir::Operation *(mir::Graph &g, vector<mir::Operation::Output *> &inputs)>
+ &op_gen,
+ const vector<unique_ptr<mir::TensorVariant>> &input_ntensors)
+{
+ // Create operation inputs.
+ vector<mir::Operation::Output *> inputs;
+ for (const unique_ptr<mir::TensorVariant> &ntensor : input_ntensors)
+ {
+ auto input = g.create<mir::ops::ConstantOp>(*ntensor)->getOutput(0);
+ inputs.push_back(input);
+ }
+
+ return op_gen(g, inputs);
+}
+
+/**
+ * @brief Fills an NNC Shape object with data from the raw_shape_data container
+ */
+void fillNShape(mir::Shape &nshape, const vector<int> &raw_shape_data)
+{
+ int shape_rank = raw_shape_data.size();
+ nshape.resize(shape_rank);
+ for (int i = 0; i < shape_rank; ++i)
+ nshape.dim(i) = raw_shape_data[i];
+}
+
+/**
+ * @brief Converts NNC Shape to artifact Shape
+ */
+void copyAShapeFromNShape(Shape &ashape, const mir::Shape &src)
+{
+ int shape_rank = src.rank();
+ ashape.setDims(shape_rank);
+ for (int i = 0; i < shape_rank; ++i)
+ ashape[i] = src.dim(i);
+}
+
+/**
+ * @brief Fills NNC and artifact Shape objects with data from raw_shape_data
+ */
+void fillShapes(mir::Shape &nshape, Shape &ashape, const vector<int> &raw_shape_data)
+{
+ fillNShape(nshape, raw_shape_data);
+ copyAShapeFromNShape(ashape, nshape);
+}
+
+/**
+ * @brief Fills an NNC tensor with deterministic data: sin(t) * 2, so values lie in [-2, 2]
+ */
+void fillNTensor(mir::TensorVariant &dst, float start)
+{
+ float t = start;
+ mir::Tensor<float> wrapper(dst);
+ for (const mir::Index &idx : mir::ShapeRange(dst.getShape()))
+ {
+ wrapper.at(idx) = sin(t) * 2.0f;
+ t += 1.0f;
+ }
+}
+
+/**
+ * @brief Converts NNC mir::TensorVariant to artifact Tensor object
+ */
+void copyATensorFromNTensor(Tensor &dst, mir::TensorVariant &src)
+{
+ mir::Tensor<float> wrapper(src);
+ Index art_idx;
+ int rank = src.getShape().rank();
+ art_idx.setDims(rank);
+ for (mir::Index idx : mir::ShapeRange(src.getShape()))
+ {
+ for (int i = 0; i < rank; ++i)
+ art_idx[i] = idx.at(i);
+ dst.at(art_idx) = wrapper.at(idx);
+ }
+}
+
+/**
+ * @brief Fills NNC and artifact tensor objects with the same deterministic data
+ */
+void fillTensors(unique_ptr<mir::TensorVariant> &ntensor, Tensor &atensor, const vector<int> &shape,
+ float start)
+{
+ Shape ashape;
+ mir::Shape nshape;
+ fillShapes(nshape, ashape, shape);
+ atensor.reshape(ashape);
+ ntensor.reset(new mir::TensorVariant(mir::DataType::FLOAT32, nshape));
+ fillNTensor(*ntensor, start);
+ copyATensorFromNTensor(atensor, *ntensor);
+}
+
+/**
+ * @brief Runs the interpreter to get reference output data
+ */
+mir::TensorVariant getReferenceTensor(mir::Graph &g, mir::Operation *op)
+{
+ mir_interpreter::MIRInterpreter interpreter;
+ g.accept(&interpreter);
+ assert(op->getNumOutputs() == 1);
+ return interpreter.getTensor(op->getOutput(0));
+}
+
+/**
+ * @brief Runs the selected artifact operation; keeps test code compact and matches the
+ * return-value style of getReferenceTensor
+ */
+template <typename Operation, typename... Args> Tensor run(Operation op, const Args &... args)
+{
+ Tensor output;
+ op(output, args...);
+ return output;
+}
+
+/**
+ * @brief Compares floats using a combined units-in-the-last-place (ULP) and epsilon
+ * approach
+ * @param a First number to compare
+ * @param b Second number to compare
+ * @param ulp Max tolerated number of units in the last place
+ * @param eps Max tolerated absolute difference
+ * @return true if the absolute difference of the two numbers is at most 'eps' or they are
+ * within 'ulp' units in the last place of each other
+ */
+bool areFloatsNear(float a, float b, int32_t ulp, float eps)
+{
+ assert(ulp < (1 << 23) && "this algorithm is not applicable for such large diffs");
+  assert(eps >= 0 && "epsilon should be a non-negative number");
+  if (fabs(a - b) <= eps)
+    return true;
+  // past this point we need to find the difference between the numbers
+  // in terms of ULP
+  int32_t ai;
+  int32_t bi;
+  memcpy(&ai, &a, sizeof(float));
+  memcpy(&bi, &b, sizeof(float));
+  // compare integer representations of the floats: for same-sign floats,
+  // adjacent representable values differ by exactly 1
+ if (ai > bi)
+ return ai - bi <= ulp;
+ return bi - ai <= ulp;
+}
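+// Illustrative note (assumes IEEE-754 single precision): for positive floats the integer
+// representations are ordered, so adjacent representable values differ by exactly 1.
+// E.g. 1.0f is 0x3F800000 and the next float up, 1.00000012f, is 0x3F800001, so
+// areFloatsNear(1.0f, 1.00000012f, /*ulp=*/1, /*eps=*/0.0f) holds, while
+// areFloatsNear(1.0f, 1.0001f, /*ulp=*/1, /*eps=*/0.0f) does not (~839 ULPs apart).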
+
+/**
+ * @brief Compares nnc mir::TensorVariant and artifact Tensor objects
+ * @param ref_nnc_tensor Reference tensor that interpreter produced
+ * @param test_art_tensor Tensor that artifact operation computed
+ */
+void compareResults(const mir::TensorVariant &ref_nnc_tensor, const Tensor &test_art_tensor)
+{
+ assert(ref_nnc_tensor.getElementSize() == 4L &&
+ ref_nnc_tensor.getDataType() == mir::DataType::FLOAT32);
+
+ const mir::Shape &nnc_shape = ref_nnc_tensor.getShape();
+ const Shape &art_shape = test_art_tensor.getShape();
+
+ // check that reference and test shapes are equal
+ ASSERT_EQ(nnc_shape.rank(), art_shape.getDims());
+
+ int rank = nnc_shape.rank();
+ for (int i = 0; i < rank; ++i)
+ ASSERT_EQ(nnc_shape.dim(i), art_shape[i]);
+
+ // check that reference and test tensor contents are equal
+ Index artifact_idx;
+ artifact_idx.setDims(rank);
+ for (mir::Index nnc_idx : mir::ShapeRange(nnc_shape))
+ {
+ for (int i = 0; i < rank; ++i)
+ artifact_idx[i] = nnc_idx.at(i);
+    // Input and output data lie in the range [-10, 10]; the chosen epsilon is near
+    // the edge of float computational precision
+ float ref_data = mir::Tensor<float>(ref_nnc_tensor).at(nnc_idx);
+ float test_data = test_art_tensor.at(artifact_idx);
+ ASSERT_TRUE(areFloatsNear(ref_data, test_data, 32, 1e-5))
+ << "Tensor element " << nnc_idx << " diverged, reference: " << ref_data
+ << " test result: " << test_data;
+ }
+}
+
+/**
+ * @brief Creates a test graph, runs the interpreter on it, invokes the artifact
+ * operation and compares the results
+ */
+template <typename TestFunc, typename... Args>
+void createAndRunTestGraph(
+ function<mir::Operation *(mir::Graph &, const std::vector<mir::Operation::Output *> &inputs)>
+ op_generator,
+ TestFunc artifactOperation, const vector<unique_ptr<mir::TensorVariant>> &input_ntensors,
+ Args &... input_atensors)
+{
+ mir::Graph g;
+ mir::Operation *actual_operation = fillGraph(g, op_generator, input_ntensors);
+
+ // serialize data for soft backend operation
+ vector<unique_ptr<sir::Action>> inference_sequence;
+ unique_ptr<sir::CallFunction> op_call(new sir::CallFunction);
+ op_call->mirOp = actual_operation;
+ inference_sequence.push_back(std::move(op_call));
+ Serializer serializer;
+ serializer.serialize(inference_sequence);
+ assert(static_cast<sir::CallFunction *>(inference_sequence.front().get())->paramStartOffset == 0);
+
+ mir::TensorVariant reference_output = getReferenceTensor(g, actual_operation);
+
+ Tensor test_output;
+ artifactOperation(test_output, serializer.getBuffer().data(), input_atensors...);
+
+ compareResults(reference_output, test_output);
+}
+} // namespace
+
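+// Every TEST below follows the same pattern: fill input tensors with fillTensors, define
+// an op_generator lambda that creates the corresponding MIR operation, then pass both
+// together with the artifact function to createAndRunTestGraph, e.g.
+//   createAndRunTestGraph(op_generator, relu, input_ntensors, input_atensor);
+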
+TEST(cpp_operations_test, capped_relu)
+{
+  // test prerequisites
+  // cap is chosen to clip the input values (they lie in the range [-2, 2])
+ float cap = 0.5f;
+ vector<int> shape_data{2, 3, 4, 5};
+ Tensor input_atensor;
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
+ fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
+ auto op_generator = [cap](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::CappedReluOp>(inputs[0], cap);
+ };
+
+ createAndRunTestGraph(op_generator, cappedRelu, input_ntensors, input_atensor);
+}
+
+TEST(cpp_operations_test, concat)
+{
+ for (int num_dims = 1; num_dims <= 4; ++num_dims)
+ for (int axis = 0; axis < num_dims; ++axis)
+ {
+ // test prerequisites
+ vector<int> shape_data1{2, 3, 5, 7};
+ vector<int> shape_data2{2, 3, 5, 7};
+ shape_data1.resize(num_dims);
+ shape_data2.resize(num_dims);
+ // set different size for concatenating axis
+ shape_data2[axis] = 11;
+ vector<Tensor> input_atensors(2);
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(2);
+ fillTensors(input_ntensors[0], input_atensors[0], shape_data1, 1.0f);
+ fillTensors(input_ntensors[1], input_atensors[1], shape_data2, 2.0f);
+ auto op_generator = [axis](mir::Graph &g,
+ const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::ConcatOp>(inputs, axis);
+ };
+
+ createAndRunTestGraph(op_generator, concat<Tensor, Tensor>, input_ntensors, input_atensors[0],
+ input_atensors[1]);
+ }
+}
+
+TEST(cpp_operations_test, addbc)
+{
+ for (int num_dims = 2; num_dims <= 4; ++num_dims)
+ {
+ // test prerequisites
+ vector<int> shape_data1{3, 44, 5, 1};
+ vector<int> shape_data2{3, 1, 5, 6};
+ shape_data1.resize(num_dims);
+ shape_data2.resize(num_dims);
+ vector<Tensor> input_atensors(2);
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(2);
+ fillTensors(input_ntensors[0], input_atensors[0], shape_data1, 1.0f);
+ fillTensors(input_ntensors[1], input_atensors[1], shape_data2, 2.0f);
+ auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::AddOp>(inputs[0], inputs[1]);
+ };
+
+ createAndRunTestGraph(op_generator, ElementWise<Add>, input_ntensors, input_atensors[0],
+ input_atensors[1]);
+ }
+}
+
+TEST(cpp_operations_test, mulbc)
+{
+ for (int num_dims = 2; num_dims <= 4; ++num_dims)
+ {
+ // test prerequisites
+ vector<int> shape_data1{3, 22, 5, 1};
+ vector<int> shape_data2{3, 1, 5, 6};
+ shape_data1.resize(num_dims);
+ shape_data2.resize(num_dims);
+ vector<Tensor> input_atensors(2);
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(2);
+ fillTensors(input_ntensors[0], input_atensors[0], shape_data1, 1.0f);
+ fillTensors(input_ntensors[1], input_atensors[1], shape_data2, 2.0f);
+ auto opGenerator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::MulOp>(inputs[0], inputs[1]);
+ };
+
+ createAndRunTestGraph(opGenerator, ElementWise<Mul>, input_ntensors, input_atensors[0],
+ input_atensors[1]);
+ }
+}
+
+TEST(cpp_operations_test, divbc)
+{
+ for (int num_dims = 2; num_dims <= 4; ++num_dims)
+ {
+ // test prerequisites
+ vector<int> shape_data1{3, 22, 5, 1};
+ vector<int> shape_data2{3, 1, 5, 6};
+ shape_data1.resize(num_dims);
+ shape_data2.resize(num_dims);
+ vector<Tensor> input_atensors(2);
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(2);
+ fillTensors(input_ntensors[0], input_atensors[0], shape_data1, 5.0f);
+ fillTensors(input_ntensors[1], input_atensors[1], shape_data2, 2.0f);
+ auto opGenerator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::DivOp>(inputs[0], inputs[1]);
+ };
+
+ createAndRunTestGraph(opGenerator, ElementWise<Div>, input_ntensors, input_atensors[0],
+ input_atensors[1]);
+ }
+}
+
+TEST(cpp_operations_test, add)
+{
+ for (int num_dims = 2; num_dims <= 4; ++num_dims)
+ {
+ // test prerequisites
+ vector<int> shape_data{2, 3, 5, 7};
+ shape_data.resize(num_dims);
+ vector<Tensor> input_atensors(2);
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(2);
+ fillTensors(input_ntensors[0], input_atensors[0], shape_data, 1.0f);
+ fillTensors(input_ntensors[1], input_atensors[1], shape_data, 2.0f);
+ auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::AddOp>(inputs[0], inputs[1]);
+ };
+
+ createAndRunTestGraph(op_generator, ElementWise<Add>, input_ntensors, input_atensors[0],
+ input_atensors[1]);
+ }
+}
+
+TEST(cpp_operations_test, sub)
+{
+ for (int num_dims = 2; num_dims <= 4; ++num_dims)
+ {
+ // test prerequisites
+ vector<int> shape_data{2, 3, 5, 7};
+ shape_data.resize(num_dims);
+ vector<Tensor> input_atensors(2);
+ vector<unique_ptr<mir::TensorVariant>> input_n_tensors(2);
+ fillTensors(input_n_tensors[0], input_atensors[0], shape_data, 1.0f);
+ fillTensors(input_n_tensors[1], input_atensors[1], shape_data, 2.0f);
+ auto opGenerator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::SubOp>(inputs[0], inputs[1]);
+ };
+
+ createAndRunTestGraph(opGenerator, ElementWise<Sub>, input_n_tensors, input_atensors[0],
+ input_atensors[1]);
+ }
+}
+
+TEST(cpp_operations_test, mul)
+{
+ for (int num_dims = 2; num_dims <= 4; ++num_dims)
+ {
+ // test prerequisites
+ vector<int> shape_data{2, 3, 5, 7};
+ shape_data.resize(num_dims);
+ vector<Tensor> input_atensors(2);
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(2);
+ fillTensors(input_ntensors[0], input_atensors[0], shape_data, 1.0f);
+ fillTensors(input_ntensors[1], input_atensors[1], shape_data, 2.0f);
+ auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::MulOp>(inputs[0], inputs[1]);
+ };
+
+ createAndRunTestGraph(op_generator, ElementWise<Mul>, input_ntensors, input_atensors[0],
+ input_atensors[1]);
+ }
+}
+
+TEST(cpp_operations_test, max)
+{
+ for (int num_dims = 2; num_dims <= 4; ++num_dims)
+ {
+ // test prerequisites
+ vector<int> shape_data{2, 3, 5, 7};
+ shape_data.resize(num_dims);
+ vector<Tensor> input_atensors(2);
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(2);
+ fillTensors(input_ntensors[0], input_atensors[0], shape_data, 1.0f);
+ fillTensors(input_ntensors[1], input_atensors[1], shape_data, 2.0f);
+ auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::MaxOp>(inputs[0], inputs[1]);
+ };
+
+ createAndRunTestGraph(op_generator, ElementWise<Max>, input_ntensors, input_atensors[0],
+ input_atensors[1]);
+ }
+}
+
+TEST(cpp_operations_test, convTransposed2d)
+{
+ // Iterate over kernel width, kernel height,
+  // input channels (input_c), output channels (output_c),
+  // stride width, stride height.
+  // Size 3 is chosen to cover all cases where width is bigger/smaller than height
+  // and equal/not equal to 1
+ using iT = int32_t;
+ Tensor temporary(Shape({1024 * 40}));
+ for (iT kernel_h = 2; kernel_h <= 4; ++kernel_h)
+ for (iT kernel_w = 2; kernel_w <= 4; ++kernel_w)
+ for (iT input_c = 1; input_c <= 3; ++input_c)
+ for (iT output_c = 1; output_c <= 3; ++output_c)
+ for (iT stride_h = 1; stride_h <= 3; ++stride_h)
+ for (iT stride_w = 1; stride_w <= 3; ++stride_w)
+ {
+ vector<int> input_shape_data{3, 9, 3, static_cast<int>(input_c)}; // NHWC
+ vector<int> kernel_shape_data{kernel_h, kernel_w, output_c, input_c};
+ vector<int32_t> strides{stride_h, stride_w};
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(2);
+ Tensor input_atensor0;
+ Tensor input_atensor1;
+ fillTensors(input_ntensors[0], input_atensor0, input_shape_data, 1.0f);
+ fillTensors(input_ntensors[1], input_atensor1, kernel_shape_data, 1.0f);
+ auto op_generator = [&strides](mir::Graph &g,
+ const std::vector<mir::Operation::Output *> &inputs) {
+ mir::Deconv2DOpAttributes attributes;
+ attributes.strides = strides;
+ return g.create<mir::ops::DeConv2DOp>(inputs[0], inputs[1], attributes);
+ };
+
+ createAndRunTestGraph(op_generator, convTransposed2d, input_ntensors, input_atensor0,
+ input_atensor1, temporary);
+ }
+}
+
+TEST(cpp_operations_test, conv2d)
+{
+ // Iterate over kernel width, kernel height,
+  // input channels (input_c), output channels (output_c),
+  // stride width, stride height.
+  // Size 3 is chosen to cover all cases where width is bigger/smaller than height
+  // and equal/not equal to 1
+ using iT = int32_t;
+ Tensor temporary(Shape({1024 * 20}));
+ for (iT kernel_h = 1; kernel_h <= 3; ++kernel_h)
+ for (iT kernel_w = 1; kernel_w <= 3; ++kernel_w)
+ for (iT input_c = 1; input_c <= 3; ++input_c)
+ for (iT output_c = 1; output_c <= 3; ++output_c)
+ for (iT stride_h = 1; stride_h <= 3; ++stride_h)
+ for (iT stride_w = 1; stride_w <= 3; ++stride_w)
+ {
+ vector<int> input_shape_data{3, 5, 7, static_cast<int>(input_c)}; // NHWC
+ vector<int> kernel_shape_data{output_c, kernel_h, kernel_w, input_c}; // OHWI
+ vector<int32_t> strides{stride_h, stride_w};
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(2);
+ Tensor input_atensor0;
+ Tensor input_atensor1;
+ fillTensors(input_ntensors[0], input_atensor0, input_shape_data, 1.0f);
+ fillTensors(input_ntensors[1], input_atensor1, kernel_shape_data, 1.0f);
+ auto op_generator = [&strides](mir::Graph &g,
+ const std::vector<mir::Operation::Output *> &inputs) {
+ mir::Conv2DOpAttributes attributes;
+ attributes.strides = strides;
+ return g.create<mir::ops::Conv2DOp>(inputs[0], inputs[1], attributes);
+ };
+
+ createAndRunTestGraph(op_generator, conv2d, input_ntensors, input_atensor0,
+ input_atensor1, temporary);
+ }
+}
+
+TEST(cpp_operations_test, depthwise_conv)
+{
+ // Iterate over kernel width, kernel height,
+  // channels,
+  // stride width, stride height,
+  // layers multiplier.
+  // Size 3 is chosen to cover all cases where width is bigger/smaller than height
+  // and equal/not equal to 1
+ using iT = int32_t;
+ for (iT kernel_h = 1; kernel_h <= 3; ++kernel_h)
+ for (iT kernel_w = 1; kernel_w <= 3; ++kernel_w)
+ for (iT channels = 1; channels <= 3; ++channels)
+ for (iT stride_w = 1; stride_w <= 3; ++stride_w)
+ for (iT stride_h = 1; stride_h <= 3; ++stride_h)
+ for (iT multiplier = 1; multiplier <= 2; ++multiplier)
+ {
+ vector<int> input_shape_data{3, 7, 6, static_cast<int>(channels)}; // NHWC
+ vector<int> kernel_shape_data{kernel_h, kernel_w, channels, multiplier}; // HWCN
+ vector<int32_t> strides{stride_h, stride_w};
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(2);
+ Tensor input_atensor0;
+ Tensor input_atensor1;
+ fillTensors(input_ntensors[0], input_atensor0, input_shape_data, 1.0f);
+ fillTensors(input_ntensors[1], input_atensor1, kernel_shape_data, 1.0f);
+ auto op_generator = [&strides](mir::Graph &g,
+ const std::vector<mir::Operation::Output *> &inputs) {
+ mir::Conv2DOpAttributes attributes;
+ attributes.strides = strides;
+ return g.create<mir::ops::DepthwiseConv2DOp>(inputs[0], inputs[1], attributes);
+ };
+
+ createAndRunTestGraph(op_generator, depthwiseConv2d, input_ntensors, input_atensor0,
+ input_atensor1);
+ }
+}
+
+TEST(cpp_operations_test, fully_connected)
+{
+ vector<int> input_shape_data{3, 13};
+ vector<int> weights_shape_data{13, 7};
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(2);
+ Tensor input_atensor0;
+ Tensor input_atensor1;
+ fillTensors(input_ntensors[0], input_atensor0, input_shape_data, 1.0f);
+ fillTensors(input_ntensors[1], input_atensor1, weights_shape_data, 1.0f);
+ auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::FullyConnectedOp>(inputs[0], inputs[1]);
+ };
+
+ createAndRunTestGraph(op_generator, fullConnect, input_ntensors, input_atensor0, input_atensor1);
+}
+
+TEST(cpp_operations_test, resize_NN_test)
+{
+ mir::Shape test_shapes[] = {{1, 8, 8, 1}, {2, 10, 10, 1}, {1, 11, 11, 2}, {2, 8, 12, 2},
+ {1, 48, 12, 1}, {1, 48, 48, 1}, {1, 48, 56, 1}};
+ for (mir::Shape res_shape : test_shapes)
+ {
+ vector<int> input_shape_data{res_shape.dim(0), 4, 4, res_shape.dim(3)};
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
+ Tensor input_atensor;
+ fillTensors(input_ntensors[0], input_atensor, input_shape_data, 1.0f);
+ auto op_generator = [&res_shape](mir::Graph &g,
+ const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::ResizeOp>(
+ inputs[0], mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, res_shape);
+ };
+
+ createAndRunTestGraph(op_generator, resize, input_ntensors, input_atensor);
+ }
+}
+
+TEST(cpp_operations_test, resize_NN_test_scales)
+{
+ std::vector<float> test_scales[] = {
+ {1, 2, 2, 1}, {1, 2, 3, 1}, {1, 3, 2, 1}, {1, 2.5, 2, 1}, {1, 3, 9, 1}};
+ for (const std::vector<float> &scales : test_scales)
+ {
+ vector<int> input_shape_data{1, 4, 4, 1};
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
+ Tensor input_atensor;
+ fillTensors(input_ntensors[0], input_atensor, input_shape_data, 1.0f);
+ auto op_generator = [&scales](mir::Graph &g,
+ const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::ResizeOp>(
+ inputs[0], mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales);
+ };
+ createAndRunTestGraph(op_generator, resize, input_ntensors, input_atensor);
+ }
+}
+
+TEST(cpp_operations_test, avgpool)
+{
+ // Iterate over window width, window height
+  // channels,
+  // stride width, stride height.
+  // Size 3 is chosen to cover all cases where width is bigger/smaller than height
+  // and equal/not equal to 1
+ using iT = int32_t;
+ for (iT windowH = 1; windowH <= 3; ++windowH)
+ for (iT windowW = 1; windowW <= 3; ++windowW)
+ for (iT channels = 1; channels <= 2; ++channels)
+ for (iT stride_h = 1; stride_h <= 3; ++stride_h)
+ for (iT stride_w = 1; stride_w <= 3; ++stride_w)
+ {
+ vector<int> shape_data{3, 5, 7, static_cast<int>(channels)};
+ vector<int32_t> window_size{windowH, windowW};
+ vector<int32_t> strides{stride_h, stride_w};
+ Tensor input_atensor;
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
+ fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
+
+ mir::AvgPool2DOpAttributes attributes;
+ attributes.window = window_size;
+ attributes.strides = strides;
+ for (const auto include_pad : {false, true})
+ {
+ attributes.include_pad = include_pad;
+ auto op_generator = [&attributes](
+ mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::AvgPool2DOp>(inputs[0], attributes);
+ };
+
+ createAndRunTestGraph(op_generator, avgPool, input_ntensors, input_atensor);
+ }
+ }
+}
+
+TEST(cpp_operations_test, maxpool)
+{
+ // Iterate over window width, window height
+  // channels,
+  // stride width, stride height.
+  // Size 3 is chosen to cover all cases where width is bigger/smaller than height
+  // and equal/not equal to 1
+ using iT = int32_t;
+ for (iT windowH = 1; windowH <= 3; ++windowH)
+ for (iT windowW = 1; windowW <= 3; ++windowW)
+ for (iT channels = 1; channels <= 2; ++channels)
+ for (iT stride_h = 1; stride_h <= 3; ++stride_h)
+ for (iT stride_w = 1; stride_w <= 3; ++stride_w)
+ {
+ vector<int> shape_data{3, 5, 7, static_cast<int>(channels)};
+ vector<int32_t> window_size{windowH, windowW};
+ vector<int32_t> strides{stride_h, stride_w};
+ Tensor input_atensor;
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
+ fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
+
+ auto op_generator = [&window_size, &strides](
+ mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ mir::MaxPool2DOpAttributes attributes;
+ attributes.window = window_size;
+ attributes.strides = strides;
+ return g.create<mir::ops::MaxPool2DOp>(inputs[0], attributes);
+ };
+
+ createAndRunTestGraph(op_generator, maxPool, input_ntensors, input_atensor);
+ }
+}
+
+TEST(cpp_operations_test, relu)
+{
+ // test prerequisites
+ vector<int> shape_data{2, 3, 4, 5};
+ Tensor input_atensor;
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
+ fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
+ auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::ReluOp>(inputs[0]);
+ };
+
+ createAndRunTestGraph(op_generator, relu, input_ntensors, input_atensor);
+}
+
+TEST(cpp_operations_test, leaky_relu)
+{
+ // test prerequisites
+ vector<int> shape_data{2, 3, 4, 5};
+ Tensor input_atensor;
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
+ fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
+ auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::LeakyReluOp>(inputs[0], 0.1);
+ };
+
+ createAndRunTestGraph(op_generator, leakyRelu, input_ntensors, input_atensor);
+}
+
+TEST(cpp_operations_test, sigmoid)
+{
+ // test prerequisites
+ vector<int> shape_data{2, 3, 4, 5};
+ Tensor input_atensor;
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
+ fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
+ auto opGenerator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::SigmoidOp>(inputs[0]);
+ };
+
+ createAndRunTestGraph(opGenerator, sigmoid, input_ntensors, input_atensor);
+}
+
+TEST(cpp_operations_test, elu)
+{
+ // test prerequisites
+ vector<int> shape_data{2, 3, 4, 5};
+ Tensor input_atensor;
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
+ fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
+ auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::EluOp>(inputs[0], 1);
+ };
+
+ createAndRunTestGraph(op_generator, elu, input_ntensors, input_atensor);
+}
+
+TEST(cpp_operations_test, tanh)
+{
+ // test prerequisites
+ vector<int> shape_data{2, 3, 4, 5};
+ Tensor input_atensor;
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
+ fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
+ auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::TanhOp>(inputs[0]);
+ };
+
+ createAndRunTestGraph(op_generator, tanhActivation, input_ntensors, input_atensor);
+}
+
+TEST(cpp_operations_test, reduce_mean)
+{
+ // test prerequisites
+ // different test cases
+ std::vector<int> test_axis_list[] = {{2, 3}, {1}, {0}, {2}, {3}, {0, 2}, {1, 2, 3}};
+ for (const vector<int> &axis_list : test_axis_list)
+ {
+ for (const bool keep_dims : {true, false})
+ {
+ vector<int> input_shape_data{2, 3, 4, 5};
+ Tensor input_atensor;
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
+ fillTensors(input_ntensors[0], input_atensor, input_shape_data, 1.0f);
+ auto op_generator = [&axis_list, keep_dims](
+ mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ auto op = g.create<mir::ops::ReduceMeanOp>(inputs[0], axis_list, keep_dims);
+ return op;
+ };
+
+ createAndRunTestGraph(op_generator, reduceMean, input_ntensors, input_atensor);
+ }
+ }
+}
+
+TEST(cpp_operations_test, softmax)
+{
+ // iterate over number of dimensions in tensor
+ for (int num_dims = 1; num_dims <= 4; ++num_dims)
+ {
+ // test prerequisites
+ vector<int> shape_data{2, 3, 4, 5};
+ shape_data.resize(num_dims);
+ int axis = num_dims - 1;
+ Tensor input_atensor;
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
+ fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
+ auto op_generator = [axis](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::SoftmaxOp>(inputs[0], axis);
+ };
+
+ createAndRunTestGraph(op_generator, softmax, input_ntensors, input_atensor);
+ }
+}
+
+TEST(cpp_operations_test, slice4d)
+{
+ vector<int> shape_data{5, 30, 40, 12};
+ vector<int> starts[] = {{0, 0, 0, 0}, {1, 1, 1, 1}, {1, 0, 1, 0}, {0, 1, 1, 0}};
+  vector<int> sizes[] = {{-1, -1, -1, -1}, {4, -1, 10, -1}};
+ for (auto st : starts)
+ {
+ for (auto sz : sizes)
+ {
+ Tensor input_atensor;
+ vector<unique_ptr<mir::TensorVariant>> input_n_tensor(1);
+ fillTensors(input_n_tensor[0], input_atensor, shape_data, 1.0f);
+ auto op_gen = [st, sz](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::SliceOp>(inputs[0], mir::Shape(st), mir::Shape(sz));
+ };
+ createAndRunTestGraph(op_gen, slice, input_n_tensor, input_atensor);
+ }
+ }
+}
+
+TEST(cpp_operations_test, reshape)
+{
+ // test prerequisites
+ vector<int> input_shape_data{2, 3, 4, 5};
+ vector<int> output_shape_data{1, 120};
+ mir::Shape output_nshape;
+ fillNShape(output_nshape, output_shape_data);
+ Tensor input_atensor;
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
+ fillTensors(input_ntensors[0], input_atensor, input_shape_data, 1.0f);
+ auto op_generator = [&output_nshape](mir::Graph &g,
+ const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::ReshapeOp>(inputs[0], output_nshape);
+ };
+
+ createAndRunTestGraph(op_generator, reshape, input_ntensors, input_atensor);
+}
+
+TEST(cpp_operations_test, abs)
+{
+ // test prerequisites
+ vector<int> shape_data{2, 3, 4, 5};
+ Tensor input_atensor;
+ vector<unique_ptr<mir::TensorVariant>> input_ntensor(1);
+ fillTensors(input_ntensor[0], input_atensor, shape_data, 1.0f);
+ auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::AbsOp>(inputs[0]);
+ };
+ createAndRunTestGraph(op_generator, absFN, input_ntensor, input_atensor);
+}
+
+TEST(cpp_operations_test, sqrt)
+{
+ // test prerequisites
+ vector<int> shape_data{2, 3, 4, 5};
+ Tensor input_atensor;
+ vector<unique_ptr<mir::TensorVariant>> input_ntensor(1);
+ fillTensors(input_ntensor[0], input_atensor, shape_data, 1.0f);
+ auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::SqrtOp>(inputs[0]);
+ };
+ createAndRunTestGraph(op_generator, sqrtFN, input_ntensor, input_atensor);
+}
+
+TEST(cpp_operations_test, pad)
+{
+ // test on matrix 2x3
+ vector<int> input_shape{2, 3};
+
+ Tensor input_atensor;
+ vector<unique_ptr<mir::TensorVariant>> input_ntensor(1);
+ fillTensors(input_ntensor[0], input_atensor, input_shape, 1.0f);
+ // PadOp params
+ mir::PadOpAttributes attributes;
+ attributes.padding_before = {1, 2};
+ attributes.padding_after = {1, 2};
+
+ auto op_generator = [&attributes](mir::Graph &g,
+ const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::PadOp>(inputs[0], attributes);
+ };
+
+ createAndRunTestGraph(op_generator, pad, input_ntensor, input_atensor);
+}
+
+TEST(cpp_operations_test, transpose)
+{
+ // test transpose for 4 dims tensors
+ vector<int> input_shape_4d{2, 3, 4, 5};
+ Tensor input_atensor_4d;
+ vector<unique_ptr<mir::TensorVariant>> input_ntensor_4d(1);
+ fillTensors(input_ntensor_4d[0], input_atensor_4d, input_shape_4d, 1.0f);
+
+ vector<size_t> test_cases_pack_4d[] = {{0, 1, 2, 3}, {1, 0, 2, 3}, {3, 2, 1, 0}};
+ for (const auto &permute : test_cases_pack_4d)
+ {
+ auto op_generator = [&permute](mir::Graph &g,
+ const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::TransposeOp>(inputs[0], permute);
+ };
+ createAndRunTestGraph(op_generator, transpose, input_ntensor_4d, input_atensor_4d);
+ }
+
+ // test transpose for 3 dims tensors
+ vector<int> input_shape_3d{3, 4, 5};
+ Tensor input_atensor_3d;
+ vector<unique_ptr<mir::TensorVariant>> input_ntensor_3d(1);
+ fillTensors(input_ntensor_3d[0], input_atensor_3d, input_shape_3d, 1.0f);
+ vector<size_t> test_cases_pack_3d[] = {{0, 1, 2}, {1, 0, 2}, {2, 1, 0}};
+ for (const auto &permute : test_cases_pack_3d)
+ {
+ auto op_generator = [&permute](mir::Graph &g,
+ const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::TransposeOp>(inputs[0], permute);
+ };
+ createAndRunTestGraph(op_generator, transpose, input_ntensor_3d, input_atensor_3d);
+ }
+}
+
+TEST(cpp_operations_test, broadcast)
+{
+ const mir::Shape target_shapes[] = {{6}, {2, 3}, {2, 3, 1}, {1, 2, 1, 3}};
+ for (const mir::Shape &target_shape : target_shapes)
+ {
+    vector<int> input_shape_data{}; // empty shape: a rank-0 (scalar) input to broadcast
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
+ Tensor input_atensor;
+ fillTensors(input_ntensors[0], input_atensor, input_shape_data, 1.0f);
+ auto op_generator = [&target_shape](mir::Graph &g,
+ const std::vector<mir::Operation::Output *> &inputs) {
+ return g.create<mir::ops::BroadcastOp>(inputs[0], target_shape);
+ };
+ createAndRunTestGraph(op_generator, broadcast, input_ntensors, input_atensor);
+ }
+}