path: root/tests/nnfw_api
author     Chunseok Lee <chunseok.lee@samsung.com>  2020-10-28 12:16:55 +0900
committer  Chunseok Lee <chunseok.lee@samsung.com>  2020-10-28 12:16:55 +0900
commit     c55f8a6db48cda9d3a78048338b7f18c4cca62b8 (patch)
tree       761ee8e171e5203f5c598ad93b2e7e0bc2e31aa2 /tests/nnfw_api
parent     74476a2d0296bdad70a2f7f90bc7419a8b05bffd (diff)
Diffstat (limited to 'tests/nnfw_api')
-rw-r--r--  tests/nnfw_api/README.md                                 |   4
-rw-r--r--  tests/nnfw_api/src/CircleGen.cc                          | 174
-rw-r--r--  tests/nnfw_api/src/CircleGen.h                           |  70
-rw-r--r--  tests/nnfw_api/src/GenModelTest.h                        | 262
-rw-r--r--  tests/nnfw_api/src/GenModelTests.cc                      | 195
-rw-r--r--  tests/nnfw_api/src/ModelTestDynamicTensor.cc             | 197
-rw-r--r--  tests/nnfw_api/src/ModelTestInputReshaping.cc            |  57
-rw-r--r--  tests/nnfw_api/src/NNPackages.cc                         |   7
-rw-r--r--  tests/nnfw_api/src/NNPackages.h                          |   2
-rw-r--r--  tests/nnfw_api/src/RegressionTests.cc                    | 114
-rw-r--r--  tests/nnfw_api/src/ValidationTestAddSessionPrepared.cc  |  30
-rw-r--r--  tests/nnfw_api/src/fixtures.h                            |  15
-rw-r--r--  tests/nnfw_api/src/one_op_tests/Add.cc                   |  75
-rw-r--r--  tests/nnfw_api/src/one_op_tests/ArgMax.cc                | 122
-rw-r--r--  tests/nnfw_api/src/one_op_tests/AveragePool2D.cc         |  79
-rw-r--r--  tests/nnfw_api/src/one_op_tests/Cast.cc                  | 159
-rw-r--r--  tests/nnfw_api/src/one_op_tests/Concat.cc                | 210
-rw-r--r--  tests/nnfw_api/src/one_op_tests/Cos.cc                   |   4
-rw-r--r--  tests/nnfw_api/src/one_op_tests/Equal.cc                 |  69
-rw-r--r--  tests/nnfw_api/src/one_op_tests/FullyConnected.cc        |  90
-rw-r--r--  tests/nnfw_api/src/one_op_tests/If.cc                    | 132
-rw-r--r--  tests/nnfw_api/src/one_op_tests/InstanceNorm.cc          |  57
-rw-r--r--  tests/nnfw_api/src/one_op_tests/L2Normalization.cc       |   7
-rw-r--r--  tests/nnfw_api/src/one_op_tests/LeakyRelu.cc             |  18
-rw-r--r--  tests/nnfw_api/src/one_op_tests/LogSoftmax.cc            |  51
-rw-r--r--  tests/nnfw_api/src/one_op_tests/OneHot.cc                | 201
-rw-r--r--  tests/nnfw_api/src/one_op_tests/Pad.cc                   |   9
-rw-r--r--  tests/nnfw_api/src/one_op_tests/PadV2.cc                 |   9
-rw-r--r--  tests/nnfw_api/src/one_op_tests/Rank.cc                  |  22
-rw-r--r--  tests/nnfw_api/src/one_op_tests/ResizeBilinear.cc        |  75
-rw-r--r--  tests/nnfw_api/src/one_op_tests/ResizeNearestNeighbor.cc |   5
-rw-r--r--  tests/nnfw_api/src/one_op_tests/Reverse.cc               |  59
-rw-r--r--  tests/nnfw_api/src/one_op_tests/Split.cc                 |  65
-rw-r--r--  tests/nnfw_api/src/one_op_tests/StridedSlice.cc          |  43
-rw-r--r--  tests/nnfw_api/src/one_op_tests/Tile.cc                  | 136
-rw-r--r--  tests/nnfw_api/src/one_op_tests/Transpose.cc             | 145
-rw-r--r--  tests/nnfw_api/src/one_op_tests/While.cc                 | 126
37 files changed, 2866 insertions, 229 deletions
diff --git a/tests/nnfw_api/README.md b/tests/nnfw_api/README.md
index 7e14fc445..58ba12992 100644
--- a/tests/nnfw_api/README.md
+++ b/tests/nnfw_api/README.md
@@ -16,6 +16,8 @@ This test framework consists of 3 kinds of tests:
## nnpackages for testing
+> NOTE Adding a test this way is not recommended, since you can build a Circle model in code using the `CircleGen` class. See also `GenModelTest`.
+
To test *nnfw_api*, we almost always need some nnpackages. Those are stored on a web server, so there are no nnpackage files in the repo.
### How to add nnpackages for test
@@ -27,4 +29,4 @@ Once you have done the above steps, please register it in the test source code t
### Installation
-You must install the test nnpackages before running the tests. They must be in the same directory with the test executable, under `nnfw_api_gtest_models/`. There is an installation script `tests/scripts/nnfw_api_gtest/install_nnfw_api_gtest_nnpackages.sh`, however the nnpackage file server is not public so it will fail.
+You must install the test nnpackages before running the tests. They must be in the same directory as the test executable, under `nnfw_api_gtest_models/`. Installation is done with the command `onert-test prepare-model`. It runs correctly only on CI, since the nnpackage file server is not public.
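
For reference, the recommended code-generated style is the one used by the GenModelTest cases in this commit. A minimal sketch, mirroring the `OneOp_Add_VarToVar` test in `one_op_tests/Add.cc` further below:

#include "GenModelTest.h"

#include <memory>

TEST_F(GenModelTest, OneOp_Add_VarToVar)
{
  // Build a one-op Add model in memory instead of installing an nnpackage.
  CircleGen cgen;
  int lhs = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
  int rhs = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
  int out = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
  cgen.addOperatorAdd({{lhs, rhs}, {out}}, circle::ActivationFunctionType_NONE);
  cgen.setInputsAndOutputs({lhs, rhs}, {out});

  // Register reference inputs/outputs and the backends to run against.
  _context = std::make_unique<GenModelTestContext>(cgen.finish());
  _context->addTestCase(uniformTCD<float>({{1, 3, 2, 4}, {5, 4, 7, 4}}, {{6, 7, 9, 8}}));
  _context->setBackends({"acl_cl", "acl_neon", "cpu"});

  SUCCEED();
}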
diff --git a/tests/nnfw_api/src/CircleGen.cc b/tests/nnfw_api/src/CircleGen.cc
index 19cb95f37..8040f7dfb 100644
--- a/tests/nnfw_api/src/CircleGen.cc
+++ b/tests/nnfw_api/src/CircleGen.cc
@@ -14,22 +14,6 @@
* limitations under the License.
*/
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
#include "CircleGen.h"
CircleGen::CircleGen() : _subgraph_contexts(1) // Create primary subgraph
@@ -54,11 +38,18 @@ uint32_t CircleGen::addBuffer(const uint8_t *buf, size_t size)
uint32_t CircleGen::addTensor(const TensorParams &params)
{
- int ind = curSubgCtx().tensors.size();
+ uint32_t ind = curSubgCtx().tensors.size();
curSubgCtx().tensors.emplace_back(buildTensor(params));
return ind;
}
+uint32_t CircleGen::addTensor(const TensorParams &params, const SparsityParams &sp)
+{
+ uint32_t ind = curSubgCtx().tensors.size();
+ curSubgCtx().tensors.emplace_back(buildTensor(params, sp));
+ return ind;
+}
+
void CircleGen::setInputsAndOutputs(const std::vector<int> &inputs, const std::vector<int> &outputs)
{
curSubgCtx().inputs = inputs;
@@ -93,6 +84,13 @@ uint32_t CircleGen::addOperatorAdd(const OperatorParams &params,
circle::BuiltinOptions_AddOptions, options);
}
+uint32_t CircleGen::addOperatorArgMax(const OperatorParams &params, circle::TensorType output_type)
+{
+ auto options = circle::CreateArgMaxOptions(_fbb, output_type).Union();
+ return addOperatorWithOptions(params, circle::BuiltinOperator_ARG_MAX,
+ circle::BuiltinOptions_ArgMaxOptions, options);
+}
+
uint32_t CircleGen::addOperatorAveragePool2D(const OperatorParams &params, circle::Padding padding,
int stride_w, int stride_h, int filter_w, int filter_h,
circle::ActivationFunctionType actfn)
@@ -104,6 +102,14 @@ uint32_t CircleGen::addOperatorAveragePool2D(const OperatorParams &params, circl
circle::BuiltinOptions_Pool2DOptions, options);
}
+uint32_t CircleGen::addOperatorCast(const OperatorParams &params, circle::TensorType input_type,
+ circle::TensorType output_type)
+{
+ auto options = circle::CreateCastOptions(_fbb, input_type, output_type).Union();
+ return addOperatorWithOptions(params, circle::BuiltinOperator_CAST,
+ circle::BuiltinOptions_CastOptions, options);
+}
+
uint32_t CircleGen::addOperatorConcatenation(const OperatorParams &params, int axis,
circle::ActivationFunctionType actfn)
{
@@ -119,6 +125,20 @@ uint32_t CircleGen::addOperatorCos(const OperatorParams &params)
circle::BuiltinOptions_CosOptions, options);
}
+uint32_t CircleGen::addOperatorEqual(const OperatorParams &params)
+{
+ auto options = circle::CreateEqualOptions(_fbb).Union();
+ return addOperatorWithOptions(params, circle::BuiltinOperator_EQUAL,
+ circle::BuiltinOptions_EqualOptions, options);
+}
+
+uint32_t CircleGen::addOperatorFullyConnected(const OperatorParams &params)
+{
+ auto options = circle::CreateFullyConnectedOptions(_fbb).Union();
+ return addOperatorWithOptions(params, circle::BuiltinOperator_FULLY_CONNECTED,
+ circle::BuiltinOptions_FullyConnectedOptions, options);
+}
+
uint32_t CircleGen::addOperatorL2Normalization(const OperatorParams &params)
{
auto options = circle::CreateL2NormOptions(_fbb).Union();
@@ -140,6 +160,13 @@ uint32_t CircleGen::addOperatorLeakyRelu(const OperatorParams &params, float alp
circle::BuiltinOptions_LeakyReluOptions, options);
}
+uint32_t CircleGen::addOperatorLogSoftmax(const OperatorParams &params)
+{
+ auto options = circle::CreateLogSoftmaxOptions(_fbb).Union();
+ return addOperatorWithOptions(params, circle::BuiltinOperator_LOG_SOFTMAX,
+ circle::BuiltinOptions_LogSoftmaxOptions, options);
+}
+
uint32_t CircleGen::addOperatorNeg(const OperatorParams &params)
{
auto options = circle::CreatePadOptions(_fbb).Union();
@@ -147,6 +174,13 @@ uint32_t CircleGen::addOperatorNeg(const OperatorParams &params)
circle::BuiltinOptions_NegOptions, options);
}
+uint32_t CircleGen::addOperatorOneHot(const OperatorParams &params, int32_t axis)
+{
+ auto options = circle::CreateOneHotOptions(_fbb, axis).Union();
+ return addOperatorWithOptions(params, circle::BuiltinOperator_ONE_HOT,
+ circle::BuiltinOptions_OneHotOptions, options);
+}
+
uint32_t CircleGen::addOperatorPad(const OperatorParams &params)
{
auto options = circle::CreatePadOptions(_fbb).Union();
@@ -168,6 +202,22 @@ uint32_t CircleGen::addOperatorRank(const OperatorParams &params)
circle::BuiltinOptions_RankOptions, options);
}
+uint32_t CircleGen::addOperatorReshape(const OperatorParams &params, const Shape &new_shape)
+{
+ auto options = circle::CreateReshapeOptionsDirect(_fbb, &new_shape).Union();
+ return addOperatorWithOptions(params, circle::BuiltinOperator_RESHAPE,
+ circle::BuiltinOptions_ReshapeOptions, options);
+}
+
+uint32_t CircleGen::addOperatorResizeBilinear(const OperatorParams &params, bool align_corners,
+ bool half_pixel_centers)
+{
+ auto options =
+ circle::CreateResizeBilinearOptions(_fbb, align_corners, half_pixel_centers).Union();
+ return addOperatorWithOptions(params, circle::BuiltinOperator_RESIZE_BILINEAR,
+ circle::BuiltinOptions_ResizeBilinearOptions, options);
+}
+
uint32_t CircleGen::addOperatorResizeNearestNeighbor(const OperatorParams &params)
{
auto options = circle::CreateResizeNearestNeighborOptions(_fbb).Union();
@@ -175,6 +225,36 @@ uint32_t CircleGen::addOperatorResizeNearestNeighbor(const OperatorParams &param
circle::BuiltinOptions_ResizeNearestNeighborOptions, options);
}
+uint32_t CircleGen::addOperatorReverseV2(const OperatorParams &params)
+{
+ auto options = circle::CreateReverseV2Options(_fbb).Union();
+ return addOperatorWithOptions(params, circle::BuiltinOperator_REVERSE_V2,
+ circle::BuiltinOptions_ReverseV2Options, options);
+}
+
+uint32_t CircleGen::addOperatorSplit(const OperatorParams &params, int32_t num_split)
+{
+ auto options = circle::CreateSplitOptions(_fbb, num_split).Union();
+ return addOperatorWithOptions(params, circle::BuiltinOperator_SPLIT,
+ circle::BuiltinOptions_SplitOptions, options);
+}
+uint32_t CircleGen::addOperatorStridedSlice(const OperatorParams &params, int32_t begin_mask,
+ int32_t end_mask, int32_t ellipsis_mask,
+ int32_t new_axis_mask, int32_t shrink_axis_mask)
+{
+ auto options = circle::CreateStridedSliceOptions(_fbb, begin_mask, end_mask, ellipsis_mask,
+ new_axis_mask, shrink_axis_mask)
+ .Union();
+ return addOperatorWithOptions(params, circle::BuiltinOperator_STRIDED_SLICE,
+ circle::BuiltinOptions_StridedSliceOptions, options);
+}
+uint32_t CircleGen::addOperatorTile(const OperatorParams &params)
+{
+ auto options = circle::CreateTileOptions(_fbb).Union();
+ return addOperatorWithOptions(params, circle::BuiltinOperator_TILE,
+ circle::BuiltinOptions_TileOptions, options);
+}
+
uint32_t CircleGen::addOperatorWhile(const OperatorParams &params, uint32_t cond_subg,
uint32_t body_subg)
{
@@ -183,6 +263,29 @@ uint32_t CircleGen::addOperatorWhile(const OperatorParams &params, uint32_t cond
circle::BuiltinOptions_WhileOptions, options);
}
+uint32_t CircleGen::addOperatorIf(const OperatorParams &params, uint32_t then_subg,
+ uint32_t else_subg)
+{
+ auto options = circle::CreateIfOptions(_fbb, then_subg, else_subg).Union();
+ return addOperatorWithOptions(params, circle::BuiltinOperator_IF,
+ circle::BuiltinOptions_IfOptions, options);
+}
+
+uint32_t CircleGen::addOperatorInstanceNorm(const OperatorParams &params, float epsilon,
+ circle::ActivationFunctionType actfn)
+{
+ auto options = circle::CreateInstanceNormOptions(_fbb, epsilon, actfn).Union();
+ return addOperatorWithOptions(params, circle::BuiltinOperator_INSTANCE_NORM,
+ circle::BuiltinOptions_InstanceNormOptions, options);
+}
+
+uint32_t CircleGen::addOperatorTranspose(const OperatorParams &params)
+{
+ auto options = circle::CreateTransposeOptions(_fbb).Union();
+ return addOperatorWithOptions(params, circle::BuiltinOperator_TRANSPOSE,
+ circle::BuiltinOptions_TransposeOptions, options);
+}
+
// NOTE Please add addOperator functions ABOVE this line
//
// % How to add a new addOperatorXXX function
@@ -233,6 +336,43 @@ flatbuffers::Offset<circle::Tensor> CircleGen::buildTensor(const TensorParams &p
0 /* shape_signature */);
}
+flatbuffers::Offset<circle::SparsityParameters>
+CircleGen::buildSparsityParameters(const SparsityParams &sp)
+{
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> traversal_order;
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> block_map;
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<circle::DimensionMetadata>>>
+ dim_metadata;
+
+ traversal_order = _fbb.CreateVector(sp.traversal_order);
+ block_map = _fbb.CreateVector(sp.block_map);
+
+ std::vector<flatbuffers::Offset<circle::DimensionMetadata>> dim_metadata_vec;
+ for (auto &it : sp.dim_metadata)
+ {
+ auto fb_array_segments = circle::CreateUint16VectorDirect(_fbb, &it._array_segments.u16);
+ auto fb_array_indices = circle::CreateUint16VectorDirect(_fbb, &it._array_indices.u16);
+ auto dim_metadata = circle::CreateDimensionMetadata(
+ _fbb, it._format, it._dense_size, it._array_segments_type, fb_array_segments.Union(),
+ it._array_indices_type, fb_array_indices.Union());
+ dim_metadata_vec.emplace_back(dim_metadata);
+ }
+ dim_metadata = _fbb.CreateVector(dim_metadata_vec);
+
+ return circle::CreateSparsityParameters(_fbb, traversal_order, block_map, dim_metadata);
+}
+
+flatbuffers::Offset<circle::Tensor> CircleGen::buildTensor(const TensorParams &params,
+ const SparsityParams &sp)
+{
+ auto shape = _fbb.CreateVector(params.shape);
+ auto name = _fbb.CreateString(params.name);
+ auto sparsity = buildSparsityParameters(sp);
+ return circle::CreateTensor(_fbb, shape, params.tensor_type, params.buffer, name,
+ 0 /* QuantParam */, false /* is_variable */, sparsity,
+ 0 /* shape_signature */);
+}
+
flatbuffers::Offset<circle::SubGraph> CircleGen::buildSubGraph(const SubgraphContext &ctx)
{
return circle::CreateSubGraphDirect(_fbb, &ctx.tensors, &ctx.inputs, &ctx.outputs, &ctx.operators,
diff --git a/tests/nnfw_api/src/CircleGen.h b/tests/nnfw_api/src/CircleGen.h
index 09ca5a5db..d72fb95ab 100644
--- a/tests/nnfw_api/src/CircleGen.h
+++ b/tests/nnfw_api/src/CircleGen.h
@@ -52,6 +52,47 @@ private:
class CircleGen
{
public:
+ using Shape = std::vector<int32_t>;
+
+ using SparseIndexVectorType = circle::SparseIndexVector;
+ using SparseDimensionType = circle::DimensionType;
+
+ struct SparseIndexVector
+ {
+ std::vector<uint16_t> u16;
+ };
+
+ struct DimMetaData
+ {
+ DimMetaData() = delete;
+ DimMetaData(SparseDimensionType format, std::vector<uint16_t> array_segments,
+ std::vector<uint16_t> array_indices)
+ : _format{format},
+ _array_segments_type(SparseIndexVectorType::SparseIndexVector_Uint16Vector),
+ _array_indices_type(SparseIndexVectorType::SparseIndexVector_Uint16Vector)
+ {
+ _array_segments.u16 = array_segments;
+ _array_indices.u16 = array_indices;
+ }
+ DimMetaData(SparseDimensionType format, int32_t dense_size)
+ : _format{format}, _dense_size{dense_size}
+ {
+ }
+ SparseDimensionType _format{circle::DimensionType_DENSE};
+ int32_t _dense_size{0};
+ SparseIndexVectorType _array_segments_type{circle::SparseIndexVector_NONE};
+ SparseIndexVector _array_segments;
+ SparseIndexVectorType _array_indices_type{circle::SparseIndexVector_NONE};
+ SparseIndexVector _array_indices;
+ };
+
+ struct SparsityParams
+ {
+ std::vector<int32_t> traversal_order;
+ std::vector<int32_t> block_map;
+ std::vector<DimMetaData> dim_metadata;
+ };
+
struct TensorParams
{
std::vector<int32_t> shape;
@@ -86,30 +127,52 @@ public:
}
uint32_t addBuffer(const uint8_t *buf, size_t size);
uint32_t addTensor(const TensorParams &params);
+ uint32_t addTensor(const TensorParams &params, const SparsityParams &sp);
void setInputsAndOutputs(const std::vector<int> &inputs, const std::vector<int> &outputs);
uint32_t nextSubgraph();
CircleBuffer finish();
- // ===== Add Operator methods begin =====
+ // ===== Add Operator methods begin (SORTED IN ALPHABETICAL ORDER) =====
uint32_t addOperatorAdd(const OperatorParams &params, circle::ActivationFunctionType actfn);
+ uint32_t addOperatorArgMax(const OperatorParams &params,
+ circle::TensorType output_type = circle::TensorType::TensorType_INT32);
uint32_t addOperatorAveragePool2D(const OperatorParams &params, circle::Padding padding,
int stride_w, int stride_h, int filter_w, int filter_h,
circle::ActivationFunctionType actfn);
+ uint32_t addOperatorCast(const OperatorParams &params, circle::TensorType input_type,
+ circle::TensorType output_type);
uint32_t addOperatorConcatenation(const OperatorParams &params, int axis,
circle::ActivationFunctionType actfn);
uint32_t addOperatorCos(const OperatorParams &params);
+ uint32_t addOperatorEqual(const OperatorParams &params);
+ uint32_t addOperatorFullyConnected(const OperatorParams &params);
+ uint32_t addOperatorIf(const OperatorParams &params, uint32_t then_subg, uint32_t else_subg);
+ uint32_t addOperatorInstanceNorm(const OperatorParams &params, float epsilon,
+ circle::ActivationFunctionType actfn);
uint32_t addOperatorL2Normalization(const OperatorParams &params);
uint32_t addOperatorLeakyRelu(const OperatorParams &params, float alpha);
uint32_t addOperatorLess(const OperatorParams &params);
+ uint32_t addOperatorLogSoftmax(const OperatorParams &params);
uint32_t addOperatorNeg(const OperatorParams &params);
+ uint32_t addOperatorOneHot(const OperatorParams &params, int32_t axis);
uint32_t addOperatorPad(const OperatorParams &params);
uint32_t addOperatorPadV2(const OperatorParams &params);
uint32_t addOperatorRank(const OperatorParams &params);
+ uint32_t addOperatorReshape(const OperatorParams &params, const Shape &new_shape);
+ uint32_t addOperatorResizeBilinear(const OperatorParams &params, bool align_corners = false,
+ bool half_pixel_centers = false);
uint32_t addOperatorResizeNearestNeighbor(const OperatorParams &params);
+ uint32_t addOperatorReverseV2(const OperatorParams &params);
+ uint32_t addOperatorSplit(const OperatorParams &params, int32_t num_split);
+ uint32_t addOperatorStridedSlice(const OperatorParams &params, int32_t begin_mask = 0,
+ int32_t end_mask = 0, int32_t ellipsis_mask = 0,
+ int32_t new_axis_mask = 0, int32_t shrink_axis_mask = 0);
+ uint32_t addOperatorTile(const OperatorParams &params);
+ uint32_t addOperatorTranspose(const OperatorParams &params);
uint32_t addOperatorWhile(const OperatorParams &params, uint32_t cond_subg, uint32_t body_subg);
- // NOTE Please add addOperator functions ABOVE this lie
+ // NOTE Please add addOperator functions ABOVE this line in ALPHABETICAL ORDER
// ===== Add Operator methods end =====
private:
@@ -119,6 +182,9 @@ private:
uint32_t addOperatorCode(circle::BuiltinOperator opcode);
flatbuffers::Offset<circle::Buffer> buildBuffer(const uint8_t *buf, size_t size);
flatbuffers::Offset<circle::Tensor> buildTensor(const TensorParams &params);
+ flatbuffers::Offset<circle::SparsityParameters> buildSparsityParameters(const SparsityParams &sp);
+ flatbuffers::Offset<circle::Tensor> buildTensor(const TensorParams &params,
+ const SparsityParams &sp);
flatbuffers::Offset<circle::SubGraph> buildSubGraph(const SubgraphContext &ctx);
SubgraphContext &curSubgCtx() { return _subgraph_contexts.back(); }
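
As a rough usage sketch of the new sparse-tensor API above (the traversal order, segment, and index values below are illustrative placeholders, not taken from this commit):

// Hypothetical example: a 2x3 constant weight stored in circle's CSR-style sparse format.
CircleGen cgen;
std::vector<float> nz_values{1.0f, 2.0f, 3.0f}; // non-zero elements in traversal order
uint32_t weight_buf = cgen.addBuffer(nz_values);

CircleGen::SparsityParams sp{
    /* traversal_order */ {0, 1},
    /* block_map */ {},
    /* dim_metadata */ {
        CircleGen::DimMetaData(circle::DimensionType_DENSE, /* dense_size */ 2),
        CircleGen::DimMetaData(circle::DimensionType_SPARSE_CSR,
                               /* array_segments */ {0, 1, 3},
                               /* array_indices */ {1, 0, 2})}};

int sparse_weight =
    cgen.addTensor({{2, 3}, circle::TensorType::TensorType_FLOAT32, weight_buf}, sp);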
diff --git a/tests/nnfw_api/src/GenModelTest.h b/tests/nnfw_api/src/GenModelTest.h
index 530ccdd8c..a4c67a863 100644
--- a/tests/nnfw_api/src/GenModelTest.h
+++ b/tests/nnfw_api/src/GenModelTest.h
@@ -19,26 +19,121 @@
#include <fstream>
#include <string>
+#include <unordered_map>
#include "CircleGen.h"
#include "fixtures.h"
+inline size_t sizeOfNnfwType(NNFW_TYPE type)
+{
+ switch (type)
+ {
+ case NNFW_TYPE_TENSOR_BOOL:
+ case NNFW_TYPE_TENSOR_UINT8:
+ case NNFW_TYPE_TENSOR_QUANT8_ASYMM:
+ return 1;
+ case NNFW_TYPE_TENSOR_FLOAT32:
+ case NNFW_TYPE_TENSOR_INT32:
+ return 4;
+ case NNFW_TYPE_TENSOR_INT64:
+ return 8;
+ default:
+ throw std::runtime_error{"Invalid tensor type"};
+ }
+}
+
+// TODO Unify this with `SessionObject` in `fixtures.h`
+struct SessionObjectGeneric
+{
+ nnfw_session *session = nullptr;
+ std::vector<std::vector<uint8_t>> inputs;
+ std::vector<std::vector<uint8_t>> outputs;
+};
+
struct TestCaseData
{
/**
* @brief A vector of input buffers
- *
- * @todo support other types as well as float
*/
- std::vector<std::vector<float>> inputs;
+ std::vector<std::vector<uint8_t>> inputs;
+
/**
* @brief A vector of output buffers
+ */
+ std::vector<std::vector<uint8_t>> outputs;
+
+ /**
+ * @brief Append vector data to inputs
*
- * @todo support other types as well as float
+ * @tparam T Data type
+ * @param data vector data array
+ */
+ template <typename T> void addInput(const std::vector<T> &data) { addData(inputs, data); }
+
+ /**
+ * @brief Append vector data to inputs
+ *
+ * @tparam T Data type
+ * @param data vector data array
+ */
+ template <typename T> void addOutput(const std::vector<T> &data) { addData(outputs, data); }
+
+ /**
+ * @brief Set @c True if @c NNFW_STATUS_ERROR is expected after calling @c nnfw_run() with
+ * this test case; set @c False otherwise.
*/
- std::vector<std::vector<float>> outputs;
+ void expect_error_on_run(bool expect_error_on_run) { _expect_error_on_run = expect_error_on_run; }
+ bool expect_error_on_run() const { return _expect_error_on_run; }
+
+private:
+ template <typename T>
+ static void addData(std::vector<std::vector<uint8_t>> &dest, const std::vector<T> &data)
+ {
+ size_t size = data.size() * sizeof(T);
+ dest.emplace_back();
+ dest.back().resize(size);
+ std::memcpy(dest.back().data(), data.data(), size);
+ }
+
+ bool _expect_error_on_run = false;
};
+template <>
+inline void TestCaseData::addData<bool>(std::vector<std::vector<uint8_t>> &dest,
+ const std::vector<bool> &data)
+{
+ size_t size = data.size() * sizeof(uint8_t);
+ dest.emplace_back();
+ dest.back().resize(size);
+ std::transform(data.cbegin(), data.cend(), dest.back().data(),
+ [](bool b) { return static_cast<uint8_t>(b); });
+}
+
+/**
+ * @brief Create a TestCaseData with a uniform type
+ *
+ * A helper function for generating test cases that have the same data type for all model inputs/outputs.
+ *
+ * @tparam T Uniform tensor type
+ * @param inputs Inputs tensor buffers
+ * @param outputs Output tensor buffers
+ * @return TestCaseData Generated test case data
+ */
+template <typename T>
+static TestCaseData uniformTCD(const std::vector<std::vector<T>> &inputs,
+ const std::vector<std::vector<T>> &outputs)
+{
+ TestCaseData ret;
+ for (const auto &data : inputs)
+ ret.addInput(data);
+ for (const auto &data : outputs)
+ ret.addOutput(data);
+ return ret;
+}
+
+/**
+ * @brief A test configuration class
+ */
class GenModelTestContext
{
public:
@@ -66,11 +161,32 @@ public:
const std::vector<std::string> &backends() const { return _backends; }
/**
+ * @brief Return whether the test is expected to fail on model load
+ *
+ * @return bool whether the test is expected to fail on model load
+ */
+ bool expected_fail_model_load() const { return _expected_fail_model_load; }
+
+ /**
 * @brief Return whether the test is expected to fail on compile
 *
 * @return bool whether the test is expected to fail on compile
*/
- const bool fail_compile() const { return _fail_compile; }
+ bool expected_fail_compile() const { return _expected_fail_compile; }
+
+ /**
+ * @brief Set the output buffer size of the specified output tensor
+ * Note that the output tensor size of a model with dynamic tensors is calculated
+ * while running the model.
+ * Therefore, before running the model, a sufficiently large buffer should
+ * be prepared by calling this method.
+ * The size does not need to be exact.
+ */
+ void output_sizes(uint32_t ind, size_t size) { _output_sizes[ind] = size; }
+
+ size_t output_sizes(uint32_t ind) const { return _output_sizes.at(ind); }
+
+ bool hasOutputSizes(uint32_t ind) const { return _output_sizes.find(ind) != _output_sizes.end(); }
/**
* @brief Add a test case
@@ -104,15 +220,22 @@ public:
}
/**
- * @brief Set the Test Fail
+ * @brief Expect failure while model load
*/
- void setCompileFail() { _fail_compile = true; }
+ void expectFailModelLoad() { _expected_fail_model_load = true; }
+
+ /**
+ * @brief Expect failure while compiling
+ */
+ void expectFailCompile() { _expected_fail_compile = true; }
private:
CircleBuffer _cbuf;
std::vector<TestCaseData> _test_cases;
std::vector<std::string> _backends;
- bool _fail_compile{false};
+ std::unordered_map<uint32_t, size_t> _output_sizes;
+ bool _expected_fail_model_load{false};
+ bool _expected_fail_compile{false};
};
/**
@@ -141,10 +264,19 @@ protected:
// nnfw_load_circle_from_buffer to outside forloop
NNFW_ENSURE_SUCCESS(nnfw_create_session(&_so.session));
auto &cbuf = _context->cbuf();
- NNFW_ENSURE_SUCCESS(nnfw_load_circle_from_buffer(_so.session, cbuf.buffer(), cbuf.size()));
+ auto model_load_result =
+ nnfw_load_circle_from_buffer(_so.session, cbuf.buffer(), cbuf.size());
+ if (_context->expected_fail_model_load())
+ {
+ ASSERT_NE(model_load_result, NNFW_STATUS_NO_ERROR);
+ std::cerr << "Failed model loading as expected." << std::endl;
+ NNFW_ENSURE_SUCCESS(nnfw_close_session(_so.session));
+ continue;
+ }
+ NNFW_ENSURE_SUCCESS(model_load_result);
NNFW_ENSURE_SUCCESS(nnfw_set_available_backends(_so.session, backend.data()));
- if (_context->fail_compile())
+ if (_context->expected_fail_compile())
{
ASSERT_EQ(nnfw_prepare(_so.session), NNFW_STATUS_ERROR);
@@ -162,11 +294,18 @@ protected:
nnfw_tensorinfo ti;
NNFW_ENSURE_SUCCESS(nnfw_input_tensorinfo(_so.session, ind, &ti));
uint64_t input_elements = num_elems(&ti);
- _so.inputs[ind].resize(input_elements);
-
- ASSERT_EQ(nnfw_set_input(_so.session, ind, ti.dtype, _so.inputs[ind].data(),
- sizeof(float) * input_elements),
- NNFW_STATUS_NO_ERROR);
+ _so.inputs[ind].resize(input_elements * sizeOfNnfwType(ti.dtype));
+ if (_so.inputs[ind].size() == 0)
+ {
+ // Optional inputs
+ ASSERT_EQ(nnfw_set_input(_so.session, ind, ti.dtype, nullptr, 0), NNFW_STATUS_NO_ERROR);
+ }
+ else
+ {
+ ASSERT_EQ(nnfw_set_input(_so.session, ind, ti.dtype, _so.inputs[ind].data(),
+ _so.inputs[ind].size()),
+ NNFW_STATUS_NO_ERROR);
+ }
}
uint32_t num_outputs;
@@ -176,10 +315,24 @@ protected:
{
nnfw_tensorinfo ti;
NNFW_ENSURE_SUCCESS(nnfw_output_tensorinfo(_so.session, ind, &ti));
- uint64_t output_elements = num_elems(&ti);
- _so.outputs[ind].resize(output_elements);
+
+ auto size = 0;
+ {
+ if (_context->hasOutputSizes(ind))
+ {
+ size = _context->output_sizes(ind);
+ }
+ else
+ {
+ uint64_t output_elements = num_elems(&ti);
+ size = output_elements * sizeOfNnfwType(ti.dtype);
+ }
+ _so.outputs[ind].resize(size);
+ }
+
+ ASSERT_GT(_so.outputs[ind].size(), 0) << "Please make sure TC output is non-empty.";
ASSERT_EQ(nnfw_set_output(_so.session, ind, ti.dtype, _so.outputs[ind].data(),
- sizeof(float) * output_elements),
+ _so.outputs[ind].size()),
NNFW_STATUS_NO_ERROR);
}
@@ -193,7 +346,13 @@ protected:
{
// Fill the values
ASSERT_EQ(_so.inputs[i].size(), ref_inputs[i].size());
- memcpy(_so.inputs[i].data(), ref_inputs[i].data(), _so.inputs[i].size() * sizeof(float));
+ memcpy(_so.inputs[i].data(), ref_inputs[i].data(), ref_inputs[i].size());
+ }
+
+ if (test_case.expect_error_on_run())
+ {
+ ASSERT_EQ(nnfw_run(_so.session), NNFW_STATUS_ERROR);
+ continue;
}
NNFW_ENSURE_SUCCESS(nnfw_run(_so.session));
@@ -201,12 +360,43 @@ protected:
ASSERT_EQ(_so.outputs.size(), ref_outputs.size());
for (uint32_t i = 0; i < _so.outputs.size(); i++)
{
+ nnfw_tensorinfo ti;
+ NNFW_ENSURE_SUCCESS(nnfw_output_tensorinfo(_so.session, i, &ti));
+
// Check output tensor values
auto &ref_output = ref_outputs[i];
auto &output = _so.outputs[i];
ASSERT_EQ(output.size(), ref_output.size());
- for (uint32_t e = 0; e < ref_output.size(); e++)
- EXPECT_NEAR(ref_output[e], output[e], 0.001); // TODO better way for handling FP error?
+
+ switch (ti.dtype)
+ {
+ case NNFW_TYPE_TENSOR_BOOL:
+ compareBuffersExactBool(ref_output, output, i);
+ break;
+ case NNFW_TYPE_TENSOR_UINT8:
+ compareBuffersExact<uint8_t>(ref_output, output, i);
+ break;
+ case NNFW_TYPE_TENSOR_INT32:
+ compareBuffersExact<int32_t>(ref_output, output, i);
+ break;
+ case NNFW_TYPE_TENSOR_FLOAT32:
+ // TODO better way for handling FP error?
+ for (uint32_t e = 0; e < ref_output.size() / sizeof(float); e++)
+ {
+ float refval = reinterpret_cast<const float *>(ref_output.data())[e];
+ float val = reinterpret_cast<const float *>(output.data())[e];
+ EXPECT_NEAR(refval, val, 0.001) << "Output #" << i << ", Element Index : " << e;
+ }
+ break;
+ case NNFW_TYPE_TENSOR_INT64:
+ compareBuffersExact<int64_t>(ref_output, output, i);
+ break;
+ case NNFW_TYPE_TENSOR_QUANT8_ASYMM:
+ throw std::runtime_error{"NYI : comparison of tensors of QUANT8_ASYMM"};
+ default:
+ throw std::runtime_error{"Invalid tensor type"};
+ }
+ // TODO Add shape comparison
}
}
@@ -214,7 +404,33 @@ protected:
}
}
+private:
+ template <typename T>
+ void compareBuffersExact(const std::vector<uint8_t> &ref_buf, const std::vector<uint8_t> &act_buf,
+ uint32_t index)
+ {
+ for (uint32_t e = 0; e < ref_buf.size() / sizeof(T); e++)
+ {
+ T ref = reinterpret_cast<const T *>(ref_buf.data())[e];
+ T act = reinterpret_cast<const T *>(act_buf.data())[e];
+ EXPECT_EQ(ref, act) << "Output #" << index << ", Element Index : " << e;
+ }
+ }
+
+ void compareBuffersExactBool(const std::vector<uint8_t> &ref_buf,
+ const std::vector<uint8_t> &act_buf, uint32_t index)
+ {
+ for (uint32_t e = 0; e < ref_buf.size() / sizeof(uint8_t); e++)
+ {
+ uint8_t ref_raw = reinterpret_cast<const uint8_t *>(ref_buf.data())[e];
+ bool ref = (ref_raw != 0 ? true : false);
+ uint8_t act_raw = reinterpret_cast<const uint8_t *>(act_buf.data())[e];
+ bool act = (act_raw != 0 ? true : false);
+ EXPECT_EQ(ref, act) << "Output #" << index << ", Element Index : " << e;
+ }
+ }
+
protected:
- SessionObject _so;
+ SessionObjectGeneric _so;
std::unique_ptr<GenModelTestContext> _context;
};
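
Since `TestCaseData` now stores raw byte buffers, a single test case can mix tensor types, which `uniformTCD` cannot express. A hypothetical sketch as it would appear inside a `GenModelTest` test body, for a model with a float input and an int32 output (values are placeholders):

// Hypothetical mixed-type test case, e.g. for an ArgMax-style model.
TestCaseData tcd;
tcd.addInput<float>({1.0f, 4.0f, 2.0f, 3.0f}); // float input buffer
tcd.addOutput<int32_t>({1});                   // int32 output buffer
// tcd.expect_error_on_run(true);              // only for negative test cases
_context->addTestCase(tcd);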
diff --git a/tests/nnfw_api/src/GenModelTests.cc b/tests/nnfw_api/src/GenModelTests.cc
new file mode 100644
index 000000000..538da5dfa
--- /dev/null
+++ b/tests/nnfw_api/src/GenModelTests.cc
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file This file contains miscellaneous GenModelTest test cases.
+ *
+ */
+
+#include "GenModelTest.h"
+
+#include <memory>
+
+TEST_F(GenModelTest, UnusedConstOutputOnly)
+{
+ // A single tensor which is constant
+ CircleGen cgen;
+ uint32_t const_buf = cgen.addBuffer(std::vector<float>{9, 8, 7, 6});
+ int out_const = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32, const_buf});
+ cgen.setInputsAndOutputs({}, {out_const});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(uniformTCD<float>({}, {{9, 8, 7, 6}}));
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, UnusedConstOutputAndAdd)
+{
+ // A single tensor which is constant + an Add op
+ CircleGen cgen;
+ uint32_t rhs_buf = cgen.addBuffer(std::vector<float>{5, 4, 7, 4});
+ uint32_t const_buf = cgen.addBuffer(std::vector<float>{9, 8, 7, 6});
+ int lhs = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int rhs = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32, rhs_buf});
+ int out = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out_const = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32, const_buf});
+ cgen.addOperatorAdd({{lhs, rhs}, {out}}, circle::ActivationFunctionType_NONE);
+ cgen.setInputsAndOutputs({lhs}, {out, out_const});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(uniformTCD<float>({{1, 3, 2, 4}}, {{6, 7, 9, 8}, {9, 8, 7, 6}}));
+ _context->addTestCase(uniformTCD<float>({{0, 1, 2, 3}}, {{5, 5, 9, 7}, {9, 8, 7, 6}}));
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, UsedConstOutput)
+{
+ // (( Input 1 )) ---------\
+ // |=> [ Add ] -> (( Output 1 ))
+ // (( Const Output 2 )) --<
+ // |=> [ Add ] -> (( Output 0 ))
+ // (( Input 0 )) ---------/
+ CircleGen cgen;
+ uint32_t rhs_buf = cgen.addBuffer(std::vector<float>{6, 4, 8, 1});
+ int in0 = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int in1 = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out0 = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out1 = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int const_out2 = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32, rhs_buf});
+ cgen.addOperatorAdd({{in0, const_out2}, {out0}}, circle::ActivationFunctionType_NONE);
+ cgen.addOperatorAdd({{const_out2, in1}, {out1}}, circle::ActivationFunctionType_NONE);
+ cgen.setInputsAndOutputs({in0, in1}, {out0, out1, const_out2});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(uniformTCD<float>({{1, 1, 1, 1}, {-1, -1, -1, -1}},
+ {{7, 5, 9, 2}, {5, 3, 7, 0}, {6, 4, 8, 1}}));
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, TensorBothInputOutput)
+{
+ // A single tensor which is an input and an output at the same time
+ CircleGen cgen;
+ int t = cgen.addTensor({{2, 2}, circle::TensorType::TensorType_FLOAT32});
+ cgen.setInputsAndOutputs({t}, {t});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(uniformTCD<float>({{1, 3, 2, 4}}, {{1, 3, 2, 4}}));
+ _context->addTestCase(uniformTCD<float>({{100, 300, 200, 400}}, {{100, 300, 200, 400}}));
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, TensorBothInputOutputCrossed)
+{
+ // Two tensors which are an input and an output at the same time
+ // But the order of inputs and outputs is changed.
+ CircleGen cgen;
+ int t1 = cgen.addTensor({{1}, circle::TensorType::TensorType_FLOAT32});
+ int t2 = cgen.addTensor({{1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.setInputsAndOutputs({t1, t2}, {t2, t1});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(uniformTCD<float>({{1}, {2}}, {{2}, {1}}));
+ _context->addTestCase(uniformTCD<float>({{100}, {200}}, {{200}, {100}}));
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneTensor_TwoOutputs)
+{
+ CircleGen cgen;
+ int lhs = cgen.addTensor({{2}, circle::TensorType::TensorType_FLOAT32});
+ int rhs = cgen.addTensor({{2}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{2}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorAdd({{lhs, rhs}, {out}}, circle::ActivationFunctionType_NONE);
+ cgen.setInputsAndOutputs({lhs, rhs}, {out, out}); // The same tensor is used twice as an output
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(uniformTCD<float>({{1, 1}, {2, 2}}, {{3, 3}, {3, 3}}));
+ _context->addTestCase(uniformTCD<float>({{2, 4}, {7, 4}}, {{9, 8}, {9, 8}}));
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneTensor_ThreeOutputs)
+{
+ CircleGen cgen;
+ int lhs = cgen.addTensor({{1}, circle::TensorType::TensorType_FLOAT32});
+ int rhs = cgen.addTensor({{1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorAdd({{lhs, rhs}, {out}}, circle::ActivationFunctionType_NONE);
+ cgen.setInputsAndOutputs({lhs, rhs}, {out, out, out}); // The same tensor is used 3 times as an output
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(uniformTCD<float>({{1}, {2}}, {{3}, {3}, {3}}));
+ _context->addTestCase(uniformTCD<float>({{2}, {7}}, {{9}, {9}, {9}}));
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneTensor_InputAndTwoOutputs)
+{
+ CircleGen cgen;
+ int t = cgen.addTensor({{2}, circle::TensorType::TensorType_FLOAT32});
+ cgen.setInputsAndOutputs({t}, {t, t}); // Same tensor is an input and 2 outputs
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(uniformTCD<float>({{1, 1}}, {{1, 1}, {1, 1}}));
+ _context->addTestCase(uniformTCD<float>({{2, 4}}, {{2, 4}, {2, 4}}));
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneTensor_InputAndTwoOutputsUsed)
+{
+ CircleGen cgen;
+ int t = cgen.addTensor({{2}, circle::TensorType::TensorType_FLOAT32});
+ int o = cgen.addTensor({{2}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorNeg({{t}, {o}});
+ cgen.setInputsAndOutputs({t}, {t, t, o}); // Same tensor is an input and 2 outputs
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(uniformTCD<float>({{1, 1}}, {{1, 1}, {1, 1}, {-1, -1}}));
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneTensor_ConstAndThreeOutputs)
+{
+ CircleGen cgen;
+ uint32_t const_buf = cgen.addBuffer(std::vector<float>{2, 5});
+ int t = cgen.addTensor({{2}, circle::TensorType_FLOAT32, const_buf});
+ cgen.setInputsAndOutputs({}, {t, t, t}); // A const tensor is used as all 3 outputs
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(uniformTCD<float>({}, {{2, 5}, {2, 5}, {2, 5}}));
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
diff --git a/tests/nnfw_api/src/ModelTestDynamicTensor.cc b/tests/nnfw_api/src/ModelTestDynamicTensor.cc
index e2d70d2c0..9a3a1add0 100644
--- a/tests/nnfw_api/src/ModelTestDynamicTensor.cc
+++ b/tests/nnfw_api/src/ModelTestDynamicTensor.cc
@@ -19,8 +19,8 @@
#include "common.h"
#include "fixtures.h"
-#include "NNPackages.h"
#include "CircleGen.h"
+#include "GenModelTest.h"
void set_input_output(nnfw_session *session, const std::vector<float> &input,
std::vector<float> &actual_output)
@@ -59,151 +59,120 @@ void set_input_output(nnfw_session *session, const std::vector<float> &input0,
*
* @note Run this test with "cpu" backend
*/
-// TODO Rewrite this with CircleGen
-class TestDynamicTensorReshapeModelLoaded
- : public ValidationTestModelLoaded<NNPackages::DYNAMIC_TENSOR_RESHAPE>
+auto build_dynamic_Reshape()
{
-protected:
- void set_input_output(const std::vector<int> &new_shape, int actual_output_size,
- std::vector<float> *actual_output)
- {
- NNFW_STATUS res = nnfw_set_input(_session, 0, NNFW_TYPE_TENSOR_INT32, new_shape.data(),
- sizeof(int) * new_shape.size());
- NNFW_ENSURE_SUCCESS(res);
-
- res = nnfw_set_output(_session, 0, NNFW_TYPE_TENSOR_FLOAT32, actual_output->data(),
- sizeof(float) * actual_output_size);
- NNFW_ENSURE_SUCCESS(res);
- }
-
- void prepare_and_set_input_output(const std::vector<int> &new_shape, int actual_output_size,
- std::vector<float> *actual_output)
- {
- NNFW_ENSURE_SUCCESS(nnfw_set_available_backends(_session, "cpu"));
+ CircleGen cgen;
- NNFW_STATUS res = NNFW_STATUS_ERROR;
+ auto f32 = circle::TensorType::TensorType_FLOAT32;
+ auto i32 = circle::TensorType::TensorType_INT32;
- res = nnfw_prepare(_session);
- NNFW_ENSURE_SUCCESS(res);
+ std::vector<float> input_data{-1.5, -1.0, -0.5, 0.5, 1.0, 1.5};
+ uint32_t input_buf = cgen.addBuffer(input_data); // input data for shape [2, 3]
+ int input = cgen.addTensor({{2, 3}, f32, input_buf});
+ int new_shape = cgen.addTensor({{2}, i32});
+ int out = cgen.addTensor({{}, f32}); // scalar, meaning output shape is unspecified
- set_input_output(new_shape, actual_output_size, actual_output);
- // real test case should start from calling nnfw_run()
- }
+ CircleGen::Shape empty_new_shape;
+ cgen.addOperatorReshape({{input, new_shape}, {out}}, empty_new_shape);
+ cgen.setInputsAndOutputs({new_shape}, {out});
+ auto cbuf = cgen.finish();
+ return cbuf;
+}
- // call this after calling nnfw_prepare()
- void set_input_output_and_run(const std::vector<int> &new_shape,
- const std::vector<float> &expected_output, bool no_run_error = true)
- {
- int output_element_num = expected_output.size();
- std::vector<float> actual_output(output_element_num);
-
- set_input_output(new_shape, output_element_num, &actual_output);
-
- // Do inference
- NNFW_STATUS res = nnfw_run(_session);
-
- if (no_run_error)
- {
- NNFW_ENSURE_SUCCESS(res);
-
- // output shape check
- nnfw_tensorinfo info;
- NNFW_ENSURE_SUCCESS(nnfw_output_tensorinfo(_session, 0, &info));
- ASSERT_EQ(info.rank, new_shape.size());
- for (uint32_t d = 0; d < info.rank; ++d)
- ASSERT_EQ(info.dims[d], new_shape[d]);
-
- // output value check
- for (int i = 0; i < expected_output.size(); ++i)
- ASSERT_EQ(expected_output[i], actual_output[i]);
- }
- else
- {
- ASSERT_EQ(res, NNFW_STATUS_ERROR);
- }
- };
+TEST_F(GenModelTest, dynamic_reshape_from_2x3_to_3x2)
+{
+ const std::vector<int> new_shape{3, 2};
+ const std::vector<float> expected{-1.5, -1.0, -0.5, 0.5, 1.0, 1.5};
- void TearDown() override
+ _context = std::make_unique<GenModelTestContext>(build_dynamic_Reshape());
{
- ValidationTestModelLoaded<NNPackages::DYNAMIC_TENSOR_RESHAPE>::TearDown();
+ TestCaseData tcd;
+ tcd.addInput(new_shape);
+ tcd.addOutput(expected);
+ _context->addTestCase(tcd);
+ _context->setBackends({"cpu"}); // Currently, dynamic tensor runs on "cpu" only
+ _context->output_sizes(0, sizeof(float) * expected.size());
}
-};
-
-TEST_F(TestDynamicTensorReshapeModelLoaded, reshape_to_3x2)
-{
- const std::vector<int> new_shape = {3, 2};
- const std::vector<float> expected = {-1.5, -1.0, -0.5, 0.5, 1.0, 1.5};
- std::vector<float> actual_output(expected.size());
-
- prepare_and_set_input_output(new_shape, expected.size(), &actual_output);
-
- // Do inference
- NNFW_STATUS res = nnfw_run(_session);
- NNFW_ENSURE_SUCCESS(res);
-
- // output value check
- for (int i = 0; i < expected.size(); ++i)
- ASSERT_EQ(expected[i], actual_output[i]);
+ // GenModelTest::teardown() will do the rest
+ SUCCEED();
}
/**
* @brief Negative test.
* Reshape's first input has 6 values but trying to reshaping to [3, 3]
*/
-TEST_F(TestDynamicTensorReshapeModelLoaded, neg_reshape_to_wrong_3x3)
+TEST_F(GenModelTest, neg_reshape_from_2x3_to_wrong_3x3)
{
- const std::vector<int> wrong_shape = {3, 3}; // wrong shape input
- const int actual_element_num = 9; // whatever number
- std::vector<float> actual_output(9); // whatever size
+ const std::vector<int> wrong_shape{3, 3}; // wrong shape input
+ const std::vector<float> expected{0}; // whatever
- prepare_and_set_input_output(wrong_shape, actual_element_num, &actual_output);
-
- // Do inference
- NNFW_STATUS res = nnfw_run(_session);
- ASSERT_EQ(res, NNFW_STATUS_ERROR); // run should fail
+ _context = std::make_unique<GenModelTestContext>(build_dynamic_Reshape());
+ {
+ TestCaseData tcd;
+ tcd.addInput(wrong_shape);
+ tcd.addOutput(expected);
+ tcd.expect_error_on_run(true);
+
+ _context->addTestCase(tcd);
+ _context->setBackends({"cpu"}); // Currently, dynamic tensor runs on "cpu" only
+ _context->output_sizes(0, sizeof(float) * expected.size());
+ }
+ // GenModelTest::teardown() will do the rest
+ SUCCEED();
}
-TEST_F(TestDynamicTensorReshapeModelLoaded, reshape_multiple_executions)
+TEST_F(GenModelTest, reshape_multiple_executions)
{
- NNFW_ENSURE_SUCCESS(nnfw_set_available_backends(_session, "cpu"));
-
- NNFW_STATUS res = nnfw_prepare(_session);
- NNFW_ENSURE_SUCCESS(res);
-
std::vector<int> new_shape;
std::vector<float> expected = {-1.5, -1.0, -0.5, 0.5, 1.0, 1.5};
- // let's call multiple times
- new_shape = {3, 2};
- set_input_output_and_run(new_shape, expected);
+ auto add_tcd = [&](const decltype(new_shape) &&new_shape) {
+ TestCaseData tcd;
+ tcd.addInput(new_shape);
+ tcd.addOutput(expected);
+ _context->addTestCase(tcd);
+ };
- new_shape = {1, 6};
- set_input_output_and_run(new_shape, expected);
+ _context = std::make_unique<GenModelTestContext>(build_dynamic_Reshape());
+ {
+ add_tcd({3, 2});
+ add_tcd({1, 6});
+ add_tcd({6, 1});
- new_shape = {6, 1};
- set_input_output_and_run(new_shape, expected);
+ _context->setBackends({"cpu"}); // Currently, dynamic tensor runs on "cpu" only
+ _context->output_sizes(0, sizeof(float) * expected.size());
+ }
+ // GenModelTest::teardown() will do the rest
+ SUCCEED();
}
-TEST_F(TestDynamicTensorReshapeModelLoaded, neg_reshape_multiple_executions)
+TEST_F(GenModelTest, neg_reshape_multiple_executions)
{
- NNFW_ENSURE_SUCCESS(nnfw_set_available_backends(_session, "cpu"));
-
- NNFW_STATUS res = nnfw_prepare(_session);
- NNFW_ENSURE_SUCCESS(res);
-
std::vector<int> new_shape;
std::vector<float> expected = {-1.5, -1.0, -0.5, 0.5, 1.0, 1.5};
- // let's call multiple times including the second nnfw_run() to fail
- new_shape = {3, 2};
- set_input_output_and_run(new_shape, expected);
+ auto add_tcd = [&](const decltype(new_shape) &&new_shape, bool expect_error_on_run) {
+ TestCaseData tcd;
+ tcd.addInput(new_shape);
+ tcd.addOutput(expected);
+ tcd.expect_error_on_run(expect_error_on_run);
+ _context->addTestCase(tcd);
+ };
- new_shape = {1, 100}; // wrong shape
- set_input_output_and_run(new_shape, expected, false); // Run will fail
+ _context = std::make_unique<GenModelTestContext>(build_dynamic_Reshape());
+ {
+ bool EXPECT_ERROR_ON_RUN = true;
+ bool EXPECT_SUCCESS_ON_RUN = !EXPECT_ERROR_ON_RUN;
- // next run should succeed
- new_shape = {6, 1};
- set_input_output_and_run(new_shape, expected);
+ add_tcd({3, 2}, EXPECT_SUCCESS_ON_RUN);
+ add_tcd({1, 100}, EXPECT_ERROR_ON_RUN); // the second tcd (index 1): wrong shape
+ add_tcd({6, 1}, EXPECT_SUCCESS_ON_RUN);
+
+ _context->setBackends({"cpu"}); // Currently, dynamic tensor runs on "cpu" only
+ _context->output_sizes(0, sizeof(float) * expected.size());
+ }
+ // GenModelTest::teardown() will do the rest
+ SUCCEED();
}
//
diff --git a/tests/nnfw_api/src/ModelTestInputReshaping.cc b/tests/nnfw_api/src/ModelTestInputReshaping.cc
index bfe347fe7..f5ce3e062 100644
--- a/tests/nnfw_api/src/ModelTestInputReshaping.cc
+++ b/tests/nnfw_api/src/ModelTestInputReshaping.cc
@@ -18,25 +18,37 @@
#include <nnfw_internal.h>
#include "fixtures.h"
-#include "NNPackages.h"
#include "common.h"
-
-using TestInputReshapingAddModelLoaded = ValidationTestModelLoaded<NNPackages::INPUT_RESHAPING_ADD>;
+#include "CircleGen.h"
/**
* @brief Testing the following model:
* #1 = placeholder (shape = [2, 2], dtype=float)
* #2 = placeholder (shape = [2], dtype=float)
* #3 = add(#1, #2)
- *
- * @note Run this test with "cpu" backend and "linear" executor
*/
-TEST_F(TestInputReshapingAddModelLoaded, reshaping_2x2_to_4x2)
+auto build_model_add_input_reshaping()
+{
+ // Model is not important
+ CircleGen cgen;
+ auto f32 = circle::TensorType::TensorType_FLOAT32;
+ int in1 = cgen.addTensor({{2, 2}, f32}); // consider this [None, None]
+ int in2 = cgen.addTensor({{2}, f32});
+ int out = cgen.addTensor({{}, f32}); // scalar, meaning output shape is unspecified
+ cgen.addOperatorAdd({{in1, in2}, {out}}, circle::ActivationFunctionType_NONE);
+ cgen.setInputsAndOutputs({in1, in2}, {out});
+ auto cbuf = cgen.finish();
+ return cbuf;
+}
+
+TEST(TestDynamicTensor, input_reshaping)
{
- NNFW_STATUS res = NNFW_STATUS_ERROR;
+ nnfw_session *session = nullptr;
+ NNFW_ENSURE_SUCCESS(nnfw_create_session(&session));
+ const auto model_buf = build_model_add_input_reshaping();
+ NNFW_ENSURE_SUCCESS(nnfw_load_circle_from_buffer(session, model_buf.buffer(), model_buf.size()));
- NNFW_ENSURE_SUCCESS(nnfw_set_available_backends(_session, "cpu"));
- NNFW_ENSURE_SUCCESS(nnfw_set_config(_session, "EXECUTOR", "Linear"));
+ NNFW_ENSURE_SUCCESS(nnfw_set_available_backends(session, "cpu"));
// input and output values
const std::vector<float> input1 = {0, 1, 2, 3, 4, 5, 6, 7}; // of changed shape [4, 2]
@@ -53,36 +65,31 @@ TEST_F(TestInputReshapingAddModelLoaded, reshaping_2x2_to_4x2)
// input reshaping from [2, 2] to [4, 2]
nnfw_tensorinfo ti = {NNFW_TYPE_TENSOR_FLOAT32, 2, {4, 2}};
- res = nnfw_set_input_tensorinfo(_session, 0, &ti);
+ NNFW_ENSURE_SUCCESS(nnfw_set_input_tensorinfo(session, 0, &ti));
- res = nnfw_prepare(_session);
- NNFW_ENSURE_SUCCESS(res);
+ NNFW_ENSURE_SUCCESS(nnfw_prepare(session));
nnfw_tensorinfo ti_input = {}; // Static inference result will be stored
- nnfw_input_tensorinfo(_session, 0, &ti_input);
+ NNFW_ENSURE_SUCCESS(nnfw_input_tensorinfo(session, 0, &ti_input));
ASSERT_TRUE(tensorInfoEqual(ti, ti_input));
nnfw_tensorinfo ti_output = {}; // Static inference result will be stored
- nnfw_output_tensorinfo(_session, 0, &ti_output);
+ NNFW_ENSURE_SUCCESS(nnfw_output_tensorinfo(session, 0, &ti_output));
ASSERT_TRUE(tensorInfoEqual(ti, ti_output)); // input/output shapes are the same for this model
- res = nnfw_set_input(_session, 0, NNFW_TYPE_TENSOR_FLOAT32, input1.data(),
- sizeof(float) * input1.size());
- NNFW_ENSURE_SUCCESS(res);
- res = nnfw_set_input(_session, 1, NNFW_TYPE_TENSOR_FLOAT32, input2.data(),
- sizeof(float) * input2.size());
- NNFW_ENSURE_SUCCESS(res);
+ NNFW_ENSURE_SUCCESS(nnfw_set_input(session, 0, NNFW_TYPE_TENSOR_FLOAT32, input1.data(),
+ sizeof(float) * input1.size()));
+ NNFW_ENSURE_SUCCESS(nnfw_set_input(session, 1, NNFW_TYPE_TENSOR_FLOAT32, input2.data(),
+ sizeof(float) * input2.size()));
uint64_t output_num_elements = tensorInfoNumElements(ti_output);
ASSERT_EQ(output_num_elements, expected.size());
std::vector<float> actual_output(output_num_elements);
- res = nnfw_set_output(_session, 0, NNFW_TYPE_TENSOR_FLOAT32, actual_output.data(),
- sizeof(float) * actual_output.size());
- NNFW_ENSURE_SUCCESS(res);
+ NNFW_ENSURE_SUCCESS(nnfw_set_output(session, 0, NNFW_TYPE_TENSOR_FLOAT32, actual_output.data(),
+ sizeof(float) * actual_output.size()));
// Do inference
- res = nnfw_run(_session);
- NNFW_ENSURE_SUCCESS(res);
+ NNFW_ENSURE_SUCCESS(nnfw_run(session));
// compare
for (int i = 0; i < expected.size(); ++i)
diff --git a/tests/nnfw_api/src/NNPackages.cc b/tests/nnfw_api/src/NNPackages.cc
index 534973cb0..b0febb7a1 100644
--- a/tests/nnfw_api/src/NNPackages.cc
+++ b/tests/nnfw_api/src/NNPackages.cc
@@ -29,7 +29,7 @@ const char *TEST_PACKAGE_NAMES[] = {
"add", "add_no_manifest", "add_invalid_manifest",
// for dynamic tensor test
- "input_reshaping_add", "dynamic_tensor_reshape", "while_dynamic", "if_dynamic",
+ "while_dynamic", "if_dynamic",
};
NNPackages &NNPackages::get()
@@ -71,7 +71,10 @@ void NNPackages::checkAll()
DIR *dir = opendir(path.c_str());
if (!dir)
{
- std::string msg = "missing nnpackage: " + package_name + ", path: " + path;
+ std::string msg =
+ "missing nnpackage: " + package_name + ", path: " + path +
+ "\nPlease run \'[install_dir]/test/onert-test prepare-model --nnpackage\' to "
+ "download nnpackage";
throw std::runtime_error{msg};
}
closedir(dir);
diff --git a/tests/nnfw_api/src/NNPackages.h b/tests/nnfw_api/src/NNPackages.h
index 735fa96a0..a51b7701d 100644
--- a/tests/nnfw_api/src/NNPackages.h
+++ b/tests/nnfw_api/src/NNPackages.h
@@ -43,8 +43,6 @@ public:
ADD_INVALID_MANIFEST, //< Contains "Add" model but the manifest file is broken JSON
// for dynamic tensor test
- INPUT_RESHAPING_ADD,
- DYNAMIC_TENSOR_RESHAPE,
WHILE_DYNAMIC,
IF_DYNAMIC,
diff --git a/tests/nnfw_api/src/RegressionTests.cc b/tests/nnfw_api/src/RegressionTests.cc
index 05914b839..10d6e5d6e 100644
--- a/tests/nnfw_api/src/RegressionTests.cc
+++ b/tests/nnfw_api/src/RegressionTests.cc
@@ -62,3 +62,117 @@ TEST_F(RegressionTest, neg_github_3826)
ASSERT_EQ(nnfw_prepare(session), NNFW_STATUS_ERROR);
NNFW_ENSURE_SUCCESS(nnfw_close_session(session));
}
+
+TEST_F(RegressionTest, github_11748)
+{
+ // At the 1st call, input tensor is static. From the 2nd call, input tensor becomes dynamic.
+ // The following model and calling sequence are what nnstreamer used in their test case.
+ CircleGen cgen;
+ int lhs = cgen.addTensor({{1}, circle::TensorType::TensorType_FLOAT32});
+
+ std::vector<float> rhs_data{2};
+ uint32_t rhs_buf = cgen.addBuffer(rhs_data);
+ int rhs = cgen.addTensor({{1}, circle::TensorType::TensorType_FLOAT32, rhs_buf});
+
+ int out = cgen.addTensor({{1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorAdd({{lhs, rhs}, {out}}, circle::ActivationFunctionType_NONE);
+ cgen.setInputsAndOutputs({lhs}, {out});
+ auto cbuf = cgen.finish();
+
+ nnfw_session *session = nullptr;
+ NNFW_ENSURE_SUCCESS(nnfw_create_session(&session));
+ NNFW_ENSURE_SUCCESS(nnfw_load_circle_from_buffer(session, cbuf.buffer(), cbuf.size()));
+ // To test when there are no backends loaded for the session
+ NNFW_ENSURE_SUCCESS(nnfw_set_available_backends(session, "cpu"));
+ NNFW_ENSURE_SUCCESS(nnfw_prepare(session));
+
+ uint32_t input_num = -1;
+ NNFW_ENSURE_SUCCESS(nnfw_input_size(session, &input_num));
+
+ nnfw_tensorinfo t_input;
+ NNFW_ENSURE_SUCCESS(nnfw_input_tensorinfo(session, 0, &t_input));
+
+ uint32_t output_num = -1;
+ NNFW_ENSURE_SUCCESS(nnfw_output_size(session, &output_num));
+
+ nnfw_tensorinfo t_output;
+ NNFW_ENSURE_SUCCESS(nnfw_output_tensorinfo(session, 0, &t_output));
+
+ // when new_dim == 1, input tensor is static. From 2, input tensor becomes dynamic.
+ for (int32_t new_dim = 1; new_dim <= 4; new_dim++)
+ {
+ nnfw_tensorinfo t_new_input;
+ t_new_input.dtype = t_input.dtype;
+ t_new_input.rank = 1;
+ t_new_input.dims[0] = new_dim;
+ NNFW_ENSURE_SUCCESS(nnfw_set_input_tensorinfo(session, 0, &t_new_input));
+
+ NNFW_ENSURE_SUCCESS(nnfw_input_size(session, &input_num));
+ NNFW_ENSURE_SUCCESS(nnfw_input_tensorinfo(session, 0, &t_input));
+
+ ASSERT_EQ(input_num, 1);
+ ASSERT_EQ(t_input.rank, t_new_input.rank);
+ ASSERT_EQ(t_input.dims[0], new_dim);
+
+ uint8_t input_buf[new_dim * sizeof(float)];
+ NNFW_ENSURE_SUCCESS(
+ nnfw_set_input(session, 0, t_input.dtype, &input_buf, new_dim * sizeof(float)));
+
+ uint8_t output_buf[new_dim * sizeof(float)];
+ NNFW_ENSURE_SUCCESS(
+ nnfw_set_output(session, 0, t_output.dtype, &output_buf, new_dim * sizeof(float)));
+
+ NNFW_ENSURE_SUCCESS(nnfw_run(session));
+
+ NNFW_ENSURE_SUCCESS(nnfw_output_size(session, &output_num));
+ NNFW_ENSURE_SUCCESS(nnfw_output_tensorinfo(session, 0, &t_output));
+
+ ASSERT_EQ(output_num, 1);
+ ASSERT_EQ(t_output.rank, t_new_input.rank);
+ ASSERT_EQ(t_output.dims[0], new_dim);
+
+ // This calling sequence looks odd, but nnstreamer's test case calls these again.
+ // Either way, the runtime should work.
+ NNFW_ENSURE_SUCCESS(
+ nnfw_set_input(session, 0, t_input.dtype, &input_buf, new_dim * sizeof(float)));
+ NNFW_ENSURE_SUCCESS(
+ nnfw_set_output(session, 0, t_output.dtype, &output_buf, new_dim * sizeof(float)));
+ NNFW_ENSURE_SUCCESS(nnfw_run(session));
+ }
+
+ NNFW_ENSURE_SUCCESS(nnfw_close_session(session));
+}
+
+TEST_F(RegressionTest, github_4585)
+{
+ // A single tensor which is an input and an output at the same time
+ CircleGen cgen;
+ int t = cgen.addTensor({{1, 1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.setInputsAndOutputs({t}, {t});
+ auto cbuf = cgen.finish();
+
+ nnfw_session *session = nullptr;
+ NNFW_ENSURE_SUCCESS(nnfw_create_session(&session));
+ NNFW_ENSURE_SUCCESS(nnfw_load_circle_from_buffer(session, cbuf.buffer(), cbuf.size()));
+ // To test when there are no backends loaded for the session
+ NNFW_ENSURE_SUCCESS(nnfw_set_available_backends(session, "cpu"));
+ NNFW_ENSURE_SUCCESS(nnfw_prepare(session));
+
+ // Change input tensorinfo (Make dynamic shape inference happen)
+ nnfw_tensorinfo ti_new = {NNFW_TYPE_TENSOR_FLOAT32, 2, {1, 2}};
+ NNFW_ENSURE_SUCCESS(nnfw_set_input_tensorinfo(session, 0, &ti_new));
+
+ std::vector<float> in_buf{1, 1};
+ std::vector<float> out_buf{-1, -1};
+
+ NNFW_ENSURE_SUCCESS(
+ nnfw_set_input(session, 0, ti_new.dtype, in_buf.data(), in_buf.size() * sizeof(float)));
+ NNFW_ENSURE_SUCCESS(
+ nnfw_set_output(session, 0, ti_new.dtype, out_buf.data(), out_buf.size() * sizeof(float)));
+
+ NNFW_ENSURE_SUCCESS(nnfw_run(session));
+
+ ASSERT_EQ(in_buf, out_buf);
+
+ NNFW_ENSURE_SUCCESS(nnfw_close_session(session));
+}
diff --git a/tests/nnfw_api/src/ValidationTestAddSessionPrepared.cc b/tests/nnfw_api/src/ValidationTestAddSessionPrepared.cc
index f19bb782c..e2ae655be 100644
--- a/tests/nnfw_api/src/ValidationTestAddSessionPrepared.cc
+++ b/tests/nnfw_api/src/ValidationTestAddSessionPrepared.cc
@@ -39,6 +39,28 @@ TEST_F(ValidationTestAddSessionPrepared, run_twice)
ASSERT_FLOAT_EQ(_output[0], 7.0);
}
+TEST_F(ValidationTestAddSessionPrepared, run_many_times_dynamic_input)
+{
+ for (int v = 1; v <= 5; v++) // 5 times with different shapes
+ {
+ nnfw_tensorinfo ti_input = {NNFW_TYPE_TENSOR_FLOAT32, 4, {1, 1, 1, v}};
+ SetInOutBuffersDynamic(&ti_input);
+
+ for (int i = 0; i < v; i++)
+ _input[i] = i * 10.0;
+
+ NNFW_ENSURE_SUCCESS(nnfw_run(_session));
+
+ // Check if the shape inference is correct
+ nnfw_tensorinfo ti_output;
+ ASSERT_EQ(nnfw_output_tensorinfo(_session, 0, &ti_output), NNFW_STATUS_NO_ERROR);
+ EXPECT_EQ(num_elems(&ti_input), num_elems(&ti_output));
+
+ for (int i = 0; i < v; i++)
+ ASSERT_FLOAT_EQ(_output[i], i * 10.0 + 2.0) << "i : " << i;
+ }
+}
+
TEST_F(ValidationTestAddSessionPrepared, run_async)
{
SetInOutBuffers();
@@ -161,4 +183,12 @@ TEST_F(ValidationTestAddSessionPrepared, neg_prepare)
ASSERT_EQ(nnfw_prepare(_session), NNFW_STATUS_INVALID_STATE);
}
+TEST_F(ValidationTestAddSessionPrepared, neg_run_without_set_output)
+{
+ uint8_t input[4];
+ NNFW_ENSURE_SUCCESS(nnfw_set_input(_session, 0, NNFW_TYPE_TENSOR_FLOAT32, input, sizeof(input)));
+ // `nnfw_set_output()` is not called
+ ASSERT_EQ(nnfw_run(_session), NNFW_STATUS_ERROR);
+}
+
// TODO Validation check when "nnfw_run" is called without input & output tensor setting
diff --git a/tests/nnfw_api/src/fixtures.h b/tests/nnfw_api/src/fixtures.h
index f273d6553..0cb67b5e2 100644
--- a/tests/nnfw_api/src/fixtures.h
+++ b/tests/nnfw_api/src/fixtures.h
@@ -126,6 +126,21 @@ protected:
NNFW_STATUS_NO_ERROR);
}
+ void SetInOutBuffersDynamic(const nnfw_tensorinfo *ti_input)
+ {
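+ // Sets a new input shape and binds input/output buffers; the output buffer is over-allocated since its exact size may not be known before running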
+ NNFW_ENSURE_SUCCESS(nnfw_set_input_tensorinfo(_session, 0, ti_input));
+ uint64_t input_elements = num_elems(ti_input);
+ _input.resize(input_elements);
+ ASSERT_EQ(
+ nnfw_set_input(_session, 0, ti_input->dtype, _input.data(), sizeof(float) * input_elements),
+ NNFW_STATUS_NO_ERROR);
+
+ _output.resize(40000); // Give sufficient size for the output
+ ASSERT_EQ(nnfw_set_output(_session, 0, ti_input->dtype, _output.data(),
+ sizeof(float) * _output.size()),
+ NNFW_STATUS_NO_ERROR);
+ }
+
protected:
std::vector<float> _input;
std::vector<float> _output;
diff --git a/tests/nnfw_api/src/one_op_tests/Add.cc b/tests/nnfw_api/src/one_op_tests/Add.cc
index 281d5ded5..1fff0ed30 100644
--- a/tests/nnfw_api/src/one_op_tests/Add.cc
+++ b/tests/nnfw_api/src/one_op_tests/Add.cc
@@ -30,8 +30,8 @@ TEST_F(GenModelTest, OneOp_Add_VarToConst)
cgen.setInputsAndOutputs({lhs}, {out});
_context = std::make_unique<GenModelTestContext>(cgen.finish());
- _context->addTestCase({{{1, 3, 2, 4}}, {{6, 7, 9, 8}}});
- _context->addTestCase({{{0, 1, 2, 3}}, {{5, 5, 9, 7}}});
+ _context->addTestCase(uniformTCD<float>({{1, 3, 2, 4}}, {{6, 7, 9, 8}}));
+ _context->addTestCase(uniformTCD<float>({{0, 1, 2, 3}}, {{5, 5, 9, 7}}));
_context->setBackends({"acl_cl", "acl_neon", "cpu"});
SUCCEED();
@@ -47,7 +47,22 @@ TEST_F(GenModelTest, OneOp_Add_VarToVar)
cgen.setInputsAndOutputs({lhs, rhs}, {out});
_context = std::make_unique<GenModelTestContext>(cgen.finish());
- _context->addTestCase({{{1, 3, 2, 4}, {5, 4, 7, 4}}, {{6, 7, 9, 8}}});
+ _context->addTestCase(uniformTCD<float>({{1, 3, 2, 4}, {5, 4, 7, 4}}, {{6, 7, 9, 8}}));
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneOp_Add_VarToVarSame)
+{
+ CircleGen cgen;
+ int in = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorAdd({{in, in}, {out}}, circle::ActivationFunctionType_NONE);
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(uniformTCD<float>({{1, 3, 2, 4}}, {{2, 6, 4, 8}}));
_context->setBackends({"acl_cl", "acl_neon", "cpu"});
SUCCEED();
@@ -64,7 +79,7 @@ TEST_F(GenModelTest, neg_OneOp_Add_InvalidShape)
_context = std::make_unique<GenModelTestContext>(cgen.finish());
_context->setBackends({"acl_cl", "acl_neon", "cpu"});
- _context->setCompileFail();
+ _context->expectFailCompile();
SUCCEED();
}
@@ -82,7 +97,7 @@ TEST_F(GenModelTest, neg_OneOp_Add_InvalidShapeConst)
_context = std::make_unique<GenModelTestContext>(cgen.finish());
_context->setBackends({"acl_cl", "acl_neon", "cpu"});
- _context->setCompileFail();
+ _context->expectFailCompile();
SUCCEED();
}
@@ -97,7 +112,55 @@ TEST_F(GenModelTest, neg_OneOp_Add_OneOperand)
_context = std::make_unique<GenModelTestContext>(cgen.finish());
_context->setBackends({"acl_cl", "acl_neon", "cpu"});
- _context->setCompileFail();
+ _context->expectFailModelLoad();
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_Add_ThreeOperands)
+{
+ CircleGen cgen;
+ int in = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 2, 3, 1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorAdd({{in, in, in}, {out}}, circle::ActivationFunctionType_NONE);
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+ _context->expectFailModelLoad();
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_Add_NoOutput)
+{
+ CircleGen cgen;
+ int in = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 2, 3, 1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorAdd({{in}, {}}, circle::ActivationFunctionType_NONE);
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+ _context->expectFailModelLoad();
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_Add_InvalidActivation)
+{
+ CircleGen cgen;
+ int lhs = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int rhs = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorAdd({{lhs, rhs}, {out}},
+ static_cast<circle::ActivationFunctionType>(128) /* Invalid value */);
+ cgen.setInputsAndOutputs({lhs, rhs}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(uniformTCD<float>({{1, 3, 2, 4}, {5, 4, 7, 4}}, {{6, 7, 9, 8}}));
+ _context->setBackends({"cpu"});
+ _context->expectFailModelLoad();
SUCCEED();
}
diff --git a/tests/nnfw_api/src/one_op_tests/ArgMax.cc b/tests/nnfw_api/src/one_op_tests/ArgMax.cc
new file mode 100644
index 000000000..2876d8d70
--- /dev/null
+++ b/tests/nnfw_api/src/one_op_tests/ArgMax.cc
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GenModelTest.h"
+
+#include <memory>
+
+TEST_F(GenModelTest, OneOp_ArgMax_AxisToConst)
+{
+ CircleGen cgen;
+ const auto output_type = circle::TensorType::TensorType_INT32;
+ std::vector<int32_t> axis_data{1};
+ uint32_t axis_buf = cgen.addBuffer(axis_data);
+ int axis = cgen.addTensor({{1}, circle::TensorType::TensorType_INT32, axis_buf});
+ int in = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 2, 1}, output_type});
+ cgen.addOperatorArgMax({{in, axis}, {out}}, output_type);
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
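+ // ArgMax along axis 1 of the {1, 2, 2, 1} input {1, 4, 2, 3} yields indices {1, 0}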
+ TestCaseData tcd;
+ tcd.addInput(std::vector<float>{1, 4, 2, 3});
+ tcd.addOutput(std::vector<int32_t>{1, 0});
+ _context->addTestCase(tcd);
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneOp_ArgMax_Int64_AxisToConst)
+{
+ CircleGen cgen;
+ const auto output_type = circle::TensorType::TensorType_INT64;
+ std::vector<int32_t> axis_data{1};
+ uint32_t axis_buf = cgen.addBuffer(axis_data);
+ int axis = cgen.addTensor({{1}, circle::TensorType::TensorType_INT32, axis_buf});
+ int in = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 2, 1}, output_type});
+ cgen.addOperatorArgMax({{in, axis}, {out}}, output_type);
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ TestCaseData tcd;
+ tcd.addInput(std::vector<float>{1, 4, 2, 3});
+ tcd.addOutput(std::vector<int64_t>{1, 0});
+ _context->addTestCase(tcd);
+ _context->setBackends({"acl_cl"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneOp_ArgMax_AxisToVar)
+{
+ CircleGen cgen;
+ const auto output_type = circle::TensorType::TensorType_INT32;
+ int axis = cgen.addTensor({{1}, circle::TensorType::TensorType_INT32});
+ int in = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 2, 1}, output_type});
+ cgen.addOperatorArgMax({{in, axis}, {out}}, output_type);
+ cgen.setInputsAndOutputs({in, axis}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ TestCaseData tcd;
+ tcd.addInput(std::vector<float>{1, 4, 2, 3});
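+ // Axis -3 resolves to axis 1 for this rank-4 input, so the result matches the constant-axis case above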
+ tcd.addInput(std::vector<int32_t>{-3});
+ tcd.addOutput(std::vector<int32_t>{1, 0});
+ _context->addTestCase(tcd);
+ _context->setBackends({"cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_ArgMax_InvalidAxis0)
+{
+ CircleGen cgen;
+ const auto output_type = circle::TensorType::TensorType_INT32;
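+ // Axis 4 is out of range for a rank-4 input (valid range: [-4, 3]), so compilation is expected to fail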
+ std::vector<int32_t> axis_data{4};
+ uint32_t axis_buf = cgen.addBuffer(axis_data);
+ int axis = cgen.addTensor({{1}, circle::TensorType::TensorType_INT32, axis_buf});
+ int in = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 2, 1}, output_type});
+ cgen.addOperatorArgMax({{in, axis}, {out}}, output_type);
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+ _context->expectFailCompile();
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_ArgMax_InvalidAxis1)
+{
+ CircleGen cgen;
+ const auto output_type = circle::TensorType::TensorType_INT32;
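+ // Axis -3 is out of range for a rank-2 input (valid range: [-2, 1]), so compilation is expected to fail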
+ std::vector<int32_t> axis_data{-3};
+ uint32_t axis_buf = cgen.addBuffer(axis_data);
+ int axis = cgen.addTensor({{1}, circle::TensorType::TensorType_INT32, axis_buf});
+ int in = cgen.addTensor({{2, 2}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{2}, output_type});
+ cgen.addOperatorArgMax({{in, axis}, {out}}, output_type);
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+ _context->expectFailCompile();
+
+ SUCCEED();
+}
diff --git a/tests/nnfw_api/src/one_op_tests/AveragePool2D.cc b/tests/nnfw_api/src/one_op_tests/AveragePool2D.cc
index 854517e47..8ba82083f 100644
--- a/tests/nnfw_api/src/one_op_tests/AveragePool2D.cc
+++ b/tests/nnfw_api/src/one_op_tests/AveragePool2D.cc
@@ -26,7 +26,7 @@ TEST_F(GenModelTest, OneOp_AvgPool2D)
cgen.setInputsAndOutputs({in}, {out});
_context = std::make_unique<GenModelTestContext>(cgen.finish());
- _context->addTestCase({{{1, 3, 2, 4}}, {{2.5}}});
+ _context->addTestCase(uniformTCD<float>({{1, 3, 2, 4}}, {{2.5}}));
_context->setBackends({"acl_cl", "acl_neon", "cpu"});
SUCCEED();
@@ -43,7 +43,82 @@ TEST_F(GenModelTest, neg_OneOp_AvgPool2D)
_context = std::make_unique<GenModelTestContext>(cgen.finish());
_context->setBackends({"acl_cl", "acl_neon", "cpu"});
- _context->setCompileFail();
+ _context->expectFailCompile();
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_AvgPool2D_InvalidPaddingType)
+{
+ CircleGen cgen;
+ int in = cgen.addTensor({{2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 1, 1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorAveragePool2D({{in}, {out}}, static_cast<circle::Padding>(99), 2, 2, 2, 2,
+ circle::ActivationFunctionType_NONE);
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->expectFailModelLoad();
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_AvgPool2D_InvalidFilterSize_1)
+{
+ CircleGen cgen;
+ int in = cgen.addTensor({{2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 1, 1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorAveragePool2D({{in}, {out}}, circle::Padding_SAME, 2, 2, -1, 2,
+ circle::ActivationFunctionType_NONE);
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->expectFailModelLoad();
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_AvgPool2D_InvalidFilterSize_2)
+{
+ CircleGen cgen;
+ int in = cgen.addTensor({{2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 1, 1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorAveragePool2D({{in}, {out}}, circle::Padding_SAME, 2, 2, 2, 0,
+ circle::ActivationFunctionType_NONE);
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->expectFailModelLoad();
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_AvgPool2D_InvalidStrides_1)
+{
+ CircleGen cgen;
+ int in = cgen.addTensor({{2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 1, 1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorAveragePool2D({{in}, {out}}, circle::Padding_SAME, 0, 2, 2, 2,
+ circle::ActivationFunctionType_NONE);
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->expectFailModelLoad();
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_AvgPool2D_InvalidStrides_2)
+{
+ CircleGen cgen;
+ int in = cgen.addTensor({{2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 1, 1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorAveragePool2D({{in}, {out}}, circle::Padding_SAME, 1, -100, 2, 2,
+ circle::ActivationFunctionType_NONE);
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->expectFailModelLoad();
SUCCEED();
}
diff --git a/tests/nnfw_api/src/one_op_tests/Cast.cc b/tests/nnfw_api/src/one_op_tests/Cast.cc
new file mode 100644
index 000000000..71d98ee59
--- /dev/null
+++ b/tests/nnfw_api/src/one_op_tests/Cast.cc
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GenModelTest.h"
+
+#include <memory>
+
+TEST_F(GenModelTest, OneOp_Cast_Int32ToFloat32)
+{
+ CircleGen cgen;
+ int in = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_INT32});
+ int out = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorCast({{in}, {out}}, circle::TensorType::TensorType_INT32,
+ circle::TensorType::TensorType_FLOAT32);
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ TestCaseData tcd;
+ tcd.addInput(std::vector<int32_t>{1, 2, 3, 4});
+ tcd.addOutput(std::vector<float>{1, 2, 3, 4});
+ _context->addTestCase(tcd);
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneOp_Cast_Float32ToInt32)
+{
+ CircleGen cgen;
+ int in = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_INT32});
+ cgen.addOperatorCast({{in}, {out}}, circle::TensorType::TensorType_FLOAT32,
+ circle::TensorType::TensorType_INT32);
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ TestCaseData tcd;
+ tcd.addInput(std::vector<float>{1, 2, 3, 4});
+ tcd.addOutput(std::vector<int32_t>{1, 2, 3, 4});
+ _context->addTestCase(tcd);
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneOp_Cast_BoolToFloat32)
+{
+ CircleGen cgen;
+ int in = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_BOOL});
+ int out = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorCast({{in}, {out}}, circle::TensorType::TensorType_BOOL,
+ circle::TensorType::TensorType_FLOAT32);
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ TestCaseData tcd;
+ tcd.addInput(std::vector<bool>{true, false, true, true});
+ tcd.addOutput(std::vector<float>{1, 0, 1, 1});
+ _context->addTestCase(tcd);
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneOp_Cast_AfterEqual)
+{
+ CircleGen cgen;
+ int lhs = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int rhs = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int equal_out = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_BOOL});
+ int out = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorEqual({{lhs, rhs}, {equal_out}});
+ cgen.addOperatorCast({{equal_out}, {out}}, circle::TensorType::TensorType_BOOL,
+ circle::TensorType::TensorType_FLOAT32);
+ cgen.setInputsAndOutputs({lhs, rhs}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(uniformTCD<float>({{1, 3, 2, 4}, {2, 3, 1, 4}}, {{0, 1, 0, 1}}));
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_Cast_InvalidInputCount0)
+{
+ CircleGen cgen;
+ int out = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_INT32});
+ cgen.addOperatorCast({{}, {out}}, circle::TensorType::TensorType_FLOAT32,
+ circle::TensorType::TensorType_INT32);
+ cgen.setInputsAndOutputs({}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+ _context->expectFailModelLoad();
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_Cast_InvalidInputCount2)
+{
+ CircleGen cgen;
+ int lhs = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_INT32});
+ int rhs = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_INT32});
+ int out = cgen.addTensor({{1, 2, 2, 3}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorCast({{lhs, rhs}, {out}}, circle::TensorType::TensorType_INT32,
+ circle::TensorType::TensorType_FLOAT32);
+ cgen.setInputsAndOutputs({lhs, rhs}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+ _context->expectFailModelLoad();
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_Cast_InvalidOutputCount0)
+{
+ CircleGen cgen;
+ int in = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_INT32});
+ cgen.addOperatorCast({{in}, {}}, circle::TensorType::TensorType_INT32,
+ circle::TensorType::TensorType_FLOAT32);
+ cgen.setInputsAndOutputs({in}, {});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+ _context->expectFailModelLoad();
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_Cast_InvalidOutputCount2)
+{
+ CircleGen cgen;
+ int in = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_INT32});
+ int out1 = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out2 = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_INT32});
+ cgen.addOperatorCast({{in}, {out1, out2}}, circle::TensorType::TensorType_INT32,
+ circle::TensorType::TensorType_FLOAT32);
+ cgen.setInputsAndOutputs({in}, {out1, out2});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+ _context->expectFailModelLoad();
+
+ SUCCEED();
+}
diff --git a/tests/nnfw_api/src/one_op_tests/Concat.cc b/tests/nnfw_api/src/one_op_tests/Concat.cc
new file mode 100644
index 000000000..2ef1185d4
--- /dev/null
+++ b/tests/nnfw_api/src/one_op_tests/Concat.cc
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GenModelTest.h"
+
+#include <memory>
+
+TEST_F(GenModelTest, OneOp_Concat_ShareSubTensor)
+{
+ CircleGen cgen;
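+ // The Add result is consumed by both Concatenation and Pad, exercising sub-tensor sharing on the ACL backends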
+ int lhs = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int rhs = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int shared_subtensor = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int concat_out = cgen.addTensor({{1, 2, 2, 2}, circle::TensorType::TensorType_FLOAT32});
+ std::vector<int32_t> padding_data{0, 0, 1, 1, 1, 1, 0, 0};
+ uint32_t padding_buf = cgen.addBuffer(padding_data);
+ int padding = cgen.addTensor({{4, 2}, circle::TensorType::TensorType_INT32, padding_buf});
+ int pad_out = cgen.addTensor({{1, 4, 4, 1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorAdd({{lhs, rhs}, {shared_subtensor}}, circle::ActivationFunctionType_NONE);
+ cgen.addOperatorConcatenation({{rhs, shared_subtensor}, {concat_out}}, 3,
+ circle::ActivationFunctionType_NONE);
+ cgen.addOperatorPad({{shared_subtensor, padding}, {pad_out}});
+ cgen.setInputsAndOutputs({lhs, rhs}, {pad_out, concat_out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(uniformTCD<float>(
+ {{1, 3, 2, 4}, {5, 4, 7, 4}},
+ {{0, 0, 0, 0, 0, 6, 7, 0, 0, 9, 8, 0, 0, 0, 0, 0}, {5, 6, 4, 7, 7, 9, 4, 8}}));
+ _context->setBackends({"acl_cl", "acl_neon"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneOp_Concat)
+{
+ CircleGen cgen;
+
+ int input1 = cgen.addTensor({{2, 3}, circle::TensorType::TensorType_FLOAT32});
+ int input2 = cgen.addTensor({{2, 3}, circle::TensorType::TensorType_FLOAT32});
+ int output = cgen.addTensor({{4, 3}, circle::TensorType::TensorType_FLOAT32});
+
+ cgen.addOperatorConcatenation({{input1, input2}, {output}}, 0,
+ circle::ActivationFunctionType_NONE);
+ cgen.setInputsAndOutputs({input1, input2}, {output});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(uniformTCD<float>({{1, 2, 3, 4, 5, 6}, {7, 8, 9, 10, 11, 12}},
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}}));
+ _context->setBackends({"cpu;acl_cl"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneOp_Concat_Subtensor_4D)
+{
+ CircleGen cgen;
+ int in1 = cgen.addTensor({{1, 1, 1, 20}, circle::TensorType::TensorType_FLOAT32});
+ int in2 = cgen.addTensor({{1, 1, 1, 10}, circle::TensorType::TensorType_FLOAT32});
+ std::vector<int32_t> axis_data{3};
+ uint32_t axis_buf = cgen.addBuffer(axis_data);
+ int axis = cgen.addTensor({{1}, circle::TensorType::TensorType_INT32, axis_buf});
+
+ int s_out1 = cgen.addTensor({{1, 1, 1, 5}, circle::TensorType::TensorType_FLOAT32});
+ int s_out2 = cgen.addTensor({{1, 1, 1, 5}, circle::TensorType::TensorType_FLOAT32});
+ int s_out3 = cgen.addTensor({{1, 1, 1, 5}, circle::TensorType::TensorType_FLOAT32});
+ int s_out4 = cgen.addTensor({{1, 1, 1, 5}, circle::TensorType::TensorType_FLOAT32});
+
+ int c_out1 = cgen.addTensor({{1, 1, 1, 10}, circle::TensorType::TensorType_FLOAT32});
+ int c_out2 = cgen.addTensor({{1, 1, 1, 10}, circle::TensorType::TensorType_FLOAT32});
+ int c_out3 = cgen.addTensor({{1, 1, 1, 10}, circle::TensorType::TensorType_FLOAT32});
+
+ int a_out1 = cgen.addTensor({{1, 1, 1, 10}, circle::TensorType::TensorType_FLOAT32});
+ int a_out2 = cgen.addTensor({{1, 1, 1, 10}, circle::TensorType::TensorType_FLOAT32});
+ int a_out3 = cgen.addTensor({{1, 1, 1, 10}, circle::TensorType::TensorType_FLOAT32});
+
+ int final_out = cgen.addTensor({{1, 1, 1, 35}, circle::TensorType::TensorType_FLOAT32});
+
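+ // Split in1 ({1, 1, 1, 20}) into four {1, 1, 1, 5} slices along axis 3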
+ cgen.addOperatorSplit({{axis, in1}, {s_out1, s_out2, s_out3, s_out4}}, 4);
+
+ cgen.addOperatorConcatenation({{s_out1, s_out2}, {c_out1}}, 3,
+ circle::ActivationFunctionType::ActivationFunctionType_NONE);
+ cgen.addOperatorConcatenation({{s_out1, s_out3}, {c_out2}}, 3,
+ circle::ActivationFunctionType::ActivationFunctionType_NONE);
+ cgen.addOperatorConcatenation({{s_out1, s_out4}, {c_out3}}, 3,
+ circle::ActivationFunctionType::ActivationFunctionType_NONE);
+
+ cgen.addOperatorAdd({{c_out1, in2}, {a_out1}},
+ circle::ActivationFunctionType::ActivationFunctionType_NONE);
+ cgen.addOperatorAdd({{c_out2, in2}, {a_out2}},
+ circle::ActivationFunctionType::ActivationFunctionType_NONE);
+ cgen.addOperatorAdd({{c_out3, in2}, {a_out3}},
+ circle::ActivationFunctionType::ActivationFunctionType_NONE);
+
+ cgen.addOperatorConcatenation({{s_out1, a_out1, a_out2, a_out3}, {final_out}}, 3,
+ circle::ActivationFunctionType::ActivationFunctionType_NONE);
+
+ cgen.setInputsAndOutputs({in1, in2}, {s_out1, s_out2, s_out3, s_out4, c_out1, c_out2, c_out3,
+ a_out1, a_out2, a_out3, final_out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(uniformTCD<float>(
+ {
+ // inputs
+ {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, // in1
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0} // in2
+ },
+ {
+ // outputs
+ {1, 2, 3, 4, 5}, // s_out1
+ {6, 7, 8, 9, 10}, // s_out2
+ {11, 12, 13, 14, 15}, // s_out3
+ {16, 17, 18, 19, 20}, // s_out4
+ {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, // c_out1
+ {1, 2, 3, 4, 5, 11, 12, 13, 14, 15}, // c_out2
+ {1, 2, 3, 4, 5, 16, 17, 18, 19, 20}, // c_out3
+ {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, // a_out1
+ {1, 2, 3, 4, 5, 11, 12, 13, 14, 15}, // a_out2
+ {1, 2, 3, 4, 5, 16, 17, 18, 19, 20}, // a_out3
+ {1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3,
+ 4, 5, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 16, 17, 18, 19, 20} // final_out
+ }));
+ _context->setBackends({"acl_cl"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneOp_Concat_Subtensor_2D)
+{
+ CircleGen cgen;
+ int in1 = cgen.addTensor({{1, 4}, circle::TensorType::TensorType_FLOAT32});
+ int in2 = cgen.addTensor({{1, 2}, circle::TensorType::TensorType_FLOAT32});
+ std::vector<int32_t> axis_data{1};
+ uint32_t axis_buf = cgen.addBuffer(axis_data);
+ int axis = cgen.addTensor({{1}, circle::TensorType::TensorType_INT32, axis_buf});
+
+ int s_out1 = cgen.addTensor({{1, 1}, circle::TensorType::TensorType_FLOAT32});
+ int s_out2 = cgen.addTensor({{1, 1}, circle::TensorType::TensorType_FLOAT32});
+ int s_out3 = cgen.addTensor({{1, 1}, circle::TensorType::TensorType_FLOAT32});
+ int s_out4 = cgen.addTensor({{1, 1}, circle::TensorType::TensorType_FLOAT32});
+
+ int c_out1 = cgen.addTensor({{1, 2}, circle::TensorType::TensorType_FLOAT32});
+ int c_out2 = cgen.addTensor({{1, 2}, circle::TensorType::TensorType_FLOAT32});
+ int c_out3 = cgen.addTensor({{1, 2}, circle::TensorType::TensorType_FLOAT32});
+
+ int a_out1 = cgen.addTensor({{1, 2}, circle::TensorType::TensorType_FLOAT32});
+ int a_out2 = cgen.addTensor({{1, 2}, circle::TensorType::TensorType_FLOAT32});
+ int a_out3 = cgen.addTensor({{1, 2}, circle::TensorType::TensorType_FLOAT32});
+
+ int final_out = cgen.addTensor({{1, 7}, circle::TensorType::TensorType_FLOAT32});
+
+ cgen.addOperatorSplit({{axis, in1}, {s_out1, s_out2, s_out3, s_out4}}, 4);
+
+ cgen.addOperatorConcatenation({{s_out1, s_out2}, {c_out1}}, 1,
+ circle::ActivationFunctionType::ActivationFunctionType_NONE);
+ cgen.addOperatorConcatenation({{s_out1, s_out3}, {c_out2}}, 1,
+ circle::ActivationFunctionType::ActivationFunctionType_NONE);
+ cgen.addOperatorConcatenation({{s_out1, s_out4}, {c_out3}}, 1,
+ circle::ActivationFunctionType::ActivationFunctionType_NONE);
+
+ cgen.addOperatorAdd({{c_out1, in2}, {a_out1}},
+ circle::ActivationFunctionType::ActivationFunctionType_NONE);
+ cgen.addOperatorAdd({{c_out2, in2}, {a_out2}},
+ circle::ActivationFunctionType::ActivationFunctionType_NONE);
+ cgen.addOperatorAdd({{c_out3, in2}, {a_out3}},
+ circle::ActivationFunctionType::ActivationFunctionType_NONE);
+
+ cgen.addOperatorConcatenation({{s_out1, a_out1, a_out2, a_out3}, {final_out}}, 1,
+ circle::ActivationFunctionType::ActivationFunctionType_NONE);
+
+ cgen.setInputsAndOutputs({in1, in2}, {s_out1, s_out2, s_out3, s_out4, c_out1, c_out2, c_out3,
+ a_out1, a_out2, a_out3, final_out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(uniformTCD<float>(
+ {
+ // inputs
+ {1, 2, 3, 4}, // in1
+ {0, 0} // in2
+ },
+ {
+ // outputs
+ {1}, // s_out1
+ {2}, // s_out2
+ {3}, // s_out3
+ {4}, // s_out4
+ {1, 2}, // c_out1
+ {1, 3}, // c_out2
+ {1, 4}, // c_out3
+ {1, 2}, // a_out1
+ {1, 3}, // a_out2
+ {1, 4}, // a_out3
+ {1, 1, 2, 1, 3, 1, 4} // final_out
+ }));
+ _context->setBackends({"acl_cl"});
+
+ SUCCEED();
+}
diff --git a/tests/nnfw_api/src/one_op_tests/Cos.cc b/tests/nnfw_api/src/one_op_tests/Cos.cc
index 72bfe3e2f..03944746a 100644
--- a/tests/nnfw_api/src/one_op_tests/Cos.cc
+++ b/tests/nnfw_api/src/one_op_tests/Cos.cc
@@ -26,7 +26,7 @@ TEST_F(GenModelTest, OneOp_Cos)
_context = std::make_unique<GenModelTestContext>(cgen.finish());
const float pi = 3.141592653589793;
- _context->addTestCase({{{0, pi / 2, pi, 7}}, {{1, 0, -1, 0.75390225434}}});
+ _context->addTestCase(uniformTCD<float>({{0, pi / 2, pi, 7}}, {{1, 0, -1, 0.75390225434}}));
_context->setBackends({"cpu"});
SUCCEED();
@@ -44,7 +44,7 @@ TEST_F(GenModelTest, neg_OneOp_Cos_TwoOperand)
_context = std::make_unique<GenModelTestContext>(cgen.finish());
_context->setBackends({"cpu"});
- _context->setCompileFail();
+ _context->expectFailModelLoad();
SUCCEED();
}
diff --git a/tests/nnfw_api/src/one_op_tests/Equal.cc b/tests/nnfw_api/src/one_op_tests/Equal.cc
new file mode 100644
index 000000000..54dcbee12
--- /dev/null
+++ b/tests/nnfw_api/src/one_op_tests/Equal.cc
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GenModelTest.h"
+
+TEST_F(GenModelTest, OneOp_Equal)
+{
+ CircleGen cgen;
+ int lhs = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int rhs = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_BOOL});
+ cgen.addOperatorEqual({{lhs, rhs}, {out}});
+ cgen.setInputsAndOutputs({lhs, rhs}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ TestCaseData tcd;
+ tcd.addInput(std::vector<float>{0.1, 0.3, 0.5, 0.7});
+ tcd.addInput(std::vector<float>{0.1, 0.2, 0.3, 0.4});
+ tcd.addOutput(std::vector<bool>{true, false, false, false});
+ _context->addTestCase(tcd);
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_Equal_DifferentType)
+{
+ CircleGen cgen;
+ int lhs = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int rhs = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_INT32});
+ int out = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_BOOL});
+ cgen.addOperatorEqual({{lhs, rhs}, {out}});
+ cgen.setInputsAndOutputs({lhs, rhs}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+ _context->expectFailCompile();
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_Equal_InvalidType)
+{
+ CircleGen cgen;
+ int lhs = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int rhs = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_INT32});
+ cgen.addOperatorEqual({{lhs, rhs}, {out}});
+ cgen.setInputsAndOutputs({lhs, rhs}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+ _context->expectFailCompile();
+
+ SUCCEED();
+}
diff --git a/tests/nnfw_api/src/one_op_tests/FullyConnected.cc b/tests/nnfw_api/src/one_op_tests/FullyConnected.cc
new file mode 100644
index 000000000..58bc830ef
--- /dev/null
+++ b/tests/nnfw_api/src/one_op_tests/FullyConnected.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GenModelTest.h"
+
+#include <memory>
+
+TEST_F(GenModelTest, OneOp_FullyConnected)
+{
+ CircleGen cgen;
+ // clang-format off
+ std::vector<float> weight_data{ 1, 0, 0, 1,
+ 2, 0, 0, -1,
+ 3, 0, 0, 2,
+ 4, 0, 0, 1,
+ 1, 0, 0, 1,
+ 2, 0, 0, -1,
+ 3, 0, 0, 2,
+ 4, 0, 0, 1,
+ 1, 0, 0, 1,
+ 2, 0, 0, -1,
+ 3, 0, 0, 2,
+ 4, 0, 0, 1,
+ 1, 0, 0, 1,
+ 2, 0, 0, -1,
+ 3, 0, 0, 2,
+ 4, 0, 0, 1 };
+ std::vector<float> bias_data{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 };
+ // clang-format on
+ uint32_t weight_buf = cgen.addBuffer(weight_data);
+ uint32_t bias_buf = cgen.addBuffer(bias_data);
+ int input = cgen.addTensor({{1, 4}, circle::TensorType::TensorType_FLOAT32});
+ int weight = cgen.addTensor({{16, 4}, circle::TensorType::TensorType_FLOAT32, weight_buf});
+ int bias = cgen.addTensor({{16}, circle::TensorType::TensorType_FLOAT32, bias_buf});
+ int output = cgen.addTensor({{1, 16}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorFullyConnected({{input, weight, bias}, {output}});
+ cgen.setInputsAndOutputs({input}, {output});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(
+ uniformTCD<float>({{1, 3, 2, 1}}, {{2, 1, 5, 5, 2, 1, 5, 5, 2, 1, 5, 5, 2, 1, 5, 6}}));
+ _context->setBackends({"cpu", "acl_neon"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneOp_FullyConnected16x1)
+{
+ CircleGen cgen;
+ // clang-format off
+ std::vector<float> weight_data{ 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4,
+ 1, -1, 2, 1, 1, -1, 2, 1, 1, -1, 2, 1, 1, -1, 2, 1};
+ std::vector<float> bias_data{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 };
+ // clang-format on
+ uint32_t weight_buf = cgen.addBuffer(weight_data);
+ uint32_t bias_buf = cgen.addBuffer(bias_data);
+ int input = cgen.addTensor({{1, 4}, circle::TensorType::TensorType_FLOAT32});
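+ // Sparsity metadata (traversal order, block map, and per-dimension formats) describing the weight in a 16x1 block-sparse layout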
+ CircleGen::SparsityParams sp{
+ {0, 1, 2, 3},
+ {0, 1},
+ {{CircleGen::SparseDimensionType::DimensionType_DENSE, 1},
+ {CircleGen::SparseDimensionType::DimensionType_SPARSE_CSR, {0, 2}, {0, 3}},
+ {CircleGen::SparseDimensionType::DimensionType_DENSE, 16},
+ {CircleGen::SparseDimensionType::DimensionType_DENSE, 1}}};
+ int weight = cgen.addTensor({{16, 4}, circle::TensorType::TensorType_FLOAT32, weight_buf}, sp);
+ int bias = cgen.addTensor({{16}, circle::TensorType::TensorType_FLOAT32, bias_buf});
+ int output = cgen.addTensor({{1, 16}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorFullyConnected({{input, weight, bias}, {output}});
+ cgen.setInputsAndOutputs({input}, {output});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(
+ uniformTCD<float>({{1, 3, 2, 1}}, {{2, 1, 5, 5, 2, 1, 5, 5, 2, 1, 5, 5, 2, 1, 5, 6}}));
+ _context->setBackends({"cpu"});
+
+ SUCCEED();
+}
diff --git a/tests/nnfw_api/src/one_op_tests/If.cc b/tests/nnfw_api/src/one_op_tests/If.cc
new file mode 100644
index 000000000..4ec294223
--- /dev/null
+++ b/tests/nnfw_api/src/one_op_tests/If.cc
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GenModelTest.h"
+
+#include <memory>
+
+TEST_F(GenModelTest, OneOp_If)
+{
+ // The model looks just like the below pseudocode
+ //
+ // function model(x)
+ // {
+ // if (x < 0.0)
+ // return -100.0;
+ // else
+ // return 100.0;
+ // }
+
+ CircleGen cgen;
+
+ // constant buffers
+ std::vector<float> comp_data{0.0};
+ uint32_t comp_buf = cgen.addBuffer(comp_data);
+ std::vector<float> then_data{-100};
+ uint32_t then_buf = cgen.addBuffer(then_data);
+ std::vector<float> else_data{100};
+ uint32_t else_buf = cgen.addBuffer(else_data);
+
+ // primary subgraph
+ {
+ int x = cgen.addTensor({{1}, circle::TensorType_FLOAT32});
+ int comp = cgen.addTensor({{1}, circle::TensorType_FLOAT32, comp_buf});
+ int cond = cgen.addTensor({{1}, circle::TensorType_BOOL});
+ cgen.addOperatorLess({{x, comp}, {cond}});
+
+ int ret = cgen.addTensor({{1}, circle::TensorType_FLOAT32});
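+ // Subgraph index 1 is the "then" branch and index 2 is the "else" branch (both defined below)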
+ cgen.addOperatorIf({{cond}, {ret}}, 1, 2);
+
+ cgen.setInputsAndOutputs({x}, {ret});
+ }
+
+ // then subgraph
+ {
+ cgen.nextSubgraph();
+ int ret = cgen.addTensor({{1}, circle::TensorType_FLOAT32, then_buf});
+ cgen.setInputsAndOutputs({}, {ret});
+ }
+
+ // else subgraph
+ {
+ cgen.nextSubgraph();
+ int ret = cgen.addTensor({{1}, circle::TensorType_FLOAT32, else_buf});
+ cgen.setInputsAndOutputs({}, {ret});
+ }
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(uniformTCD<float>({{-1.0}}, {{-100.0}}));
+ _context->addTestCase(uniformTCD<float>({{1.0}}, {{100.0}}));
+ _context->setBackends({"cpu"});
+
+ SUCCEED();
+}
+
+class IfWrongSubgraphIndex : public GenModelTest,
+ public ::testing::WithParamInterface<std::pair<int, int>>
+{
+};
+
+TEST_P(IfWrongSubgraphIndex, neg_Test)
+{
+ // These values must be less than 0 or greater than 2
+ int then_subg = GetParam().first;
+ int else_subg = GetParam().second;
+
+ // When the If operation's subgraph index is invalid
+
+ CircleGen cgen;
+
+ // constant buffers
+ std::vector<float> then_data{-100};
+ uint32_t then_buf = cgen.addBuffer(then_data);
+ std::vector<float> else_data{100};
+ uint32_t else_buf = cgen.addBuffer(else_data);
+
+ // primary subgraph
+ {
+ int x = cgen.addTensor({{1}, circle::TensorType_BOOL});
+ int ret = cgen.addTensor({{1}, circle::TensorType_FLOAT32});
+ cgen.addOperatorIf({{x}, {ret}}, then_subg, else_subg);
+
+ cgen.setInputsAndOutputs({x}, {ret});
+ }
+
+ // then subgraph
+ {
+ cgen.nextSubgraph();
+ int ret = cgen.addTensor({{1}, circle::TensorType_FLOAT32, then_buf});
+ cgen.setInputsAndOutputs({}, {ret});
+ }
+
+ // else subgraph
+ {
+ cgen.nextSubgraph();
+ int ret = cgen.addTensor({{1}, circle::TensorType_FLOAT32, else_buf});
+ cgen.setInputsAndOutputs({}, {ret});
+ }
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"cpu"});
+ _context->expectFailModelLoad();
+
+ SUCCEED();
+}
+
+INSTANTIATE_TEST_CASE_P(GenModelTest, IfWrongSubgraphIndex,
+ ::testing::Values(std::make_pair(99, 2), std::make_pair(-1, 2),
+ std::make_pair(1, 99), std::make_pair(1, -99),
+ std::make_pair(-99, 99)));
diff --git a/tests/nnfw_api/src/one_op_tests/InstanceNorm.cc b/tests/nnfw_api/src/one_op_tests/InstanceNorm.cc
new file mode 100644
index 000000000..6569ced21
--- /dev/null
+++ b/tests/nnfw_api/src/one_op_tests/InstanceNorm.cc
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GenModelTest.h"
+
+TEST_F(GenModelTest, OneOp_InstanceNorm)
+{
+ CircleGen cgen;
+ uint32_t beta_buf = cgen.addBuffer(std::vector<float>{1});
+ uint32_t gamma_buf = cgen.addBuffer(std::vector<float>{2});
+ int beta = cgen.addTensor({{1}, circle::TensorType::TensorType_FLOAT32, beta_buf});
+ int gamma = cgen.addTensor({{1}, circle::TensorType::TensorType_FLOAT32, gamma_buf});
+ int in = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+
+ cgen.addOperatorInstanceNorm({{in, beta, gamma}, {out}}, 0, circle::ActivationFunctionType_NONE);
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(uniformTCD<float>({{1, 1, 1, 1}}, {{2, 2, 2, 2}}));
+ _context->setBackends({"acl_cl", "acl_neon"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_InstanceNorm_InvalidActivation)
+{
+ CircleGen cgen;
+ uint32_t beta_buf = cgen.addBuffer(std::vector<float>{1});
+ uint32_t gamma_buf = cgen.addBuffer(std::vector<float>{2});
+ int beta = cgen.addTensor({{1}, circle::TensorType::TensorType_FLOAT32, beta_buf});
+ int gamma = cgen.addTensor({{1}, circle::TensorType::TensorType_FLOAT32, gamma_buf});
+ int in = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+
+ cgen.addOperatorInstanceNorm({{in, beta, gamma}, {out}}, 0,
+ static_cast<circle::ActivationFunctionType>(128) /* Invalid value */);
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->expectFailModelLoad();
+
+ SUCCEED();
+}
diff --git a/tests/nnfw_api/src/one_op_tests/L2Normalization.cc b/tests/nnfw_api/src/one_op_tests/L2Normalization.cc
index 8b4b8f5b6..8e0ae6df2 100644
--- a/tests/nnfw_api/src/one_op_tests/L2Normalization.cc
+++ b/tests/nnfw_api/src/one_op_tests/L2Normalization.cc
@@ -26,9 +26,10 @@ TEST_F(GenModelTest, OneOp_L2Normalization)
cgen.setInputsAndOutputs({in}, {out});
_context = std::make_unique<GenModelTestContext>(cgen.finish());
- _context->addTestCase({{{0, 3, 4, 0, 5, 12, 0, 8, 15, 0, 7, 24}},
- {{0, 0.6, 0.8, 0, 0.38461539149284363, 0.92307698726654053, 0,
- 0.47058823704719543, 0.88235294818878174, 0, 0.28, 0.96}}});
+ _context->addTestCase(
+ uniformTCD<float>({{0, 3, 4, 0, 5, 12, 0, 8, 15, 0, 7, 24}},
+ {{0, 0.6, 0.8, 0, 0.38461539149284363, 0.92307698726654053, 0,
+ 0.47058823704719543, 0.88235294818878174, 0, 0.28, 0.96}}));
_context->setBackends({"acl_cl", "acl_neon", "cpu"});
SUCCEED();
diff --git a/tests/nnfw_api/src/one_op_tests/LeakyRelu.cc b/tests/nnfw_api/src/one_op_tests/LeakyRelu.cc
index 9db911734..e17f34fb3 100644
--- a/tests/nnfw_api/src/one_op_tests/LeakyRelu.cc
+++ b/tests/nnfw_api/src/one_op_tests/LeakyRelu.cc
@@ -25,8 +25,24 @@ TEST_F(GenModelTest, OneOp_LeakyRelu)
cgen.setInputsAndOutputs({in}, {out});
_context = std::make_unique<GenModelTestContext>(cgen.finish());
- _context->addTestCase({{{0, 1.0, 3.0, 1.0, -1.0, -2.0f}}, {{0, 1.0, 3.0, 1.0, -0.5, -1.0}}});
+ _context->addTestCase(
+ uniformTCD<float>({{0, 1.0, 3.0, 1.0, -1.0, -2.0f}}, {{0, 1.0, 3.0, 1.0, -0.5, -1.0}}));
_context->setBackends({"acl_cl", "acl_neon"});
SUCCEED();
}
+
+TEST_F(GenModelTest, neg_OneOp_LeakyRelu_InvalidType)
+{
+ CircleGen cgen;
+ int in = cgen.addTensor({{2, 3}, circle::TensorType::TensorType_UINT8});
+ int out = cgen.addTensor({{2, 3}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorLeakyRelu({{in}, {out}}, 0.5);
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"acl_cl", "acl_neon"});
+ _context->expectFailCompile();
+
+ SUCCEED();
+}
diff --git a/tests/nnfw_api/src/one_op_tests/LogSoftmax.cc b/tests/nnfw_api/src/one_op_tests/LogSoftmax.cc
new file mode 100644
index 000000000..b34b2e83f
--- /dev/null
+++ b/tests/nnfw_api/src/one_op_tests/LogSoftmax.cc
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GenModelTest.h"
+
+TEST_F(GenModelTest, OneOp_LogSoftmax)
+{
+ // NOTE For TF Lite, the params are fixed as:
+ // beta = 1.0, axis = -1
+
+ CircleGen cgen;
+ int in = cgen.addTensor({{1, 1, 1, 4, 2}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 1, 1, 4, 2}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorLogSoftmax({{in}, {out}});
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"cpu"});
+ _context->addTestCase(uniformTCD<float>(
+ {{0, -6, 2, 4, 3, -2, 10, 1}},
+ {{-.00247565, -6.00247, -2.12692, -.126928, -.00671534, -5.00671, -.000123374, -9.00012}}));
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_LogSoftmax_InvalidModel)
+{
+ CircleGen cgen;
+ int out = cgen.addTensor({{4, 2}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorLogSoftmax({{}, {out}}); // No input tensor
+ cgen.setInputsAndOutputs({}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"cpu"});
+ _context->expectFailModelLoad();
+
+ SUCCEED();
+}
diff --git a/tests/nnfw_api/src/one_op_tests/OneHot.cc b/tests/nnfw_api/src/one_op_tests/OneHot.cc
new file mode 100644
index 000000000..e688e790d
--- /dev/null
+++ b/tests/nnfw_api/src/one_op_tests/OneHot.cc
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GenModelTest.h"
+
+#include <memory>
+
+TEST_F(GenModelTest, OneOp_OneHot_OffValueToConst)
+{
+ CircleGen cgen;
+ std::vector<int32_t> depth_data{3};
+ uint32_t depth_buf = cgen.addBuffer(depth_data);
+ std::vector<float> off_value_data{0};
+ uint32_t off_value_buf = cgen.addBuffer(off_value_data);
+ int indices = cgen.addTensor({{1, 2, 2}, circle::TensorType::TensorType_INT32});
+ int depth = cgen.addTensor({{1}, circle::TensorType::TensorType_INT32, depth_buf});
+ int on_value = cgen.addTensor({{1}, circle::TensorType::TensorType_FLOAT32});
+ int off_value = cgen.addTensor({{1}, circle::TensorType::TensorType_FLOAT32, off_value_buf});
+ int axis = 2;
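+ // With depth 3 inserted at axis 2, the {1, 2, 2} indices produce a one-hot output of shape {1, 2, 3, 2}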
+ int out = cgen.addTensor({{1, 2, 3, 2}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorOneHot({{indices, depth, on_value, off_value}, {out}}, axis);
+ cgen.setInputsAndOutputs({indices, on_value}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ TestCaseData tcd;
+ tcd.addInput(std::vector<int32_t>{1, 2, 0, 2});
+ tcd.addInput(std::vector<float>{1});
+ tcd.addOutput(std::vector<float>{0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1});
+ _context->addTestCase(tcd);
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneOp_OneHot_OffValueToNotZero)
+{
+ CircleGen cgen;
+ std::vector<int32_t> depth_data{3};
+ uint32_t depth_buf = cgen.addBuffer(depth_data);
+ int indices = cgen.addTensor({{1, 2, 2}, circle::TensorType::TensorType_INT32});
+ int depth = cgen.addTensor({{1}, circle::TensorType::TensorType_INT32, depth_buf});
+ int on_value = cgen.addTensor({{1}, circle::TensorType::TensorType_FLOAT32});
+ int off_value = cgen.addTensor({{1}, circle::TensorType::TensorType_FLOAT32});
+ int axis = 2;
+ int out = cgen.addTensor({{1, 2, 3, 2}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorOneHot({{indices, depth, on_value, off_value}, {out}}, axis);
+ cgen.setInputsAndOutputs({indices, on_value, off_value}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ TestCaseData tcd;
+ tcd.addInput(std::vector<int32_t>{1, 2, 0, 2});
+ tcd.addInput(std::vector<float>{1});
+ tcd.addInput(std::vector<float>{-1});
+ tcd.addOutput(std::vector<float>{-1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1});
+ _context->addTestCase(tcd);
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneOp_OneHot_IndicesValueToNeg_OffValueToConst)
+{
+ CircleGen cgen;
+ std::vector<int32_t> depth_data{3};
+ uint32_t depth_buf = cgen.addBuffer(depth_data);
+ std::vector<float> off_value_data{0};
+ uint32_t off_value_buf = cgen.addBuffer(off_value_data);
+ int indices = cgen.addTensor({{2, 2}, circle::TensorType::TensorType_INT32});
+ int depth = cgen.addTensor({{1}, circle::TensorType::TensorType_INT32, depth_buf});
+ int on_value = cgen.addTensor({{1}, circle::TensorType::TensorType_FLOAT32});
+ int off_value = cgen.addTensor({{1}, circle::TensorType::TensorType_FLOAT32, off_value_buf});
+ int axis = 2;
+ int out = cgen.addTensor({{2, 2, 3}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorOneHot({{indices, depth, on_value, off_value}, {out}}, axis);
+ cgen.setInputsAndOutputs({indices, on_value}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ TestCaseData tcd;
+ tcd.addInput(std::vector<int32_t>{1, 2, 0, -1});
+ tcd.addInput(std::vector<float>{1});
+ tcd.addOutput(std::vector<float>{0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0});
+ _context->addTestCase(tcd);
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneOp_OneHot_IndicesValueToNeg_OffValueToVar)
+{
+ CircleGen cgen;
+ std::vector<int32_t> depth_data{3};
+ uint32_t depth_buf = cgen.addBuffer(depth_data);
+ int indices = cgen.addTensor({{2, 2}, circle::TensorType::TensorType_INT32});
+ int depth = cgen.addTensor({{1}, circle::TensorType::TensorType_INT32, depth_buf});
+ int on_value = cgen.addTensor({{1}, circle::TensorType::TensorType_FLOAT32});
+ int off_value = cgen.addTensor({{1}, circle::TensorType::TensorType_FLOAT32});
+ int axis = 2;
+ int out = cgen.addTensor({{2, 2, 3}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorOneHot({{indices, depth, on_value, off_value}, {out}}, axis);
+ cgen.setInputsAndOutputs({indices, on_value, off_value}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ TestCaseData tcd;
+ tcd.addInput(std::vector<int32_t>{1, 2, 0, -1});
+ tcd.addInput(std::vector<float>{1});
+ tcd.addInput(std::vector<float>{0});
+ tcd.addOutput(std::vector<float>{0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0});
+ _context->addTestCase(tcd);
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_OneHot_OneOperand)
+{
+ CircleGen cgen;
+ int indices = cgen.addTensor({{1, 2, 2}, circle::TensorType::TensorType_INT32});
+ int axis = 2;
+ int out = cgen.addTensor({{1, 2, 3, 2}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorOneHot({{indices}, {out}}, axis);
+ cgen.setInputsAndOutputs({indices}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+ _context->expectFailModelLoad();
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_OneHot_TwoOperands)
+{
+ CircleGen cgen;
+ std::vector<int> depth_data{3};
+ uint32_t depth_buf = cgen.addBuffer(depth_data);
+ int indices = cgen.addTensor({{1, 2, 2}, circle::TensorType::TensorType_INT32});
+ int depth = cgen.addTensor({{1}, circle::TensorType::TensorType_INT32, depth_buf});
+ int axis = 2;
+ int out = cgen.addTensor({{1, 2, 3, 2}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorOneHot({{indices, depth}, {out}}, axis);
+ cgen.setInputsAndOutputs({indices}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+ _context->expectFailModelLoad();
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_OneHot_ThreeOperands)
+{
+ CircleGen cgen;
+ std::vector<int> depth_data{3};
+ uint32_t depth_buf = cgen.addBuffer(depth_data);
+ int indices = cgen.addTensor({{1, 2, 2}, circle::TensorType::TensorType_INT32});
+ int depth = cgen.addTensor({{1}, circle::TensorType::TensorType_INT32, depth_buf});
+ int on_value = cgen.addTensor({{1}, circle::TensorType::TensorType_FLOAT32});
+ int axis = 2;
+ int out = cgen.addTensor({{1, 2, 3, 2}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorOneHot({{indices, depth, on_value}, {out}}, axis);
+ cgen.setInputsAndOutputs({indices, on_value}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+ _context->expectFailModelLoad();
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_OneHot_InvalidAxis)
+{
+ CircleGen cgen;
+ std::vector<int> depth_data{3};
+ uint32_t depth_buf = cgen.addBuffer(depth_data);
+ int indices = cgen.addTensor({{1, 2, 2}, circle::TensorType::TensorType_INT32});
+ int depth = cgen.addTensor({{1}, circle::TensorType::TensorType_INT32, depth_buf});
+ int on_value = cgen.addTensor({{1}, circle::TensorType::TensorType_FLOAT32});
+ int off_value = cgen.addTensor({{1}, circle::TensorType::TensorType_FLOAT32});
+ int axis = 4;
+ int out = cgen.addTensor({{1, 2, 3, 2}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorOneHot({{indices, depth, on_value, off_value}, {out}}, axis);
+ cgen.setInputsAndOutputs({indices, on_value, off_value}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+ _context->expectFailCompile();
+
+ SUCCEED();
+}
diff --git a/tests/nnfw_api/src/one_op_tests/Pad.cc b/tests/nnfw_api/src/one_op_tests/Pad.cc
index 10fe6c78a..63d02ab88 100644
--- a/tests/nnfw_api/src/one_op_tests/Pad.cc
+++ b/tests/nnfw_api/src/one_op_tests/Pad.cc
@@ -28,7 +28,8 @@ TEST_F(GenModelTest, OneOp_Pad)
cgen.addOperatorPad({{in, padding}, {out}});
cgen.setInputsAndOutputs({in}, {out});
_context = std::make_unique<GenModelTestContext>(cgen.finish());
- _context->addTestCase({{{1, 2, 3, 4}}, {{0, 0, 0, 0, 0, 1, 2, 0, 0, 3, 4, 0, 0, 0, 0, 0}}});
+ _context->addTestCase(
+ uniformTCD<float>({{1, 2, 3, 4}}, {{0, 0, 0, 0, 0, 1, 2, 0, 0, 3, 4, 0, 0, 0, 0, 0}}));
_context->setBackends({"acl_cl", "acl_neon", "cpu"});
SUCCEED();
@@ -48,7 +49,7 @@ TEST_F(GenModelTest, neg_OneOp_Pad_InvalidPadRank)
_context = std::make_unique<GenModelTestContext>(cgen.finish());
_context->setBackends({"acl_cl", "acl_neon", "cpu"});
- _context->setCompileFail();
+ _context->expectFailCompile();
SUCCEED();
}
@@ -67,7 +68,7 @@ TEST_F(GenModelTest, neg_OneOp_Pad_InvalidPadDim0)
_context = std::make_unique<GenModelTestContext>(cgen.finish());
_context->setBackends({"acl_cl", "acl_neon", "cpu"});
- _context->setCompileFail();
+ _context->expectFailCompile();
SUCCEED();
}
@@ -86,7 +87,7 @@ TEST_F(GenModelTest, neg_OneOp_Pad_InvalidPadDim1)
_context = std::make_unique<GenModelTestContext>(cgen.finish());
_context->setBackends({"acl_cl", "acl_neon", "cpu"});
- _context->setCompileFail();
+ _context->expectFailCompile();
SUCCEED();
}
diff --git a/tests/nnfw_api/src/one_op_tests/PadV2.cc b/tests/nnfw_api/src/one_op_tests/PadV2.cc
index 9f7ff9c0e..e613fe282 100644
--- a/tests/nnfw_api/src/one_op_tests/PadV2.cc
+++ b/tests/nnfw_api/src/one_op_tests/PadV2.cc
@@ -34,7 +34,8 @@ TEST_F(GenModelTest, OneOp_PadV2)
cgen.setInputsAndOutputs({in}, {out});
_context = std::make_unique<GenModelTestContext>(cgen.finish());
- _context->addTestCase({{{1, 2, 3, 4}}, {{3, 3, 3, 3, 3, 1, 2, 3, 3, 3, 4, 3, 3, 3, 3, 3}}});
+ _context->addTestCase(
+ uniformTCD<float>({{1, 2, 3, 4}}, {{3, 3, 3, 3, 3, 1, 2, 3, 3, 3, 4, 3, 3, 3, 3, 3}}));
_context->setBackends({"cpu"});
SUCCEED();
@@ -59,7 +60,7 @@ TEST_F(GenModelTest, neg_OneOp_PadV2_InvalidPadRank)
_context = std::make_unique<GenModelTestContext>(cgen.finish());
_context->setBackends({"acl_cl", "acl_neon", "cpu"});
- _context->setCompileFail();
+ _context->expectFailCompile();
SUCCEED();
}
@@ -83,7 +84,7 @@ TEST_F(GenModelTest, neg_OneOp_PadV2_InvalidPadDim0)
_context = std::make_unique<GenModelTestContext>(cgen.finish());
_context->setBackends({"acl_cl", "acl_neon", "cpu"});
- _context->setCompileFail();
+ _context->expectFailCompile();
SUCCEED();
}
@@ -107,7 +108,7 @@ TEST_F(GenModelTest, neg_OneOp_PadV2_InvalidPadDim1)
_context = std::make_unique<GenModelTestContext>(cgen.finish());
_context->setBackends({"acl_cl", "acl_neon", "cpu"});
- _context->setCompileFail();
+ _context->expectFailCompile();
SUCCEED();
}
diff --git a/tests/nnfw_api/src/one_op_tests/Rank.cc b/tests/nnfw_api/src/one_op_tests/Rank.cc
index ed9d67294..7af1b4540 100644
--- a/tests/nnfw_api/src/one_op_tests/Rank.cc
+++ b/tests/nnfw_api/src/one_op_tests/Rank.cc
@@ -17,26 +17,19 @@
#include "GenModelTest.h"
// WORKAROUND Handle int32_t type input/output
-union float_int {
- int32_t i;
- float f;
-};
-
TEST_F(GenModelTest, OneOp_Rank)
{
CircleGen cgen;
int in = cgen.addTensor({{1, 3, 3, 2}, circle::TensorType::TensorType_FLOAT32});
int out = cgen.addTensor({{1}, circle::TensorType::TensorType_INT32});
- // TODO handle many type in addTestCase
- float_int output_data;
- output_data.i = 4;
-
cgen.addOperatorRank({{in}, {out}});
cgen.setInputsAndOutputs({in}, {out});
_context = std::make_unique<GenModelTestContext>(cgen.finish());
- _context->addTestCase(
- {{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}}, {{output_data.f}}});
+ TestCaseData tcd;
+ tcd.addInput(std::vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
+ tcd.addOutput(std::vector<int32_t>{4});
+ _context->addTestCase(tcd);
_context->setBackends({"cpu"});
SUCCEED();
@@ -49,14 +42,11 @@ TEST_F(GenModelTest, OneOp_Rank_Int32)
int out = cgen.addTensor({{1}, circle::TensorType::TensorType_INT32});
// TODO handle many type in addTestCase
- float_int output_data;
- output_data.i = 4;
-
cgen.addOperatorRank({{in}, {out}});
cgen.setInputsAndOutputs({in}, {out});
_context = std::make_unique<GenModelTestContext>(cgen.finish());
- _context->addTestCase(
- {{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}}, {{output_data.f}}});
+ _context->addTestCase(uniformTCD<int32_t>(
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}}, {{4}}));
_context->setBackends({"cpu"});
SUCCEED();
diff --git a/tests/nnfw_api/src/one_op_tests/ResizeBilinear.cc b/tests/nnfw_api/src/one_op_tests/ResizeBilinear.cc
new file mode 100644
index 000000000..555d074a3
--- /dev/null
+++ b/tests/nnfw_api/src/one_op_tests/ResizeBilinear.cc
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GenModelTest.h"
+
+#include <memory>
+
+TEST_F(GenModelTest, OneOp_ResizeBilinear_SizeToConst)
+{
+ CircleGen cgen;
+ std::vector<int32_t> size_data{3, 3};
+ uint32_t size_buf = cgen.addBuffer(size_data);
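+ // size is a constant {3, 3}: the 2x2 input is upsampled to 3x3 by bilinear interpolation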
+ int size = cgen.addTensor({{2}, circle::TensorType::TensorType_INT32, size_buf});
+ int in = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 3, 3, 1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorResizeBilinear({{in, size}, {out}});
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(uniformTCD<float>(
+ {{1, 1, 2, 2}}, {{1, 1, 1, 1.666666667, 1.666666667, 1.666666667, 2, 2, 2}}));
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneOp_ResizeBilinear_SizeToVar)
+{
+ CircleGen cgen;
+ int size = cgen.addTensor({{2}, circle::TensorType::TensorType_INT32});
+ int in = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorResizeBilinear({{in, size}, {out}});
+ cgen.setInputsAndOutputs({in, size}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ TestCaseData tcd;
+ tcd.addInput(std::vector<int32_t>{3, 3});
+ tcd.addInput(std::vector<float>{1, 1, 2, 2});
+ tcd.addOutput(std::vector<float>{1, 1, 1, 1.666666667, 1.666666667, 1.666666667, 2, 2, 2});
+ _context->addTestCase(tcd);
+ _context->setBackends({"cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_ResizeBilinear_InvalidSizeVal)
+{
+ CircleGen cgen;
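+ // a negative size value (-3) is invalid, so compilation is expected to fail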
+ std::vector<int32_t> size_data{-3, 3};
+ uint32_t size_buf = cgen.addBuffer(size_data);
+ int size = cgen.addTensor({{2}, circle::TensorType::TensorType_INT32, size_buf});
+ int in = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorResizeBilinear({{in, size}, {out}});
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"cpu"});
+ _context->expectFailCompile();
+
+ SUCCEED();
+}
diff --git a/tests/nnfw_api/src/one_op_tests/ResizeNearestNeighbor.cc b/tests/nnfw_api/src/one_op_tests/ResizeNearestNeighbor.cc
index 94f45d4a5..d1617c33a 100644
--- a/tests/nnfw_api/src/one_op_tests/ResizeNearestNeighbor.cc
+++ b/tests/nnfw_api/src/one_op_tests/ResizeNearestNeighbor.cc
@@ -30,8 +30,9 @@ TEST_F(GenModelTest, OneOp_ResizeNearestNeighbor)
cgen.setInputsAndOutputs({in}, {out});
_context = std::make_unique<GenModelTestContext>(cgen.finish());
- _context->addTestCase({{{3, 4, 6, 10, 9, 10, 12, 16}},
- {{3, 4, 3, 4, 6, 10, 3, 4, 3, 4, 6, 10, 9, 10, 9, 10, 12, 16}}});
+ _context->addTestCase(
+ uniformTCD<float>({{3, 4, 6, 10, 9, 10, 12, 16}},
+ {{3, 4, 3, 4, 6, 10, 3, 4, 3, 4, 6, 10, 9, 10, 9, 10, 12, 16}}));
_context->setBackends({"acl_cl"});
SUCCEED();
diff --git a/tests/nnfw_api/src/one_op_tests/Reverse.cc b/tests/nnfw_api/src/one_op_tests/Reverse.cc
new file mode 100644
index 000000000..ef0c5fe82
--- /dev/null
+++ b/tests/nnfw_api/src/one_op_tests/Reverse.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GenModelTest.h"
+
+TEST_F(GenModelTest, OneOp_ReverseV2_3D)
+{
+ CircleGen cgen;
+
+ int in = cgen.addTensor({{4, 3, 2}, circle::TensorType::TensorType_FLOAT32});
+ std::vector<int32_t> axis_data{1};
+ uint32_t axis_buf = cgen.addBuffer(axis_data);
+ int axis = cgen.addTensor({{1}, circle::TensorType::TensorType_INT32, axis_buf});
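+ // axis = 1 reverses the middle dimension of the 4x3x2 input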
+ int out = cgen.addTensor({{4, 3, 2}, circle::TensorType::TensorType_FLOAT32});
+
+ cgen.addOperatorReverseV2({{in, axis}, {out}});
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"acl_cl", "cpu"});
+ _context->addTestCase(uniformTCD<float>(
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}},
+ {{5, 6, 3, 4, 1, 2, 11, 12, 9, 10, 7, 8, 17, 18, 15, 16, 13, 14, 23, 24, 21, 22, 19, 20}}));
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneOp_ReverseV2_1D)
+{
+ CircleGen cgen;
+
+ int in = cgen.addTensor({{4}, circle::TensorType::TensorType_FLOAT32});
+ std::vector<int32_t> axis_data{0};
+ uint32_t axis_buf = cgen.addBuffer(axis_data);
+ int axis = cgen.addTensor({{1}, circle::TensorType::TensorType_INT32, axis_buf});
+ int out = cgen.addTensor({{4}, circle::TensorType::TensorType_FLOAT32});
+
+ cgen.addOperatorReverseV2({{in, axis}, {out}});
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"acl_cl", "cpu"});
+ _context->addTestCase(uniformTCD<float>({{1, 2, 3, 4}}, {{4, 3, 2, 1}}));
+
+ SUCCEED();
+}
diff --git a/tests/nnfw_api/src/one_op_tests/Split.cc b/tests/nnfw_api/src/one_op_tests/Split.cc
new file mode 100644
index 000000000..1e91efec8
--- /dev/null
+++ b/tests/nnfw_api/src/one_op_tests/Split.cc
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GenModelTest.h"
+
+TEST_F(GenModelTest, OneOp_Split)
+{
+ CircleGen cgen;
+ int in = cgen.addTensor({{2, 4}, circle::TensorType::TensorType_FLOAT32});
+ std::vector<int32_t> axis_data{1};
+ uint32_t axis_buf = cgen.addBuffer(axis_data);
+ int axis = cgen.addTensor({{1}, circle::TensorType::TensorType_INT32, axis_buf});
+
+ int out1 = cgen.addTensor({{2, 2}, circle::TensorType::TensorType_FLOAT32});
+ int out2 = cgen.addTensor({{2, 2}, circle::TensorType::TensorType_FLOAT32});
+
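+ // split the 2x4 input into two 2x2 outputs along axis 1; the trailing argument is the number of splits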
+ cgen.addOperatorSplit({{axis, in}, {out1, out2}}, 2);
+ cgen.setInputsAndOutputs({in}, {out1, out2});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(
+ uniformTCD<float>({{1, 2, 3, 4, 5, 6, 7, 8}}, {{1, 2, 5, 6}, {3, 4, 7, 8}}));
+ _context->setBackends({"cpu", "acl_cl", "acl_neon"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneOp_SplitNonConstAxis)
+{
+ CircleGen cgen;
+ int in = cgen.addTensor({{2, 4}, circle::TensorType::TensorType_FLOAT32});
+ int axis = cgen.addTensor({{1}, circle::TensorType::TensorType_INT32});
+
+ int out1 = cgen.addTensor({{2, 2}, circle::TensorType::TensorType_FLOAT32});
+ int out2 = cgen.addTensor({{2, 2}, circle::TensorType::TensorType_FLOAT32});
+
+ cgen.addOperatorSplit({{axis, in}, {out1, out2}}, 2);
+ cgen.setInputsAndOutputs({axis, in}, {out1, out2});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+
+ TestCaseData tcd;
+ tcd.addInput(std::vector<int32_t>{1});
+ tcd.addInput(std::vector<float>{1, 2, 3, 4, 5, 6, 7, 8});
+ tcd.addOutput(std::vector<float>{1, 2, 5, 6});
+ tcd.addOutput(std::vector<float>{3, 4, 7, 8});
+
+ _context->addTestCase(tcd);
+ _context->setBackends({"cpu"});
+
+ SUCCEED();
+}
diff --git a/tests/nnfw_api/src/one_op_tests/StridedSlice.cc b/tests/nnfw_api/src/one_op_tests/StridedSlice.cc
new file mode 100644
index 000000000..fb29018d4
--- /dev/null
+++ b/tests/nnfw_api/src/one_op_tests/StridedSlice.cc
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GenModelTest.h"
+
+#include <memory>
+
+TEST_F(GenModelTest, OneOp_StridedSlice_LastDim)
+{
+ CircleGen cgen;
+ std::vector<int32_t> begin_data{0, 3};
+ std::vector<int32_t> end_data{0, 6};
+ std::vector<int32_t> strides_data{1, 1};
+ uint32_t begin_buf = cgen.addBuffer(begin_data);
+ uint32_t end_buf = cgen.addBuffer(end_data);
+ uint32_t strides_buf = cgen.addBuffer(strides_data);
+ int input = cgen.addTensor({{1, 6}, circle::TensorType::TensorType_FLOAT32});
+ int begin = cgen.addTensor({{2}, circle::TensorType::TensorType_INT32, begin_buf});
+ int end = cgen.addTensor({{2}, circle::TensorType::TensorType_INT32, end_buf});
+ int strides = cgen.addTensor({{2}, circle::TensorType::TensorType_INT32, strides_buf});
+ int out = cgen.addTensor({{1, 3}, circle::TensorType::TensorType_FLOAT32});
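+ // the trailing 1, 1 are begin_mask and end_mask: bit 0 set keeps the full range on dim 0, so only the last dim is sliced ([3:6] -> {4, 5, 6})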
+ cgen.addOperatorStridedSlice({{input, begin, end, strides}, {out}}, 1, 1);
+ cgen.setInputsAndOutputs({input}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(uniformTCD<float>({{1, 2, 3, 4, 5, 6}}, {{4, 5, 6}}));
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
diff --git a/tests/nnfw_api/src/one_op_tests/Tile.cc b/tests/nnfw_api/src/one_op_tests/Tile.cc
new file mode 100644
index 000000000..5fa76fc6d
--- /dev/null
+++ b/tests/nnfw_api/src/one_op_tests/Tile.cc
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GenModelTest.h"
+
+TEST_F(GenModelTest, OneOp_Tile_ConstMul)
+{
+ CircleGen cgen;
+ std::vector<int32_t> mul_data{1, 2};
+ uint32_t mul_buf = cgen.addBuffer(mul_data);
+ int in = cgen.addTensor({{2, 3}, circle::TensorType::TensorType_FLOAT32});
+ int mul = cgen.addTensor({{2}, circle::TensorType::TensorType_INT32, mul_buf});
+ int out = cgen.addTensor({{2, 6}, circle::TensorType::TensorType_FLOAT32});
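+ // multiples {1, 2} keeps dim 0 and repeats dim 1 twice: 2x3 -> 2x6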
+ cgen.addOperatorTile({{in, mul}, {out}});
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(
+ uniformTCD<float>({{1, 2, 3, 4, 5, 6}}, {{1, 2, 3, 1, 2, 3, 4, 5, 6, 4, 5, 6}}));
+ _context->setBackends({"cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneOp_Tile_MulToConst)
+{
+ CircleGen cgen;
+ std::vector<int32_t> multiplies_data{2, 3, 1};
+ uint32_t multiplies_buf = cgen.addBuffer(multiplies_data);
+ int multiplies = cgen.addTensor({{3}, circle::TensorType::TensorType_INT32, multiplies_buf});
+ int in = cgen.addTensor({{1, 2, 3}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{2, 6, 3}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorTile({{in, multiplies}, {out}});
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(uniformTCD<float>(
+ {{11, 12, 13, 21, 22, 23}},
+ {{11, 12, 13, 21, 22, 23, 11, 12, 13, 21, 22, 23, 11, 12, 13, 21, 22, 23,
+ 11, 12, 13, 21, 22, 23, 11, 12, 13, 21, 22, 23, 11, 12, 13, 21, 22, 23}}));
+ _context->setBackends({"cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneOp_Tile_MulToVar)
+{
+ CircleGen cgen;
+ int multiplies = cgen.addTensor({{3}, circle::TensorType::TensorType_INT32});
+ int in = cgen.addTensor({{1, 2, 3}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{2, 6, 3}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorTile({{in, multiplies}, {out}});
+ cgen.setInputsAndOutputs({in, multiplies}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ TestCaseData tcd;
+ tcd.addInput(std::vector<float>{11, 12, 13, 21, 22, 23});
+ tcd.addInput(std::vector<int32_t>{2, 3, 1});
+ tcd.addOutput(std::vector<float>{11, 12, 13, 21, 22, 23, 11, 12, 13, 21, 22, 23,
+ 11, 12, 13, 21, 22, 23, 11, 12, 13, 21, 22, 23,
+ 11, 12, 13, 21, 22, 23, 11, 12, 13, 21, 22, 23});
+ _context->addTestCase(tcd);
+ _context->setBackends({"cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneOp_Tile_VarMul)
+{
+ CircleGen cgen;
+ int in = cgen.addTensor({{2, 3}, circle::TensorType::TensorType_FLOAT32});
+ int mul = cgen.addTensor({{2}, circle::TensorType::TensorType_INT32});
+ int out = cgen.addTensor({{2, 6}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorTile({{in, mul}, {out}});
+ cgen.setInputsAndOutputs({in, mul}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ TestCaseData tcd;
+ tcd.addInput(std::vector<float>{1, 2, 3, 4, 5, 6});
+ tcd.addInput(std::vector<int32_t>{1, 2});
+ tcd.addOutput(std::vector<float>{1, 2, 3, 1, 2, 3, 4, 5, 6, 4, 5, 6});
+ _context->addTestCase(tcd);
+ _context->setBackends({"cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_Tile)
+{
+ CircleGen cgen;
+ std::vector<int32_t> mul_data{1, 2, 1, 2};
+ uint32_t mul_buf = cgen.addBuffer(mul_data);
+ int in = cgen.addTensor({{2, 3}, circle::TensorType::TensorType_FLOAT32});
+ // 2D multiples input is not supported
+ int mul = cgen.addTensor({{2, 2}, circle::TensorType::TensorType_INT32, mul_buf});
+ int out = cgen.addTensor({{2, 6}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorTile({{in, mul}, {out}});
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"cpu"});
+ _context->expectFailCompile();
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_Tile_InvalidMulSize)
+{
+ CircleGen cgen;
+ std::vector<int32_t> multiplies_data{2, 6};
+ uint32_t multiplies_buf = cgen.addBuffer(multiplies_data);
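+ // multiples has 2 elements but the input is rank 3, so compilation is expected to fail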
+ int multiplies = cgen.addTensor({{2}, circle::TensorType::TensorType_INT32, multiplies_buf});
+ int in = cgen.addTensor({{1, 2, 3}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{2, 6, 3}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorTile({{in, multiplies}, {out}});
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"cpu"});
+ _context->expectFailCompile();
+
+ SUCCEED();
+}
diff --git a/tests/nnfw_api/src/one_op_tests/Transpose.cc b/tests/nnfw_api/src/one_op_tests/Transpose.cc
new file mode 100644
index 000000000..f2e971198
--- /dev/null
+++ b/tests/nnfw_api/src/one_op_tests/Transpose.cc
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GenModelTest.h"
+
+#include <memory>
+
+TEST_F(GenModelTest, OneOp_Transpose_PermsToConst)
+{
+ CircleGen cgen;
+ std::vector<int32_t> perms_data{2, 0, 1, 3};
+ uint32_t perms_buf = cgen.addBuffer(perms_data);
+ int perms = cgen.addTensor({{4}, circle::TensorType::TensorType_INT32, perms_buf});
+ int in = cgen.addTensor({{2, 3, 4, 5}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{2, 3, 4, 5}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorTranspose({{in, perms}, {out}});
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(uniformTCD<float>(
+ {{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119}},
+ {{0, 1, 2, 3, 4, 20, 21, 22, 23, 24, 40, 41, 42, 43, 44, 60, 61, 62,
+ 63, 64, 80, 81, 82, 83, 84, 100, 101, 102, 103, 104, 5, 6, 7, 8, 9, 25,
+ 26, 27, 28, 29, 45, 46, 47, 48, 49, 65, 66, 67, 68, 69, 85, 86, 87, 88,
+ 89, 105, 106, 107, 108, 109, 10, 11, 12, 13, 14, 30, 31, 32, 33, 34, 50, 51,
+ 52, 53, 54, 70, 71, 72, 73, 74, 90, 91, 92, 93, 94, 110, 111, 112, 113, 114,
+ 15, 16, 17, 18, 19, 35, 36, 37, 38, 39, 55, 56, 57, 58, 59, 75, 76, 77,
+ 78, 79, 95, 96, 97, 98, 99, 115, 116, 117, 118, 119}}));
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneOp_Transpose_PermsToVar)
+{
+ CircleGen cgen;
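+ // perms is a model input (no constant buffer); the test case below supplies {0, 2, 1, 3} at run time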
+ int perms = cgen.addTensor({{4}, circle::TensorType::TensorType_INT32});
+ int in = cgen.addTensor({{1, 2, 3, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 3, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorTranspose({{in, perms}, {out}});
+ cgen.setInputsAndOutputs({in, perms}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ TestCaseData tcd;
+ tcd.addInput(std::vector<float>{1, 2, 3, 4, 5, 6});
+ tcd.addInput(std::vector<int32_t>{0, 2, 1, 3});
+ tcd.addOutput(std::vector<float>{1, 4, 2, 5, 3, 6});
+ _context->addTestCase(tcd);
+ _context->setBackends({"cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, OneOp_Transpose_RegularTranspose)
+{
+ CircleGen cgen;
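+ // perms is declared with shape {0} and fed an empty vector; the expected output swaps the two middle dimensions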
+ int perms = cgen.addTensor({{0}, circle::TensorType::TensorType_INT32});
+ int in = cgen.addTensor({{1, 2, 3, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 3, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorTranspose({{in, perms}, {out}});
+ cgen.setInputsAndOutputs({in, perms}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ TestCaseData tcd;
+ tcd.addInput(std::vector<float>{1, 2, 3, 4, 5, 6});
+ tcd.addInput(std::vector<int32_t>{});
+ tcd.addOutput(std::vector<float>{1, 4, 2, 5, 3, 6});
+ _context->addTestCase(tcd);
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_Transpose_InvalidPermsSize)
+{
+ CircleGen cgen;
+ std::vector<int32_t> perms_data{0, 1, 2};
+ uint32_t perms_buf = cgen.addBuffer(perms_data);
+ int perms = cgen.addTensor({{3}, circle::TensorType::TensorType_INT32, perms_buf});
+ int in = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorTranspose({{in, perms}, {out}});
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+ _context->expectFailCompile();
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_Transpose_InvalidPermsVal)
+{
+ CircleGen cgen;
+ std::vector<int32_t> perms_data{-3, 3, 1, 2};
+ uint32_t perms_buf = cgen.addBuffer(perms_data);
+ int perms = cgen.addTensor({{4}, circle::TensorType::TensorType_INT32, perms_buf});
+ int in = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorTranspose({{in, perms}, {out}});
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+ _context->expectFailCompile();
+
+ SUCCEED();
+}
+
+TEST_F(GenModelTest, neg_OneOp_Transpose_DuplicatedPermsVal)
+{
+ CircleGen cgen;
+ std::vector<int32_t> perms_data{3, 3, 1, 2};
+ uint32_t perms_buf = cgen.addBuffer(perms_data);
+ int perms = cgen.addTensor({{4}, circle::TensorType::TensorType_INT32, perms_buf});
+ int in = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ int out = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32});
+ cgen.addOperatorTranspose({{in, perms}, {out}});
+ cgen.setInputsAndOutputs({in}, {out});
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"acl_cl", "acl_neon", "cpu"});
+ _context->expectFailCompile();
+
+ SUCCEED();
+}
diff --git a/tests/nnfw_api/src/one_op_tests/While.cc b/tests/nnfw_api/src/one_op_tests/While.cc
index 1d86e6d6a..8b909c29f 100644
--- a/tests/nnfw_api/src/one_op_tests/While.cc
+++ b/tests/nnfw_api/src/one_op_tests/While.cc
@@ -66,10 +66,130 @@ TEST_F(GenModelTest, OneOp_While)
}
_context = std::make_unique<GenModelTestContext>(cgen.finish());
- _context->addTestCase({{{0}}, {{100}}});
- _context->addTestCase({{{2}}, {{102}}});
- _context->addTestCase({{{22}}, {{102}}});
+ _context->addTestCase(uniformTCD<float>({{0}}, {{100}}));
+ _context->addTestCase(uniformTCD<float>({{2}}, {{102}}));
+ _context->addTestCase(uniformTCD<float>({{22}}, {{102}}));
+ _context->addTestCase(uniformTCD<float>({{100}}, {{100}}));
_context->setBackends({"cpu"});
SUCCEED();
}
+
+TEST_F(GenModelTest, OneOp_While_TwoInputs)
+{
+ // The model looks just like the below pseudocode
+ //
+ // function model(x, end)
+ // {
+ // while (x < end)
+ // {
+ // x = x + 10.0
+ // }
+ // return x
+ // }
+
+ CircleGen cgen;
+ std::vector<float> incr_data{10};
+ uint32_t incr_buf = cgen.addBuffer(incr_data);
+
+ // primary subgraph
+ {
+ int x_in = cgen.addTensor({{1}, circle::TensorType_FLOAT32});
+ int x_out = cgen.addTensor({{1}, circle::TensorType_FLOAT32});
+ int end_in = cgen.addTensor({{1}, circle::TensorType_FLOAT32});
+ int end_out = cgen.addTensor({{1}, circle::TensorType_FLOAT32});
+ cgen.addOperatorWhile({{x_in, end_in}, {x_out, end_out}}, 1, 2);
+ cgen.setInputsAndOutputs({x_in, end_in}, {x_out});
+ }
+
+ // cond subgraph
+ {
+ cgen.nextSubgraph();
+ int x = cgen.addTensor({{1}, circle::TensorType_FLOAT32});
+ int end = cgen.addTensor({{1}, circle::TensorType_FLOAT32});
+ int result = cgen.addTensor({{1}, circle::TensorType_BOOL});
+ cgen.addOperatorLess({{x, end}, {result}});
+ cgen.setInputsAndOutputs({x, end}, {result});
+ }
+
+ // body subgraph
+ {
+ cgen.nextSubgraph();
+ int x_in = cgen.addTensor({{1}, circle::TensorType_FLOAT32});
+ int incr = cgen.addTensor({{1}, circle::TensorType_FLOAT32, incr_buf});
+ int x_out = cgen.addTensor({{1}, circle::TensorType_FLOAT32});
+ int end = cgen.addTensor({{1}, circle::TensorType_FLOAT32});
+ cgen.addOperatorAdd({{x_in, incr}, {x_out}}, circle::ActivationFunctionType_NONE);
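+ // 'end' is passed through the body unchanged; only x is incremented by 10 each iteration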
+ cgen.setInputsAndOutputs({x_in, end}, {x_out, end});
+ }
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->addTestCase(uniformTCD<float>({{0}, {20}}, {{20}}));
+ _context->addTestCase(uniformTCD<float>({{5}, {30}}, {{35}}));
+ _context->addTestCase(uniformTCD<float>({{20}, {10}}, {{20}}));
+ _context->setBackends({"cpu"});
+
+ SUCCEED();
+}
+
+class WhileWrongSubgraphIndex : public GenModelTest,
+ public ::testing::WithParamInterface<std::pair<int, int>>
+{
+};
+
+TEST_P(WhileWrongSubgraphIndex, neg_Test)
+{
+ // To be invalid, these indices must be less than 0 or greater than 2 (this model has subgraphs 0, 1, and 2)
+ int cond_subg = GetParam().first;
+ int body_subg = GetParam().second;
+
+ // When While operation's subgraph index is invalid
+
+ CircleGen cgen;
+
+ // constant buffers
+ std::vector<float> incr_data{10};
+ uint32_t incr_buf = cgen.addBuffer(incr_data);
+
+ // primary subgraph
+ {
+ int x_in = cgen.addTensor({{1}, circle::TensorType_FLOAT32});
+ int x_out = cgen.addTensor({{1}, circle::TensorType_FLOAT32});
+ int end_in = cgen.addTensor({{1}, circle::TensorType_FLOAT32});
+ int end_out = cgen.addTensor({{1}, circle::TensorType_FLOAT32});
+ cgen.addOperatorWhile({{x_in, end_in}, {x_out, end_out}}, cond_subg, body_subg);
+ cgen.setInputsAndOutputs({x_in, end_in}, {x_out});
+ }
+
+ // cond subgraph
+ {
+ cgen.nextSubgraph();
+ int x = cgen.addTensor({{1}, circle::TensorType_FLOAT32});
+ int end = cgen.addTensor({{1}, circle::TensorType_FLOAT32});
+ int result = cgen.addTensor({{1}, circle::TensorType_BOOL});
+ cgen.addOperatorLess({{x, end}, {result}});
+ cgen.setInputsAndOutputs({x, end}, {result});
+ }
+
+ // body subgraph
+ {
+ cgen.nextSubgraph();
+ int x_in = cgen.addTensor({{1}, circle::TensorType_FLOAT32});
+ int incr = cgen.addTensor({{1}, circle::TensorType_FLOAT32, incr_buf});
+ int x_out = cgen.addTensor({{1}, circle::TensorType_FLOAT32});
+ int end = cgen.addTensor({{1}, circle::TensorType_FLOAT32});
+ cgen.addOperatorAdd({{x_in, incr}, {x_out}}, circle::ActivationFunctionType_NONE);
+ cgen.setInputsAndOutputs({x_in, end}, {x_out, end});
+ }
+
+ _context = std::make_unique<GenModelTestContext>(cgen.finish());
+ _context->setBackends({"cpu"});
+ _context->expectFailModelLoad();
+
+ SUCCEED();
+}
+
+INSTANTIATE_TEST_CASE_P(GenModelTest, WhileWrongSubgraphIndex,
+ ::testing::Values(std::make_pair(99, 2), std::make_pair(-1, 2),
+ std::make_pair(1, 99), std::make_pair(1, -99),
+ std::make_pair(-99, 99)));