path: root/tools/nnapi_quickcheck
author    Chunseok Lee <chunseok.lee@samsung.com>  2018-09-18 16:53:40 +0900
committer Chunseok Lee <chunseok.lee@samsung.com>  2018-09-18 16:53:40 +0900
commit    91f4ba45449f700a047a4aeea00b1a7c84e94c75 (patch)
tree      c60eecdba0861c51010fb0519f8a59668d90a6d2 /tools/nnapi_quickcheck
parent    07659ccd9fe7b1cf1547cc6cad78bcf489f0a361 (diff)
download  nnfw-91f4ba45449f700a047a4aeea00b1a7c84e94c75.tar.gz
          nnfw-91f4ba45449f700a047a4aeea00b1a7c84e94c75.tar.bz2
          nnfw-91f4ba45449f700a047a4aeea00b1a7c84e94c75.zip
Imported Upstream version 0.2 (tags: upstream/0.2, submit/tizen/20180918.075952)
Diffstat (limited to 'tools/nnapi_quickcheck')
-rw-r--r--  tools/nnapi_quickcheck/CMakeLists.txt | 82
-rw-r--r--  tools/nnapi_quickcheck/inc/env.h | 60
-rw-r--r--  tools/nnapi_quickcheck/inc/memory.h | 34
-rw-r--r--  tools/nnapi_quickcheck/lib/env.cpp | 50
-rw-r--r--  tools/nnapi_quickcheck/lib/env.test.cpp | 45
-rw-r--r--  tools/nnapi_quickcheck/tests/add_1.cpp | 159
-rw-r--r--  tools/nnapi_quickcheck/tests/add_1.lst | 13
-rw-r--r--  tools/nnapi_quickcheck/tests/add_2.cpp | 177
-rw-r--r--  tools/nnapi_quickcheck/tests/add_2.lst | 13
-rw-r--r--  tools/nnapi_quickcheck/tests/add_3.cpp | 137
-rw-r--r--  tools/nnapi_quickcheck/tests/add_3.lst | 6
-rw-r--r--  tools/nnapi_quickcheck/tests/add_4.cpp | 159
-rw-r--r--  tools/nnapi_quickcheck/tests/add_4.lst | 13
-rw-r--r--  tools/nnapi_quickcheck/tests/add_5.cpp | 152
-rw-r--r--  tools/nnapi_quickcheck/tests/add_5.lst | 10
-rw-r--r--  tools/nnapi_quickcheck/tests/add_6.cpp | 144
-rw-r--r--  tools/nnapi_quickcheck/tests/add_6.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/add_7.cpp | 152
-rw-r--r--  tools/nnapi_quickcheck/tests/add_7.lst | 11
-rw-r--r--  tools/nnapi_quickcheck/tests/add_8.cpp | 190
-rw-r--r--  tools/nnapi_quickcheck/tests/add_8.lst | 13
-rw-r--r--  tools/nnapi_quickcheck/tests/add_9.cpp | 187
-rw-r--r--  tools/nnapi_quickcheck/tests/add_9.lst | 13
-rw-r--r--  tools/nnapi_quickcheck/tests/add_quan_1.cpp | 162
-rw-r--r--  tools/nnapi_quickcheck/tests/add_quan_1.lst | 13
-rw-r--r--  tools/nnapi_quickcheck/tests/avg_pool_1.cpp | 150
-rw-r--r--  tools/nnapi_quickcheck/tests/avg_pool_1.lst | 10
-rw-r--r--  tools/nnapi_quickcheck/tests/avg_pool_quan_1.cpp | 149
-rw-r--r--  tools/nnapi_quickcheck/tests/avg_pool_quan_1.lst | 10
-rw-r--r--  tools/nnapi_quickcheck/tests/cast_1.cpp | 136
-rw-r--r--  tools/nnapi_quickcheck/tests/cast_1.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/cast_2.cpp | 134
-rw-r--r--  tools/nnapi_quickcheck/tests/cast_2.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/cast_q_to_f_1.cpp | 136
-rw-r--r--  tools/nnapi_quickcheck/tests/cast_q_to_f_1.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/concat_1.cpp | 161
-rw-r--r--  tools/nnapi_quickcheck/tests/concat_1.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/concat_quan_1.cpp | 163
-rw-r--r--  tools/nnapi_quickcheck/tests/concat_quan_1.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/conv_1.cpp | 207
-rw-r--r--  tools/nnapi_quickcheck/tests/conv_1.lst | 14
-rw-r--r--  tools/nnapi_quickcheck/tests/conv_quan_1.cpp | 211
-rw-r--r--  tools/nnapi_quickcheck/tests/conv_quan_1.lst | 14
-rw-r--r--  tools/nnapi_quickcheck/tests/dconv_1.cpp | 205
-rw-r--r--  tools/nnapi_quickcheck/tests/dconv_1.lst | 16
-rw-r--r--  tools/nnapi_quickcheck/tests/dconv_quan_1.cpp | 209
-rw-r--r--  tools/nnapi_quickcheck/tests/dconv_quan_1.lst | 16
-rw-r--r--  tools/nnapi_quickcheck/tests/dequantize_1.cpp | 136
-rw-r--r--  tools/nnapi_quickcheck/tests/dequantize_1.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/div_1.cpp | 159
-rw-r--r--  tools/nnapi_quickcheck/tests/div_1.lst | 13
-rw-r--r--  tools/nnapi_quickcheck/tests/div_2.cpp | 152
-rw-r--r--  tools/nnapi_quickcheck/tests/div_2.lst | 10
-rw-r--r--  tools/nnapi_quickcheck/tests/fully_connected_1.cpp | 187
-rw-r--r--  tools/nnapi_quickcheck/tests/fully_connected_1.lst | 9
-rw-r--r--  tools/nnapi_quickcheck/tests/fully_connected_quan_1.cpp | 189
-rw-r--r--  tools/nnapi_quickcheck/tests/fully_connected_quan_1.lst | 9
-rw-r--r--  tools/nnapi_quickcheck/tests/gather_1.cpp | 132
-rw-r--r--  tools/nnapi_quickcheck/tests/gather_1.lst | 6
-rw-r--r--  tools/nnapi_quickcheck/tests/gather_2.cpp | 136
-rw-r--r--  tools/nnapi_quickcheck/tests/gather_2.lst | 7
-rw-r--r--  tools/nnapi_quickcheck/tests/logistic_quan_1.cpp | 140
-rw-r--r--  tools/nnapi_quickcheck/tests/logistic_quan_1.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/max_pool_1.cpp | 156
-rw-r--r--  tools/nnapi_quickcheck/tests/max_pool_1.lst | 17
-rw-r--r--  tools/nnapi_quickcheck/tests/max_pool_quan_1.cpp | 158
-rw-r--r--  tools/nnapi_quickcheck/tests/max_pool_quan_1.lst | 17
-rw-r--r--  tools/nnapi_quickcheck/tests/mul_1.cpp | 152
-rw-r--r--  tools/nnapi_quickcheck/tests/mul_1.lst | 10
-rw-r--r--  tools/nnapi_quickcheck/tests/mul_2.cpp | 150
-rw-r--r--  tools/nnapi_quickcheck/tests/mul_2.lst | 9
-rw-r--r--  tools/nnapi_quickcheck/tests/mul_quan_1.cpp | 152
-rw-r--r--  tools/nnapi_quickcheck/tests/mul_quan_1.lst | 10
-rw-r--r--  tools/nnapi_quickcheck/tests/relu1_1.cpp | 121
-rw-r--r--  tools/nnapi_quickcheck/tests/relu1_1.lst | 6
-rw-r--r--  tools/nnapi_quickcheck/tests/relu6_1.cpp | 125
-rw-r--r--  tools/nnapi_quickcheck/tests/relu6_1.lst | 6
-rw-r--r--  tools/nnapi_quickcheck/tests/relu6_quan_1.cpp | 123
-rw-r--r--  tools/nnapi_quickcheck/tests/relu6_quan_1.lst | 6
-rw-r--r--  tools/nnapi_quickcheck/tests/relu_1.cpp | 125
-rw-r--r--  tools/nnapi_quickcheck/tests/relu_1.lst | 6
-rw-r--r--  tools/nnapi_quickcheck/tests/relu_2.cpp | 128
-rw-r--r--  tools/nnapi_quickcheck/tests/relu_2.lst | 7
-rw-r--r--  tools/nnapi_quickcheck/tests/relu_3.cpp | 131
-rw-r--r--  tools/nnapi_quickcheck/tests/relu_3.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/relu_quan_1.cpp | 123
-rw-r--r--  tools/nnapi_quickcheck/tests/relu_quan_1.lst | 6
-rw-r--r--  tools/nnapi_quickcheck/tests/reshape_1.cpp | 141
-rw-r--r--  tools/nnapi_quickcheck/tests/reshape_1.lst | 7
-rw-r--r--  tools/nnapi_quickcheck/tests/reshape_quan_1.cpp | 143
-rw-r--r--  tools/nnapi_quickcheck/tests/reshape_quan_1.lst | 7
-rw-r--r--  tools/nnapi_quickcheck/tests/resize_bilinear_1.cpp | 141
-rw-r--r--  tools/nnapi_quickcheck/tests/resize_bilinear_1.lst | 10
-rw-r--r--  tools/nnapi_quickcheck/tests/softmax_1.cpp | 120
-rw-r--r--  tools/nnapi_quickcheck/tests/softmax_1.lst | 6
-rw-r--r--  tools/nnapi_quickcheck/tests/softmax_2.cpp | 139
-rw-r--r--  tools/nnapi_quickcheck/tests/softmax_2.lst | 11
-rw-r--r--  tools/nnapi_quickcheck/tests/softmax_quan_1.cpp | 122
-rw-r--r--  tools/nnapi_quickcheck/tests/softmax_quan_1.lst | 6
-rw-r--r--  tools/nnapi_quickcheck/tests/split_1.cpp | 153
-rw-r--r--  tools/nnapi_quickcheck/tests/split_1.lst | 10
-rw-r--r--  tools/nnapi_quickcheck/tests/split_2.cpp | 153
-rw-r--r--  tools/nnapi_quickcheck/tests/split_2.lst | 10
-rw-r--r--  tools/nnapi_quickcheck/tests/split_3.cpp | 147
-rw-r--r--  tools/nnapi_quickcheck/tests/split_3.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/split_4.cpp | 147
-rw-r--r--  tools/nnapi_quickcheck/tests/split_4.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/sub_1.cpp | 159
-rw-r--r--  tools/nnapi_quickcheck/tests/sub_1.lst | 13
-rw-r--r--  tools/nnapi_quickcheck/tests/sub_2.cpp | 152
-rw-r--r--  tools/nnapi_quickcheck/tests/sub_2.lst | 10
-rw-r--r--  tools/nnapi_quickcheck/tests/sub_3.cpp | 144
-rw-r--r--  tools/nnapi_quickcheck/tests/sub_3.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/sub_4.cpp | 152
-rw-r--r--  tools/nnapi_quickcheck/tests/sub_4.lst | 11
-rw-r--r--  tools/nnapi_quickcheck/tests/sub_5.cpp | 188
-rw-r--r--  tools/nnapi_quickcheck/tests/sub_5.lst | 13
-rw-r--r--  tools/nnapi_quickcheck/tests/sub_6.cpp | 188
-rw-r--r--  tools/nnapi_quickcheck/tests/sub_6.lst | 13
-rw-r--r--  tools/nnapi_quickcheck/tests/tanh_1.cpp | 134
-rw-r--r--  tools/nnapi_quickcheck/tests/tanh_1.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/topk_v2_1.cpp | 138
-rw-r--r--  tools/nnapi_quickcheck/tests/topk_v2_1.lst | 6
123 files changed, 9863 insertions, 0 deletions
diff --git a/tools/nnapi_quickcheck/CMakeLists.txt b/tools/nnapi_quickcheck/CMakeLists.txt
new file mode 100644
index 000000000..9dd7f5b3b
--- /dev/null
+++ b/tools/nnapi_quickcheck/CMakeLists.txt
@@ -0,0 +1,82 @@
+if(NOT BUILD_NNAPI_QUICKCHECK)
+ return()
+endif(NOT BUILD_NNAPI_QUICKCHECK)
+
+file(GLOB_RECURSE NNAPI_QUICKCHECK_LIB_SOURCES "lib/*.cpp")
+file(GLOB_RECURSE NNAPI_QUICKCHECK_LIB_TESTS "lib/*.test.cpp")
+list(REMOVE_ITEM NNAPI_QUICKCHECK_LIB_SOURCES ${NNAPI_QUICKCHECK_LIB_TESTS})
+
+add_library(nnapi_quickcheck_common ${NNAPI_QUICKCHECK_LIB_SOURCES})
+target_include_directories(nnapi_quickcheck_common PUBLIC "inc")
+target_link_libraries(nnapi_quickcheck_common nnfw_util)
+target_link_libraries(nnapi_quickcheck_common nnfw_support_tflite)
+
+add_executable(nnapi_quickcheck_lib_env_test "lib/env.test.cpp")
+target_link_libraries(nnapi_quickcheck_lib_env_test nnapi_quickcheck_common)
+
+function(add_nnapi_quickcheck NAME)
+ add_executable(nnapi_quickcheck_${NAME} "tests/${NAME}.cpp")
+ nnfw_find_package(GTest)
+ target_link_libraries(nnapi_quickcheck_${NAME} gtest gtest_main pthread)
+ target_link_libraries(nnapi_quickcheck_${NAME} nnapi_quickcheck_common)
+endfunction(add_nnapi_quickcheck)
+
+add_nnapi_quickcheck(add_1)
+add_nnapi_quickcheck(add_2)
+add_nnapi_quickcheck(add_3)
+add_nnapi_quickcheck(add_4)
+add_nnapi_quickcheck(add_5)
+add_nnapi_quickcheck(add_6)
+add_nnapi_quickcheck(add_7)
+add_nnapi_quickcheck(add_8)
+add_nnapi_quickcheck(add_9)
+add_nnapi_quickcheck(add_quan_1)
+add_nnapi_quickcheck(div_1)
+add_nnapi_quickcheck(div_2)
+add_nnapi_quickcheck(sub_1)
+add_nnapi_quickcheck(sub_2)
+add_nnapi_quickcheck(sub_3)
+add_nnapi_quickcheck(sub_4)
+add_nnapi_quickcheck(sub_5)
+add_nnapi_quickcheck(sub_6)
+add_nnapi_quickcheck(mul_1)
+add_nnapi_quickcheck(mul_2)
+add_nnapi_quickcheck(mul_quan_1)
+add_nnapi_quickcheck(relu_1)
+add_nnapi_quickcheck(relu_quan_1)
+add_nnapi_quickcheck(relu_2)
+add_nnapi_quickcheck(relu_3)
+add_nnapi_quickcheck(relu6_1)
+add_nnapi_quickcheck(relu6_quan_1)
+add_nnapi_quickcheck(relu1_1)
+add_nnapi_quickcheck(conv_1)
+add_nnapi_quickcheck(conv_quan_1)
+add_nnapi_quickcheck(dconv_1)
+add_nnapi_quickcheck(dconv_quan_1)
+add_nnapi_quickcheck(max_pool_1)
+add_nnapi_quickcheck(max_pool_quan_1)
+add_nnapi_quickcheck(avg_pool_1)
+add_nnapi_quickcheck(avg_pool_quan_1)
+add_nnapi_quickcheck(concat_1)
+add_nnapi_quickcheck(concat_quan_1)
+add_nnapi_quickcheck(reshape_1)
+add_nnapi_quickcheck(reshape_quan_1)
+add_nnapi_quickcheck(fully_connected_1)
+add_nnapi_quickcheck(fully_connected_quan_1)
+add_nnapi_quickcheck(softmax_1)
+add_nnapi_quickcheck(softmax_2)
+add_nnapi_quickcheck(softmax_quan_1)
+add_nnapi_quickcheck(resize_bilinear_1)
+add_nnapi_quickcheck(topk_v2_1)
+add_nnapi_quickcheck(cast_1)
+add_nnapi_quickcheck(cast_q_to_f_1)
+add_nnapi_quickcheck(cast_2)
+add_nnapi_quickcheck(gather_1)
+add_nnapi_quickcheck(gather_2)
+add_nnapi_quickcheck(dequantize_1)
+add_nnapi_quickcheck(tanh_1)
+add_nnapi_quickcheck(logistic_quan_1)
+add_nnapi_quickcheck(split_1)
+add_nnapi_quickcheck(split_2)
+add_nnapi_quickcheck(split_3)
+add_nnapi_quickcheck(split_4)
diff --git a/tools/nnapi_quickcheck/inc/env.h b/tools/nnapi_quickcheck/inc/env.h
new file mode 100644
index 000000000..c2efcebc9
--- /dev/null
+++ b/tools/nnapi_quickcheck/inc/env.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENV_UTILS_H__
+#define __ENV_UTILS_H__
+
+#include <string>
+
+#include <cstdint>
+
+class IntVar
+{
+public:
+ IntVar(const std::string &name, int32_t value);
+
+public:
+ int32_t operator()(void) const { return _value; }
+
+private:
+ int32_t _value;
+};
+
+class FloatVar
+{
+public:
+ FloatVar(const std::string &name, float value);
+
+public:
+ float operator()(void) const { return _value; }
+
+private:
+ float _value;
+};
+
+class StrVar
+{
+public:
+ StrVar(const std::string &name, const std::string &value);
+
+public:
+ const std::string &operator()(void) const { return _value; }
+
+private:
+ std::string _value;
+};
+
+#endif // __ENV_UTILS_H__
diff --git a/tools/nnapi_quickcheck/inc/memory.h b/tools/nnapi_quickcheck/inc/memory.h
new file mode 100644
index 000000000..3f1bca8a4
--- /dev/null
+++ b/tools/nnapi_quickcheck/inc/memory.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MEMORY_H__
+#define __MEMORY_H__
+
+#include <cstdlib>
+#include <new> // std::bad_alloc
+
+template <typename T> inline T *make_alloc(void)
+{
+ auto ptr = malloc(sizeof(T));
+
+ if (ptr == nullptr)
+ {
+ throw std::bad_alloc{};
+ }
+
+ return reinterpret_cast<T *>(ptr);
+}
+
+#endif // __MEMORY_H__
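A usage sketch of make_alloc<T>() (illustrative, not part of the imported sources). The helper exists because tflite::Interpreter::AddNodeWithParameters() takes ownership of the builtin-params block and releases it with free(), so the block has to come from malloc rather than new; the tests below all follow this pattern:

    #include "memory.h"
    #include "tensorflow/contrib/lite/builtin_op_data.h" // TfLiteAddParams, kTfLiteActNone

    void example_node_params()
    {
      // malloc-backed allocation; throws std::bad_alloc if the allocation fails
      TfLiteAddParams *param = make_alloc<TfLiteAddParams>();
      param->activation = kTfLiteActNone;
      // AddNodeWithParameters(..., reinterpret_cast<void *>(param), ...) later calls free(param)
    }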
diff --git a/tools/nnapi_quickcheck/lib/env.cpp b/tools/nnapi_quickcheck/lib/env.cpp
new file mode 100644
index 000000000..758516752
--- /dev/null
+++ b/tools/nnapi_quickcheck/lib/env.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "env.h"
+
+#include "util/environment.h"
+
+//
+// Integer variable
+//
+IntVar::IntVar(const std::string &name, int32_t value) : _value{value}
+{
+ nnfw::util::env::IntAccessor{name}.access(_value);
+}
+
+//
+// Float variable
+//
+FloatVar::FloatVar(const std::string &name, float value) : _value{value}
+{
+ nnfw::util::env::FloatAccessor{name}.access(_value);
+}
+
+//
+// String variable
+//
+#include <cstdlib>
+
+StrVar::StrVar(const std::string &name, const std::string &value) : _value{value}
+{
+ auto env = std::getenv(name.c_str());
+
+ if (env)
+ {
+ _value = std::string{env};
+ }
+}
diff --git a/tools/nnapi_quickcheck/lib/env.test.cpp b/tools/nnapi_quickcheck/lib/env.test.cpp
new file mode 100644
index 000000000..dd9ac8be5
--- /dev/null
+++ b/tools/nnapi_quickcheck/lib/env.test.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "env.h"
+
+#include <string>
+
+#include <cstdlib>
+#include <cassert>
+
+inline void ensure(int err) { assert(err == 0); }
+
+int main(int argc, char **argv)
+{
+ const std::string key{"TEST"};
+ const int num{3};
+
+ const auto str = std::to_string(num);
+
+ ensure(unsetenv(key.c_str()));
+ ensure(setenv(key.c_str(), str.c_str(), 0));
+
+ int value = 0;
+
+ assert(value != num);
+
+ IntVar buffer(key, value);
+
+ assert(buffer() == num);
+
+ return 0;
+}
diff --git a/tools/nnapi_quickcheck/tests/add_1.cpp b/tools/nnapi_quickcheck/tests/add_1.cpp
new file mode 100644
index 000000000..52aa2afa0
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_1.cpp
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_add_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "add_1.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT_N = RIGHT_N_Value();
+ const int32_t RIGHT_C = RIGHT_C_Value();
+ const int32_t RIGHT_H = RIGHT_H_Value();
+ const int32_t RIGHT_W = RIGHT_W_Value();
+
+ const int32_t OFM_N = std::max(LEFT_N, RIGHT_N);
+ const int32_t OFM_C = std::max(LEFT_C, RIGHT_C);
+ const int32_t OFM_H = std::max(LEFT_H, RIGHT_H);
+ const int32_t OFM_W = std::max(LEFT_W, RIGHT_W);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_C);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_N);
+ PRINT_VALUE(RIGHT_C);
+ PRINT_VALUE(RIGHT_H);
+ PRINT_VALUE(RIGHT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_N, LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT_N, RIGHT_H, RIGHT_W, RIGHT_C} /* dims */,
+ quantization);
+
+ // Add an Add node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free(),
+ // so param must be allocated with malloc
+ auto param = make_alloc<TfLiteAddParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Add and store the result into Tensor #0
+ // - Read Left from Tensor #1
+ // - Read Right from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_ADD, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
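A worked instance of the dequantization formula quoted in the setup lambda above (the numbers are illustrative, not values the test uses): with scale = 0.5 and zero_point = 128,

    // real_value = scale * (quantized_value - zero_point)
    float real_value = 0.5f * (130 - 128); // = 1.0f

The float32 tests in this directory pass make_default_quantization(), which presumably corresponds to scale 1 / zero point 0 (the values add_3.cpp below sets explicitly), so quantization is effectively a no-op for them.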
diff --git a/tools/nnapi_quickcheck/tests/add_1.lst b/tools/nnapi_quickcheck/tests/add_1.lst
new file mode 100644
index 000000000..fa17caebb
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_1.lst
@@ -0,0 +1,13 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_C, 3)
+INT_VALUE(LEFT_H, 16)
+INT_VALUE(LEFT_W, 16)
+
+INT_VALUE(RIGHT_N, 1)
+INT_VALUE(RIGHT_C, 3)
+INT_VALUE(RIGHT_H, 16)
+INT_VALUE(RIGHT_W, 16)
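How the .lst files are consumed (sketch, not part of the imported sources): each test defines INT_VALUE as an X-macro before including its list, so every entry becomes an IntVar whose compiled-in default can be overridden through an environment variable of the same name. For add_1.lst, the include in add_1.cpp expands roughly to:

    IntVar LEFT_N_Value("LEFT_N", 1);  // default 1; overridden when $LEFT_N is set
    IntVar LEFT_C_Value("LEFT_C", 3);
    IntVar LEFT_H_Value("LEFT_H", 16);
    IntVar LEFT_W_Value("LEFT_W", 16);
    // ... and likewise for the RIGHT_* entries.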
diff --git a/tools/nnapi_quickcheck/tests/add_2.cpp b/tools/nnapi_quickcheck/tests/add_2.cpp
new file mode 100644
index 000000000..9b5b19c06
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_2.cpp
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_add_2, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "add_2.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT_N = RIGHT_N_Value();
+ const int32_t RIGHT_C = RIGHT_C_Value();
+ const int32_t RIGHT_H = RIGHT_H_Value();
+ const int32_t RIGHT_W = RIGHT_W_Value();
+
+ const int32_t OFM_N = std::max(LEFT_N, RIGHT_N);
+ const int32_t OFM_C = std::max(LEFT_C, RIGHT_C);
+ const int32_t OFM_H = std::max(LEFT_H, RIGHT_H);
+ const int32_t OFM_W = std::max(LEFT_W, RIGHT_W);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_C);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_N);
+ PRINT_VALUE(RIGHT_C);
+ PRINT_VALUE(RIGHT_H);
+ PRINT_VALUE(RIGHT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ // Configure left data
+ const uint32_t left_size = LEFT_N * LEFT_C * LEFT_H * LEFT_W;
+ float left_data[left_size] = {
+ 0.0f,
+ };
+
+ // Fill left data with random data
+ {
+ std::normal_distribution<float> left_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < left_size; ++off)
+ {
+ left_data[off] = left_dist(random);
+ }
+ }
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadOnly(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_N, LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization,
+ reinterpret_cast<const char *>(left_data),
+ left_size * sizeof(float));
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT_N, RIGHT_H, RIGHT_W, RIGHT_C} /* dims */,
+ quantization);
+
+ // Add an Add node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free(),
+ // so param must be allocated with malloc
+ auto param = make_alloc<TfLiteAddParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Add and store the result into Tensor #0
+ // - Read LHS from Tensor #1
+ // - Read RHS from Tensor #2,
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_ADD, 1));
+
+ interp.SetInputs({2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
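Unlike add_1, add_2 bakes the left operand into the model as a constant: SetTensorParametersReadOnly() hands the interpreter a pointer to left_data without copying it (so the buffer must outlive the interpreter; here the setup lambda captures the enclosing stack array by reference), and only tensor #2 remains a runtime input, hence SetInputs({2}). A condensed sketch of that pattern with illustrative shape and data, assuming it runs inside a setup lambda like the one above:

    // Constant input: the interpreter reads this buffer in place, no copy is made.
    static const float kLeft[4] = {1.0f, 2.0f, 3.0f, 4.0f}; // illustrative data
    interp.SetTensorParametersReadOnly(1, kTfLiteFloat32, "left", {1, 1, 1, 4}, quantization,
                                       reinterpret_cast<const char *>(kLeft), sizeof(kLeft));
    interp.SetInputs({2}); // only the "right" tensor is fed at run time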
diff --git a/tools/nnapi_quickcheck/tests/add_2.lst b/tools/nnapi_quickcheck/tests/add_2.lst
new file mode 100644
index 000000000..fa17caebb
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_2.lst
@@ -0,0 +1,13 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_C, 3)
+INT_VALUE(LEFT_H, 16)
+INT_VALUE(LEFT_W, 16)
+
+INT_VALUE(RIGHT_N, 1)
+INT_VALUE(RIGHT_C, 3)
+INT_VALUE(RIGHT_H, 16)
+INT_VALUE(RIGHT_W, 16)
diff --git a/tools/nnapi_quickcheck/tests/add_3.cpp b/tools/nnapi_quickcheck/tests/add_3.cpp
new file mode 100644
index 000000000..e692fe314
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_3.cpp
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/TensorShapeUtils.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_add_3, simple_test)
+{
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+#define STR_VALUE(NAME, VALUE) StrVar NAME##_Value(#NAME, VALUE);
+#include "add_3.lst"
+#undef STR_VALUE
+
+ const auto LHS_SHAPE = nnfw::util::tensor::Shape::from(LHS_SHAPE_Value());
+ const auto RHS_SHAPE = nnfw::util::tensor::Shape::from(RHS_SHAPE_Value());
+ const auto OUT_SHAPE = nnfw::support::tflite::broadcast(LHS_SHAPE, RHS_SHAPE);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LHS_SHAPE);
+ PRINT_VALUE(RHS_SHAPE);
+ PRINT_VALUE(OUT_SHAPE);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ using nnfw::support::tflite::as_dims;
+
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+
+ quantization.scale = 1;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ as_dims(OUT_SHAPE), quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ as_dims(LHS_SHAPE), quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ as_dims(RHS_SHAPE), quantization);
+
+ // Add an Add node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free(),
+ // so param must be allocated with malloc
+ auto param = make_alloc<TfLiteAddParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Add and store the result into Tensor #0
+ // - Read Left from Tensor #1
+ // - Read Right from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_ADD, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = 0;
+ param.tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(param.verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(param.tolerance);
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/add_3.lst b/tools/nnapi_quickcheck/tests/add_3.lst
new file mode 100644
index 000000000..1981db4e1
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_3.lst
@@ -0,0 +1,6 @@
+#ifndef STR_VALUE
+#error "STR_VALUE should be defined"
+#endif // STR_VALUE
+
+STR_VALUE(LHS_SHAPE, "1,3,16,16")
+STR_VALUE(RHS_SHAPE, "1,3,16,16")
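add_3 replaces the per-dimension INT_VALUEs with whole-shape strings: Shape::from() parses the comma-separated STR_VALUE and nnfw::support::tflite::broadcast() derives the output shape. Assuming numpy-style broadcasting (each output dimension is the larger of the two, with size-1 dimensions stretched, consistent with the std::max() based OFM_* computation in the other add tests), overriding one shape at run time would behave roughly like this (illustrative only):

    // LHS_SHAPE = "1,3,16,16", RHS_SHAPE = "1,3,1,16"
    //   LHS 1x3x16x16 (+) RHS 1x3x1x16  ->  OUT 1x3x16x16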
diff --git a/tools/nnapi_quickcheck/tests/add_4.cpp b/tools/nnapi_quickcheck/tests/add_4.cpp
new file mode 100644
index 000000000..e519f1731
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_4.cpp
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_add_4, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "add_4.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT_N = RIGHT_N_Value();
+ const int32_t RIGHT_C = RIGHT_C_Value();
+ const int32_t RIGHT_H = RIGHT_H_Value();
+ const int32_t RIGHT_W = RIGHT_W_Value();
+
+ const int32_t OFM_N = std::max(LEFT_N, RIGHT_N);
+ const int32_t OFM_C = std::max(LEFT_C, RIGHT_C);
+ const int32_t OFM_H = std::max(LEFT_H, RIGHT_H);
+ const int32_t OFM_W = std::max(LEFT_W, RIGHT_W);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_C);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_N);
+ PRINT_VALUE(RIGHT_C);
+ PRINT_VALUE(RIGHT_H);
+ PRINT_VALUE(RIGHT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_N, LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT_N, RIGHT_H, RIGHT_W, RIGHT_C} /* dims */,
+ quantization);
+
+ // Add an Add node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free(),
+ // so param must be allocated with malloc
+ auto param = make_alloc<TfLiteAddParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Add and store the result into Tensor #0
+ // - Read Left from Tensor #1
+ // - Read Right from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_ADD, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/add_4.lst b/tools/nnapi_quickcheck/tests/add_4.lst
new file mode 100644
index 000000000..6b289007f
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_4.lst
@@ -0,0 +1,13 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_C, 2)
+INT_VALUE(LEFT_H, 16)
+INT_VALUE(LEFT_W, 8)
+
+INT_VALUE(RIGHT_N, 1)
+INT_VALUE(RIGHT_C, 2)
+INT_VALUE(RIGHT_H, 1)
+INT_VALUE(RIGHT_W, 8)
diff --git a/tools/nnapi_quickcheck/tests/add_5.cpp b/tools/nnapi_quickcheck/tests/add_5.cpp
new file mode 100644
index 000000000..cacb5e42d
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_5.cpp
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_add_5, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "add_5.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT = RIGHT_Value();
+
+ const int32_t OFM_N = LEFT_N;
+ const int32_t OFM_C = LEFT_C;
+ const int32_t OFM_H = LEFT_H;
+ const int32_t OFM_W = LEFT_W;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_C);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_N, LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT} /* dims */, quantization);
+
+ // Add an Add node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free(),
+ // so param must be allocated with malloc
+ auto param = make_alloc<TfLiteAddParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Add and store the result into Tensor #0
+ // - Read Left from Tensor #1
+ // - Read Right from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_ADD, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/add_5.lst b/tools/nnapi_quickcheck/tests/add_5.lst
new file mode 100644
index 000000000..eb316b6ad
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_5.lst
@@ -0,0 +1,10 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_C, 3)
+INT_VALUE(LEFT_H, 8)
+INT_VALUE(LEFT_W, 16)
+
+INT_VALUE(RIGHT, 1)
diff --git a/tools/nnapi_quickcheck/tests/add_6.cpp b/tools/nnapi_quickcheck/tests/add_6.cpp
new file mode 100644
index 000000000..245b7ad39
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_6.cpp
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_add_6, simple_test)
+{
+ int verbose = 1;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "add_6.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT = RIGHT_Value();
+
+ const int32_t OFM_H = LEFT_H;
+ const int32_t OFM_W = LEFT_W;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_H, OFM_W} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_H, LEFT_W} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT} /* dims */, quantization);
+
+ // Add an Add node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free(),
+ // so param must be allocated with malloc
+ auto param = make_alloc<TfLiteAddParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Add and store the result into Tensor #0
+ // - Read Left from Tensor #1
+ // - Read Right from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_ADD, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/add_6.lst b/tools/nnapi_quickcheck/tests/add_6.lst
new file mode 100644
index 000000000..75db4c8d0
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_6.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_H, 8)
+INT_VALUE(LEFT_W, 2)
+
+INT_VALUE(RIGHT, 1)
diff --git a/tools/nnapi_quickcheck/tests/add_7.cpp b/tools/nnapi_quickcheck/tests/add_7.cpp
new file mode 100644
index 000000000..43d285c72
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_7.cpp
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_add_7, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "add_7.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT_C = RIGHT_C_Value();
+ const int32_t RIGHT_H = RIGHT_H_Value();
+ const int32_t RIGHT_W = RIGHT_W_Value();
+
+ const int32_t OFM_C = LEFT_C;
+ const int32_t OFM_H = LEFT_H;
+ const int32_t OFM_W = LEFT_W;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_C);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_C);
+ PRINT_VALUE(RIGHT_H);
+ PRINT_VALUE(RIGHT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT_H, RIGHT_W, RIGHT_C} /* dims */, quantization);
+
+ // Add an Add node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free(),
+ // so param must be allocated with malloc
+ auto param = make_alloc<TfLiteAddParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Add and store the result into Tensor #0
+ // - Read Left from Tensor #1
+ // - Read Right from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_ADD, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/add_7.lst b/tools/nnapi_quickcheck/tests/add_7.lst
new file mode 100644
index 000000000..1dc8b6147
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_7.lst
@@ -0,0 +1,11 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_C, 3)
+INT_VALUE(LEFT_H, 8)
+INT_VALUE(LEFT_W, 16)
+
+INT_VALUE(RIGHT_C, 3)
+INT_VALUE(RIGHT_H, 8)
+INT_VALUE(RIGHT_W, 1)
diff --git a/tools/nnapi_quickcheck/tests/add_8.cpp b/tools/nnapi_quickcheck/tests/add_8.cpp
new file mode 100644
index 000000000..ec11c3969
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_8.cpp
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_add_8, simple_test)
+{
+ int verbose = 1;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "add_8.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT_N = RIGHT_N_Value();
+ const int32_t RIGHT_C = RIGHT_C_Value();
+ const int32_t RIGHT_H = RIGHT_H_Value();
+ const int32_t RIGHT_W = RIGHT_W_Value();
+
+ const int32_t OFM_N = std::max(LEFT_N, RIGHT_N);
+ const int32_t OFM_C = std::max(LEFT_C, RIGHT_C);
+ const int32_t OFM_H = std::max(LEFT_H, RIGHT_H);
+ const int32_t OFM_W = std::max(LEFT_W, RIGHT_W);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_C);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_N);
+ PRINT_VALUE(RIGHT_C);
+ PRINT_VALUE(RIGHT_H);
+ PRINT_VALUE(RIGHT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ // Configure left and right data
+ const uint32_t left_size = LEFT_N * LEFT_C * LEFT_H * LEFT_W;
+ const uint32_t right_size = RIGHT_N * RIGHT_C * RIGHT_H * RIGHT_W;
+ float left_data[left_size] = {
+ 0.0f,
+ };
+ float right_data[right_size] = {
+ 0.0f,
+ };
+
+ // Fill left data with a constant and right data with increasing values
+ {
+ std::normal_distribution<float> left_dist(-1.0f, +1.0f);
+ int value = 10;
+ for (uint32_t off = 0; off < left_size; ++off)
+ {
+ left_data[off] = value;
+ std::cout << left_data[off] << std::endl;
+ }
+ value = 1;
+ for (uint32_t off = 0; off < right_size; ++off)
+ {
+ right_data[off] = value++;
+ std::cout << right_data[off] << std::endl;
+ }
+ }
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadOnly(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_N, LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization,
+ reinterpret_cast<const char *>(left_data),
+ left_size * sizeof(float));
+
+ // Configure input(s)
+ interp.SetTensorParametersReadOnly(
+ 2, kTfLiteFloat32 /* type */, "right" /* name */, {RIGHT_C} /* dims */, quantization,
+ //{RIGHT_W, RIGHT_C} /* dims */, quantization,
+ reinterpret_cast<const char *>(right_data), right_size * sizeof(float));
+
+ // Add an Add node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free(),
+ // so param must be allocated with malloc
+ auto param = make_alloc<TfLiteAddParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Add and store the result into Tensor #0
+ // - Read LHS from Tensor #1
+ // - Read RHS from Tensor #2,
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_ADD, 1));
+
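+    // Both operands are constant (read-only) tensors, so the model exposes no runtime inputs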
+ interp.SetInputs({});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/add_8.lst b/tools/nnapi_quickcheck/tests/add_8.lst
new file mode 100644
index 000000000..3119c7f65
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_8.lst
@@ -0,0 +1,13 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_H, 3)
+INT_VALUE(LEFT_W, 2)
+INT_VALUE(LEFT_C, 4)
+
+INT_VALUE(RIGHT_N, 1)
+INT_VALUE(RIGHT_H, 1)
+INT_VALUE(RIGHT_W, 1)
+INT_VALUE(RIGHT_C, 4)
diff --git a/tools/nnapi_quickcheck/tests/add_9.cpp b/tools/nnapi_quickcheck/tests/add_9.cpp
new file mode 100644
index 000000000..f3cf02875
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_9.cpp
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_add_9, simple_test)
+{
+ int verbose = 1;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "add_9.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT_N = RIGHT_N_Value();
+ const int32_t RIGHT_C = RIGHT_C_Value();
+ const int32_t RIGHT_H = RIGHT_H_Value();
+ const int32_t RIGHT_W = RIGHT_W_Value();
+
+ const int32_t OFM_N = std::max(LEFT_N, RIGHT_N);
+ const int32_t OFM_C = std::max(LEFT_C, RIGHT_C);
+ const int32_t OFM_H = std::max(LEFT_H, RIGHT_H);
+ const int32_t OFM_W = std::max(LEFT_W, RIGHT_W);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_VALUE(LEFT_C);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_N);
+ PRINT_VALUE(RIGHT_H);
+ PRINT_VALUE(RIGHT_W);
+ PRINT_VALUE(RIGHT_C);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+ PRINT_VALUE(OFM_C);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ // Configure left data
+ const uint32_t left_size = LEFT_N * LEFT_C * LEFT_H * LEFT_W;
+ const uint32_t right_size = RIGHT_N * RIGHT_C * RIGHT_H * RIGHT_W;
+ float left_data[left_size] = {
+ 0.0f,
+ };
+ float right_data[right_size] = {
+ 0.0f,
+ };
+
+  // Fill the left operand with a constant and the right operand with an increasing sequence
+  {
+ float value = 10.0f;
+ for (uint32_t off = 0; off < left_size; ++off)
+ {
+ left_data[off] = value;
+ }
+ value = 1.0f;
+ for (uint32_t off = 0; off < right_size; ++off)
+ {
+ right_data[off] = value++;
+ }
+ }
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadOnly(
+ 1, kTfLiteFloat32 /* type */, "left" /* name */, {LEFT_W, LEFT_C} /* dims */, quantization,
+ reinterpret_cast<const char *>(left_data), left_size * sizeof(float));
+
+ // Configure input(s)
+ interp.SetTensorParametersReadOnly(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT_N, RIGHT_H, RIGHT_W, RIGHT_C} /* dims */,
+ quantization, reinterpret_cast<const char *>(right_data),
+ right_size * sizeof(float));
+
+    // Add an ADD node
+    //
+    // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+    // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteAddParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Add and store the result into Tensor #0
+ // - Read LHS from Tensor #1
+ // - Read RHS from Tensor #2,
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_ADD, 1));
+
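+    // Both operands are constant (read-only) tensors, so the model exposes no runtime inputs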
+ interp.SetInputs({});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/add_9.lst b/tools/nnapi_quickcheck/tests/add_9.lst
new file mode 100644
index 000000000..52a1f1acc
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_9.lst
@@ -0,0 +1,13 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_H, 1)
+INT_VALUE(LEFT_W, 3)
+INT_VALUE(LEFT_C, 4)
+
+INT_VALUE(RIGHT_N, 1)
+INT_VALUE(RIGHT_H, 2)
+INT_VALUE(RIGHT_W, 3)
+INT_VALUE(RIGHT_C, 4)
diff --git a/tools/nnapi_quickcheck/tests/add_quan_1.cpp b/tools/nnapi_quickcheck/tests/add_quan_1.cpp
new file mode 100644
index 000000000..45f0ba681
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_quan_1.cpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_add_quan_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "add_quan_1.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT_N = RIGHT_N_Value();
+ const int32_t RIGHT_C = RIGHT_C_Value();
+ const int32_t RIGHT_H = RIGHT_H_Value();
+ const int32_t RIGHT_W = RIGHT_W_Value();
+
+ const int32_t OFM_N = std::max(LEFT_N, RIGHT_N);
+ const int32_t OFM_C = std::max(LEFT_C, RIGHT_C);
+ const int32_t OFM_H = std::max(LEFT_H, RIGHT_H);
+ const int32_t OFM_W = std::max(LEFT_W, RIGHT_W);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_C);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_N);
+ PRINT_VALUE(RIGHT_C);
+ PRINT_VALUE(RIGHT_H);
+ PRINT_VALUE(RIGHT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ quantization.scale = 2.0f;
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
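+    // With scale = 2.0f and zero_point = 0, a stored uint8 value q represents the real value
+    // 2.0f * q (e.g. q = 5 -> 10.0f), following the formula quoted above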
+
+ // Configure input(s)
+ quantization.scale = 1.0f;
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "left" /* name */,
+ {LEFT_N, LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteUInt8 /* type */, "right" /* name */,
+ {RIGHT_N, RIGHT_H, RIGHT_W, RIGHT_C} /* dims */,
+ quantization);
+
+    // Add an ADD node
+    //
+    // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+    // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteAddParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Add and store the result into Tensor #0
+ // - Read Left from Tensor #1
+    // - Read Right from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_ADD, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/add_quan_1.lst b/tools/nnapi_quickcheck/tests/add_quan_1.lst
new file mode 100644
index 000000000..fa17caebb
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_quan_1.lst
@@ -0,0 +1,13 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_C, 3)
+INT_VALUE(LEFT_H, 16)
+INT_VALUE(LEFT_W, 16)
+
+INT_VALUE(RIGHT_N, 1)
+INT_VALUE(RIGHT_C, 3)
+INT_VALUE(RIGHT_H, 16)
+INT_VALUE(RIGHT_W, 16)
diff --git a/tools/nnapi_quickcheck/tests/avg_pool_1.cpp b/tools/nnapi_quickcheck/tests/avg_pool_1.cpp
new file mode 100644
index 000000000..c938ed690
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/avg_pool_1.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <iostream>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_avg_pool_1, simple_test)
+{
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Set random test parameters
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "avg_pool_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t KER_H = KER_H_Value();
+ const int32_t KER_W = KER_W_Value();
+
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = (IFM_H - KER_H) + 1;
+ const int32_t OFM_W = (IFM_W - KER_W) + 1;
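+  // VALID padding with stride 1: OFM_H = IFM_H - KER_H + 1 (and likewise for the width), so
+  // the default 3x4 input pooled with a 3x4 window yields a 1x1 output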
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(KER_H);
+ PRINT_VALUE(KER_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ quantization.scale = 1;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(2);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure IFM
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+    // Add an Average Pooling node
+    //
+    // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+    // so param should be allocated with malloc
+ auto param = make_alloc<TfLitePoolParams>();
+
+ param->padding = kTfLitePaddingValid;
+ param->stride_width = 1;
+ param->stride_height = 1;
+ param->filter_width = KER_W;
+ param->filter_height = KER_H;
+ param->activation = kTfLiteActNone;
+
+    // Run Average Pooling and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_AVERAGE_POOL_2D, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/avg_pool_1.lst b/tools/nnapi_quickcheck/tests/avg_pool_1.lst
new file mode 100644
index 000000000..02d86d470
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/avg_pool_1.lst
@@ -0,0 +1,10 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
+
+INT_VALUE(KER_H, 3)
+INT_VALUE(KER_W, 4)
diff --git a/tools/nnapi_quickcheck/tests/avg_pool_quan_1.cpp b/tools/nnapi_quickcheck/tests/avg_pool_quan_1.cpp
new file mode 100644
index 000000000..ba41c030c
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/avg_pool_quan_1.cpp
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <iostream>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_avg_pool_quan_1, simple_test)
+{
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Set random test parameters
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "avg_pool_quan_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t KER_H = KER_H_Value();
+ const int32_t KER_W = KER_W_Value();
+
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = (IFM_H - KER_H) + 1;
+ const int32_t OFM_W = (IFM_W - KER_W) + 1;
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(KER_H);
+ PRINT_VALUE(KER_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+ quantization.scale = 1.0f;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(2);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure IFM
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+    // Add an Average Pooling node
+    //
+    // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+    // so param should be allocated with malloc
+ auto param = make_alloc<TfLitePoolParams>();
+
+ param->padding = kTfLitePaddingValid;
+ param->stride_width = 1;
+ param->stride_height = 1;
+ param->filter_width = KER_W;
+ param->filter_height = KER_H;
+ param->activation = kTfLiteActNone;
+
+    // Run Average Pooling and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_AVERAGE_POOL_2D, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/avg_pool_quan_1.lst b/tools/nnapi_quickcheck/tests/avg_pool_quan_1.lst
new file mode 100644
index 000000000..02d86d470
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/avg_pool_quan_1.lst
@@ -0,0 +1,10 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
+
+INT_VALUE(KER_H, 3)
+INT_VALUE(KER_W, 4)
diff --git a/tools/nnapi_quickcheck/tests/cast_1.cpp b/tools/nnapi_quickcheck/tests/cast_1.cpp
new file mode 100644
index 000000000..01d49cd59
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/cast_1.cpp
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_cast_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "cast_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_N = IFM_N_Value();
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t OFM_N = IFM_N;
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_N);
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ TfLiteQuantizationParams quantization;
+
+ quantization.scale = 1;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(2);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
+ {IFM_N, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
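+    // CAST is an elementwise type conversion from the uint8 input to the float32 output;
+    // the quantization parameters above are not used for rescaling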
+
+ // Add Cast Node
+ // Run CAST and store the result into Tensor #0
+ // - Read input from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_CAST, 1));
+
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/cast_1.lst b/tools/nnapi_quickcheck/tests/cast_1.lst
new file mode 100644
index 000000000..a0077cb95
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/cast_1.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_N, 1)
+INT_VALUE(IFM_C, 3)
+INT_VALUE(IFM_H, 320)
+INT_VALUE(IFM_W, 320)
diff --git a/tools/nnapi_quickcheck/tests/cast_2.cpp b/tools/nnapi_quickcheck/tests/cast_2.cpp
new file mode 100644
index 000000000..b0032210d
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/cast_2.cpp
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_cast_2, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "cast_2.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_N = IFM_N_Value();
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t OFM_N = IFM_N;
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_N);
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(2);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input
+ interp.SetTensorParametersReadWrite(1, kTfLiteInt32 /* type */, "input" /* name */,
+ {IFM_N, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Add Cast Node
+ // Run CAST and store the result into Tensor #0
+ // - Read input from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_CAST, 1));
+
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/cast_2.lst b/tools/nnapi_quickcheck/tests/cast_2.lst
new file mode 100644
index 000000000..a0077cb95
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/cast_2.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_N, 1)
+INT_VALUE(IFM_C, 3)
+INT_VALUE(IFM_H, 320)
+INT_VALUE(IFM_W, 320)
diff --git a/tools/nnapi_quickcheck/tests/cast_q_to_f_1.cpp b/tools/nnapi_quickcheck/tests/cast_q_to_f_1.cpp
new file mode 100644
index 000000000..763ca940c
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/cast_q_to_f_1.cpp
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_cast_q_to_f_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "cast_q_to_f_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_N = IFM_N_Value();
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t OFM_N = IFM_N;
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_N);
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ TfLiteQuantizationParams quantization;
+
+ quantization.scale = 1;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(2);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {IFM_N, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Add Cast Node
+ // Run CAST and store the result into Tensor #0
+ // - Read input from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_CAST, 1));
+
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/cast_q_to_f_1.lst b/tools/nnapi_quickcheck/tests/cast_q_to_f_1.lst
new file mode 100644
index 000000000..a0077cb95
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/cast_q_to_f_1.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_N, 1)
+INT_VALUE(IFM_C, 3)
+INT_VALUE(IFM_H, 320)
+INT_VALUE(IFM_W, 320)
diff --git a/tools/nnapi_quickcheck/tests/concat_1.cpp b/tools/nnapi_quickcheck/tests/concat_1.cpp
new file mode 100644
index 000000000..77d670fed
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/concat_1.cpp
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_concat_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "concat_1.lst"
+#undef INT_VALUE
+
+ // TODO Allow users to set concat axis!
+ const int32_t CONCAT_COUNT = CONCAT_COUNT_Value();
+
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ int32_t OFM_C = 0;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(CONCAT_COUNT);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ // Randomize IFM depth
+ std::default_random_engine generator(SEED);
+ std::uniform_int_distribution<int> distribution(1, 8);
+
+ std::vector<int32_t> depths;
+
+ for (int32_t n = 0; n < CONCAT_COUNT; ++n)
+ {
+ const auto depth = distribution(generator);
+
+ OFM_C += depth;
+ depths.emplace_back(depth);
+ }
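+  // OFM_C ends up as the sum of the randomized per-input depths, since the inputs are
+  // concatenated along the channel axis (param->axis = 3 below)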
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(depths.size() + 1);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure IFM(s)
+ std::vector<int> ifm_indexes;
+
+ for (uint32_t n = 0; n < depths.size(); ++n)
+ {
+ const auto ifm_index = 1 + n;
+ const auto IFM_C = depths.at(n);
+
+ interp.SetTensorParametersReadWrite(ifm_index, kTfLiteFloat32 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ ifm_indexes.emplace_back(ifm_index);
+ }
+
+ // Add Concat Node
+ //
+    // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+    // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteConcatenationParams>();
+
+ param->activation = kTfLiteActNone;
+ param->axis = 3;
+
+    // Run Concatenation and store its result into Tensor #0
+    // - Read IFMs from Tensor #1 onward
+ interp.AddNodeWithParameters(ifm_indexes, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_CONCATENATION, 1));
+
+    // Set the IFM tensors as inputs, and Tensor #0 as Output #0
+ interp.SetInputs(ifm_indexes);
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/concat_1.lst b/tools/nnapi_quickcheck/tests/concat_1.lst
new file mode 100644
index 000000000..db70d4c8b
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/concat_1.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(CONCAT_COUNT, 3)
+
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
diff --git a/tools/nnapi_quickcheck/tests/concat_quan_1.cpp b/tools/nnapi_quickcheck/tests/concat_quan_1.cpp
new file mode 100644
index 000000000..cd522b049
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/concat_quan_1.cpp
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_concat_quan_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "concat_quan_1.lst"
+#undef INT_VALUE
+
+ // TODO Allow users to set concat axis!
+ const int32_t CONCAT_COUNT = CONCAT_COUNT_Value();
+
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ int32_t OFM_C = 0;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(CONCAT_COUNT);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ // Randomize IFM depth
+ std::default_random_engine generator(SEED);
+ std::uniform_int_distribution<int> distribution(1, 8);
+
+ std::vector<int32_t> depths;
+
+ for (int32_t n = 0; n < CONCAT_COUNT; ++n)
+ {
+ const auto depth = distribution(generator);
+
+ OFM_C += depth;
+ depths.emplace_back(depth);
+ }
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+ quantization.scale = 1.0f;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(depths.size() + 1);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure IFM(s)
+ std::vector<int> ifm_indexes;
+
+ for (uint32_t n = 0; n < depths.size(); ++n)
+ {
+ const auto ifm_index = 1 + n;
+ const auto IFM_C = depths.at(n);
+
+ interp.SetTensorParametersReadWrite(ifm_index, kTfLiteUInt8 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ ifm_indexes.emplace_back(ifm_index);
+ }
+
+ // Add Concat Node
+ //
+    // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+    // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteConcatenationParams>();
+
+ param->activation = kTfLiteActNone;
+ param->axis = 3;
+
+    // Run Concatenation and store its result into Tensor #0
+    // - Read IFMs from Tensor #1 onward
+ interp.AddNodeWithParameters(ifm_indexes, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_CONCATENATION, 1));
+
+    // Set the IFM tensors as inputs, and Tensor #0 as Output #0
+ interp.SetInputs(ifm_indexes);
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/concat_quan_1.lst b/tools/nnapi_quickcheck/tests/concat_quan_1.lst
new file mode 100644
index 000000000..db70d4c8b
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/concat_quan_1.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(CONCAT_COUNT, 3)
+
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
diff --git a/tools/nnapi_quickcheck/tests/conv_1.cpp b/tools/nnapi_quickcheck/tests/conv_1.cpp
new file mode 100644
index 000000000..10046d1ce
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/conv_1.cpp
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_conv_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "conv_1.lst"
+#undef INT_VALUE
+
+ const int32_t STRIDE_H = STRIDE_H_Value();
+ const int32_t STRIDE_W = STRIDE_W_Value();
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t KER_N = KER_N_Value();
+ const int32_t KER_C = IFM_C_Value();
+ const int32_t KER_H = KER_H_Value();
+ const int32_t KER_W = KER_W_Value();
+
+ const int32_t OFM_C = KER_N;
+ const int32_t OFM_H = (IFM_H - KER_H) / STRIDE_H + 1;
+ const int32_t OFM_W = (IFM_W - KER_W) / STRIDE_W + 1;
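+  // VALID padding: OFM_H = (IFM_H - KER_H) / STRIDE_H + 1 (and likewise for the width);
+  // with the defaults in conv_1.lst (3x4 input, 3x4 kernel, stride 1) the output is 1x1xKER_N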
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(STRIDE_H);
+ PRINT_VALUE(STRIDE_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(KER_N);
+ PRINT_VALUE(KER_C);
+ PRINT_VALUE(KER_H);
+ PRINT_VALUE(KER_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ // Configure Kernel Data
+ const uint32_t kernel_size = KER_N * KER_C * KER_H * KER_W;
+ float kernel_data[kernel_size] = {
+ 0.0f,
+ };
+
+ // Fill kernel data with random data
+ {
+ std::normal_distribution<float> kernel_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < kernel_size; ++off)
+ {
+      kernel_data[off] = kernel_dist(random);
+ }
+ }
+
+ // Configure Bias Data
+ const auto bias_size = KER_N;
+ float bias_data[bias_size] = {
+ 0.0f,
+ };
+
+ // Fill bias data with random data
+ {
+ std::normal_distribution<float> bias_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < bias_size; ++off)
+ {
+ bias_data[off] = bias_dist(random);
+ }
+ }
+
+ // Assumption on this example
+ assert(IFM_C == KER_C);
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(5);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure IFM
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // NOTE kernel_data & bias_data should live longer than interpreter!
+ interp.SetTensorParametersReadOnly(
+ 2, kTfLiteFloat32 /* type */, "filter" /* name */, {KER_N, KER_H, KER_W, KER_C} /* dims */,
+ quantization, reinterpret_cast<const char *>(kernel_data), kernel_size * sizeof(float));
+
+ interp.SetTensorParametersReadOnly(
+ 3, kTfLiteFloat32 /* type */, "bias" /* name */, {bias_size} /* dims */, quantization,
+ reinterpret_cast<const char *>(bias_data), bias_size * sizeof(float));
+
+ // Add Convolution Node
+ //
+    // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+    // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteConvParams>();
+
+ param->padding = kTfLitePaddingValid;
+ param->stride_width = STRIDE_W;
+ param->stride_height = STRIDE_H;
+ param->activation = kTfLiteActRelu;
+
+ // Run Convolution and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ // - Read Filter from Tensor #2,
+ // - Read Bias from Tensor #3
+ interp.AddNodeWithParameters({1, 2, 3}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_CONV_2D, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/conv_1.lst b/tools/nnapi_quickcheck/tests/conv_1.lst
new file mode 100644
index 000000000..c01fc90ee
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/conv_1.lst
@@ -0,0 +1,14 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
+
+INT_VALUE(KER_N, 1)
+INT_VALUE(KER_H, 3)
+INT_VALUE(KER_W, 4)
+
+INT_VALUE(STRIDE_H, 1)
+INT_VALUE(STRIDE_W, 1)
diff --git a/tools/nnapi_quickcheck/tests/conv_quan_1.cpp b/tools/nnapi_quickcheck/tests/conv_quan_1.cpp
new file mode 100644
index 000000000..aebf2333a
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/conv_quan_1.cpp
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_conv_quan_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "conv_quan_1.lst"
+#undef INT_VALUE
+
+ const int32_t STRIDE_H = STRIDE_H_Value();
+ const int32_t STRIDE_W = STRIDE_W_Value();
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t KER_N = KER_N_Value();
+ const int32_t KER_C = IFM_C_Value();
+ const int32_t KER_H = KER_H_Value();
+ const int32_t KER_W = KER_W_Value();
+
+ const int32_t OFM_C = KER_N;
+ const int32_t OFM_H = (IFM_H - KER_H) / STRIDE_H + 1;
+ const int32_t OFM_W = (IFM_W - KER_W) / STRIDE_W + 1;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(STRIDE_H);
+ PRINT_VALUE(STRIDE_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(KER_N);
+ PRINT_VALUE(KER_C);
+ PRINT_VALUE(KER_H);
+ PRINT_VALUE(KER_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ // Configure Kernel Data
+ const uint32_t kernel_size = KER_N * KER_C * KER_H * KER_W;
+ float kernel_data[kernel_size] = {
+ 0.0f,
+ };
+
+ // Fill kernel data with random data
+ {
+ std::normal_distribution<float> kernel_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < kernel_size; ++off)
+ {
+      kernel_data[off] = kernel_dist(random);
+ }
+ }
+
+ // Configure Bias Data
+ const auto bias_size = KER_N;
+ int32_t bias_data[bias_size] = {
+ 0,
+ };
+
+ // Fill bias data with random data
+ {
+ std::normal_distribution<float> bias_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < bias_size; ++off)
+ {
+ bias_data[off] = static_cast<int32_t>(bias_dist(random));
+ }
+ }
+
+ // Assumption on this example
+ assert(IFM_C == KER_C);
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ TfLiteQuantizationParams quantization;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(5);
+
+ // Configure OFM
+ float max_scale = (KER_C * KER_H * KER_W) *
+ std::numeric_limits<uint8_t>::max(); // * IFM_scale(1.0f) * kernel_scale(1.0f)
+ quantization.scale = max_scale;
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure IFM
+ quantization.scale = 1.0f;
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // NOTE kernel_data & bias_data should live longer than interpreter!
+ interp.SetTensorParametersReadOnly(
+ 2, kTfLiteUInt8 /* type */, "filter" /* name */, {KER_N, KER_H, KER_W, KER_C} /* dims */,
+ quantization, reinterpret_cast<const char *>(kernel_data), kernel_size * sizeof(uint8_t));
+
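+ // T/F Lite convention: bias_scale = input_scale * filter_scale (both 1.0f here, so this stays 1.0f)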
+ quantization.scale *= quantization.scale;
+ interp.SetTensorParametersReadOnly(
+ 3, kTfLiteInt32 /* type */, "bias" /* name */, {bias_size} /* dims */, quantization,
+ reinterpret_cast<const char *>(bias_data), bias_size * sizeof(int32_t));
+
+ // Add Convolution Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteConvParams>();
+
+ param->padding = kTfLitePaddingValid;
+ param->stride_width = STRIDE_W;
+ param->stride_height = STRIDE_H;
+ param->activation = kTfLiteActRelu;
+
+ // Run Convolution and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ // - Read Filter from Tensor #2,
+ // - Read Bias from Tensor #3
+ interp.AddNodeWithParameters({1, 2, 3}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_CONV_2D, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/conv_quan_1.lst b/tools/nnapi_quickcheck/tests/conv_quan_1.lst
new file mode 100644
index 000000000..c01fc90ee
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/conv_quan_1.lst
@@ -0,0 +1,14 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
+
+INT_VALUE(KER_N, 1)
+INT_VALUE(KER_H, 3)
+INT_VALUE(KER_W, 4)
+
+INT_VALUE(STRIDE_H, 1)
+INT_VALUE(STRIDE_W, 1)
diff --git a/tools/nnapi_quickcheck/tests/dconv_1.cpp b/tools/nnapi_quickcheck/tests/dconv_1.cpp
new file mode 100644
index 000000000..bd0cacfd0
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/dconv_1.cpp
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_dconv_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "dconv_1.lst"
+#undef INT_VALUE
+
+ const int32_t STRIDE_H = STRIDE_H_Value();
+ const int32_t STRIDE_W = STRIDE_W_Value();
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t KER_C = KER_C_Value();
+ const int32_t KER_H = KER_H_Value();
+ const int32_t KER_W = KER_W_Value();
+
+ const int32_t OFM_C = KER_C;
+ const int32_t OFM_H = (IFM_H - KER_H) / STRIDE_H + 1;
+ const int32_t OFM_W = (IFM_W - KER_W) / STRIDE_W + 1;
+
+ const int32_t MULTIPLIER = MULTIPLIER_Value();
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(KER_C);
+ PRINT_VALUE(KER_H);
+ PRINT_VALUE(KER_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(STRIDE_H);
+ PRINT_VALUE(STRIDE_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(MULTIPLIER);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
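+ // Depthwise convolution: filter channel count = IFM channel count * depth multiplier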
+ assert(MULTIPLIER * IFM_C == KER_C);
+
+ // Configure Kernel Data
+ const uint32_t kernel_size = KER_C * KER_H * KER_W;
+ float kernel_data[kernel_size] = {
+ 0.0f,
+ };
+
+ // Fill kernel data with random data
+ {
+ std::normal_distribution<float> kernel_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < kernel_size; ++off)
+ {
+ kernel_data[off] = kernel_dist(random);
+ }
+ }
+
+ // Configure Bias Data
+ const auto bias_size = KER_C;
+ float bias_data[bias_size] = {
+ 0.0f,
+ };
+
+ // Fill bias data with random data
+ {
+ std::normal_distribution<float> bias_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < bias_size; ++off)
+ {
+ bias_data[off] = bias_dist(random);
+ }
+ }
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(4);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure IFM
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // NOTE kernel_data & bias_data should live longer than interpreter!
+ interp.SetTensorParametersReadOnly(
+ 2, kTfLiteFloat32 /* type */, "filter" /* name */, {1, KER_H, KER_W, KER_C} /* dims */,
+ quantization, reinterpret_cast<const char *>(kernel_data), kernel_size * sizeof(float));
+
+ interp.SetTensorParametersReadOnly(
+ 3, kTfLiteFloat32 /* type */, "bias" /* name */, {bias_size} /* dims */, quantization,
+ reinterpret_cast<const char *>(bias_data), bias_size * sizeof(float));
+
+ // Add Depthwise Convolution Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteDepthwiseConvParams>();
+
+ param->padding = kTfLitePaddingValid;
+ param->stride_width = STRIDE_W;
+ param->stride_height = STRIDE_H;
+ param->depth_multiplier = MULTIPLIER;
+ param->activation = kTfLiteActRelu;
+
+ // Run Depthwise Convolution and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ // - Read Filter from Tensor #2,
+ // - Read Bias from Tensor #3
+ interp.AddNodeWithParameters({1, 2, 3}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_DEPTHWISE_CONV_2D, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/dconv_1.lst b/tools/nnapi_quickcheck/tests/dconv_1.lst
new file mode 100644
index 000000000..da851ae2d
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/dconv_1.lst
@@ -0,0 +1,16 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
+
+INT_VALUE(KER_C, 2)
+INT_VALUE(KER_H, 3)
+INT_VALUE(KER_W, 4)
+
+INT_VALUE(MULTIPLIER, 1)
+
+INT_VALUE(STRIDE_H, 1)
+INT_VALUE(STRIDE_W, 1)
diff --git a/tools/nnapi_quickcheck/tests/dconv_quan_1.cpp b/tools/nnapi_quickcheck/tests/dconv_quan_1.cpp
new file mode 100644
index 000000000..43f305f06
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/dconv_quan_1.cpp
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_dconv_quan_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "dconv_quan_1.lst"
+#undef INT_VALUE
+
+ const int32_t STRIDE_H = STRIDE_H_Value();
+ const int32_t STRIDE_W = STRIDE_W_Value();
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t KER_C = KER_C_Value();
+ const int32_t KER_H = KER_H_Value();
+ const int32_t KER_W = KER_W_Value();
+
+ const int32_t OFM_C = KER_C;
+ const int32_t OFM_H = (IFM_H - KER_H) / STRIDE_H + 1;
+ const int32_t OFM_W = (IFM_W - KER_W) / STRIDE_W + 1;
+
+ const int32_t MULTIPLIER = MULTIPLIER_Value();
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(KER_C);
+ PRINT_VALUE(KER_H);
+ PRINT_VALUE(KER_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(STRIDE_H);
+ PRINT_VALUE(STRIDE_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(MULTIPLIER);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
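+ // Depthwise convolution: filter channel count = IFM channel count * depth multiplier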
+ assert(MULTIPLIER * IFM_C == KER_C);
+
+ // Configure Kernel Data
+ const uint32_t kernel_size = KER_C * KER_H * KER_W;
+ float kernel_data[kernel_size] = {
+ 0.0f,
+ };
+
+ // Fill kernel data with random data
+ {
+ std::normal_distribution<float> kernel_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < kernel_size; ++off)
+ {
+ kernel_data[off] = kernel_dist(random);
+ }
+ }
+
+ // Configure Bias Data
+ const auto bias_size = KER_C;
+ int32_t bias_data[bias_size] = {
+ 0,
+ };
+
+ // Fill bias data with random data
+ {
+ std::normal_distribution<float> bias_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < bias_size; ++off)
+ {
+ bias_data[off] = static_cast<int32_t>(bias_dist(random));
+ }
+ }
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ TfLiteQuantizationParams quantization;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(4);
+
+ // Configure OFM
+ float max_scale = (1 * KER_C * KER_H * KER_W) *
+ std::numeric_limits<uint8_t>::max(); // * IFM_scale(1.0f) * kernel_scale(1.0f)
+ quantization.scale = max_scale;
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure IFM
+ quantization.scale = 1.0f;
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // NOTE kernel_data & bias_data should live longer than interpreter!
+ interp.SetTensorParametersReadOnly(
+ 2, kTfLiteUInt8 /* type */, "filter" /* name */, {1, KER_H, KER_W, KER_C} /* dims */,
+ quantization, reinterpret_cast<const char *>(kernel_data), kernel_size * sizeof(uint8_t));
+
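+ // T/F Lite convention: bias_scale = input_scale * filter_scale (both 1.0f here, so this stays 1.0f)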
+ quantization.scale *= quantization.scale;
+ interp.SetTensorParametersReadOnly(
+ 3, kTfLiteInt32 /* type */, "bias" /* name */, {bias_size} /* dims */, quantization,
+ reinterpret_cast<const char *>(bias_data), bias_size * sizeof(int32_t));
+
+ // Add Depthwise Convolution Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteDepthwiseConvParams>();
+
+ param->padding = kTfLitePaddingValid;
+ param->stride_width = STRIDE_W;
+ param->stride_height = STRIDE_H;
+ param->depth_multiplier = MULTIPLIER;
+ param->activation = kTfLiteActRelu;
+
+ // Run Depthwise Convolution and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ // - Read Filter from Tensor #2,
+ // - Read Bias from Tensor #3
+ interp.AddNodeWithParameters({1, 2, 3}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_DEPTHWISE_CONV_2D, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/dconv_quan_1.lst b/tools/nnapi_quickcheck/tests/dconv_quan_1.lst
new file mode 100644
index 000000000..da851ae2d
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/dconv_quan_1.lst
@@ -0,0 +1,16 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
+
+INT_VALUE(KER_C, 2)
+INT_VALUE(KER_H, 3)
+INT_VALUE(KER_W, 4)
+
+INT_VALUE(MULTIPLIER, 1)
+
+INT_VALUE(STRIDE_H, 1)
+INT_VALUE(STRIDE_W, 1)
diff --git a/tools/nnapi_quickcheck/tests/dequantize_1.cpp b/tools/nnapi_quickcheck/tests/dequantize_1.cpp
new file mode 100644
index 000000000..fe310a11d
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/dequantize_1.cpp
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_dequantize_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "dequantize_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_N = IFM_N_Value();
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t OFM_N = IFM_N;
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_N);
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ TfLiteQuantizationParams quantization;
+
+ quantization.scale = 1;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(2);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
+ {IFM_N, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Add DEQUANTIZE Node
+ // Run DEQUANTIZE and store the result into Tensor #0
+ // - Read input from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_DEQUANTIZE, 1));
+
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/dequantize_1.lst b/tools/nnapi_quickcheck/tests/dequantize_1.lst
new file mode 100644
index 000000000..a0077cb95
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/dequantize_1.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_N, 1)
+INT_VALUE(IFM_C, 3)
+INT_VALUE(IFM_H, 320)
+INT_VALUE(IFM_W, 320)
diff --git a/tools/nnapi_quickcheck/tests/div_1.cpp b/tools/nnapi_quickcheck/tests/div_1.cpp
new file mode 100644
index 000000000..ffa0d6cac
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/div_1.cpp
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_div_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "div_1.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT_N = RIGHT_N_Value();
+ const int32_t RIGHT_C = RIGHT_C_Value();
+ const int32_t RIGHT_H = RIGHT_H_Value();
+ const int32_t RIGHT_W = RIGHT_W_Value();
+
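+ // Element-wise DIV broadcasts its operands, so each output dim is the larger of the two input dims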
+ const int32_t OFM_N = std::max(LEFT_N, RIGHT_N);
+ const int32_t OFM_C = std::max(LEFT_C, RIGHT_C);
+ const int32_t OFM_H = std::max(LEFT_H, RIGHT_H);
+ const int32_t OFM_W = std::max(LEFT_W, RIGHT_W);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_C);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_N);
+ PRINT_VALUE(RIGHT_C);
+ PRINT_VALUE(RIGHT_H);
+ PRINT_VALUE(RIGHT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_N, LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT_N, RIGHT_H, RIGHT_W, RIGHT_C} /* dims */,
+ quantization);
+
+ // Add Division Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteAddParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Div and store the result into Tensor #0
+ // - Read Left from Tensor #1
+ // - Read Right from Tensor #2,
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_DIV, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/div_1.lst b/tools/nnapi_quickcheck/tests/div_1.lst
new file mode 100644
index 000000000..fa17caebb
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/div_1.lst
@@ -0,0 +1,13 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_C, 3)
+INT_VALUE(LEFT_H, 16)
+INT_VALUE(LEFT_W, 16)
+
+INT_VALUE(RIGHT_N, 1)
+INT_VALUE(RIGHT_C, 3)
+INT_VALUE(RIGHT_H, 16)
+INT_VALUE(RIGHT_W, 16)
diff --git a/tools/nnapi_quickcheck/tests/div_2.cpp b/tools/nnapi_quickcheck/tests/div_2.cpp
new file mode 100644
index 000000000..c836c259a
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/div_2.cpp
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_div_2, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "div_2.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT = RIGHT_Value();
+
+ const int32_t OFM_N = LEFT_N;
+ const int32_t OFM_C = LEFT_C;
+ const int32_t OFM_H = LEFT_H;
+ const int32_t OFM_W = LEFT_W;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_C);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_N, LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization);
+
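+ // The 1-D 'right' tensor exercises broadcasting against the 4-D 'left' tensor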
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT} /* dims */, quantization);
+
+ // Add Division Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteAddParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Div and store the result into Tensor #0
+ // - Read Left from Tensor #1
+ // - Read Right from Tensor #2,
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_DIV, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/div_2.lst b/tools/nnapi_quickcheck/tests/div_2.lst
new file mode 100644
index 000000000..cd36ac199
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/div_2.lst
@@ -0,0 +1,10 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_C, 3)
+INT_VALUE(LEFT_H, 16)
+INT_VALUE(LEFT_W, 16)
+
+INT_VALUE(RIGHT, 1)
diff --git a/tools/nnapi_quickcheck/tests/fully_connected_1.cpp b/tools/nnapi_quickcheck/tests/fully_connected_1.cpp
new file mode 100644
index 000000000..57af253f5
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/fully_connected_1.cpp
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+template <typename T> T *make_malloc(void) { return reinterpret_cast<T *>(malloc(sizeof(T))); }
+
+TEST(NNAPI_Quickcheck_fully_connected_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "conv_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
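+ // FULLY_CONNECTED flattens the IFM, so each of the KER_H output units has IFM_C * IFM_H * IFM_W weights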
+ const int32_t KER_H = KER_H_Value();
+ const int32_t KER_W = IFM_C_Value() * IFM_H_Value() * IFM_W_Value();
+
+ const int32_t OUT_LEN = KER_H;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(KER_H);
+ PRINT_VALUE(KER_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OUT_LEN);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ // Configure Kernel Data
+ const uint32_t kernel_size = KER_H * KER_W;
+ float kernel_data[kernel_size] = {
+ 0.0f,
+ };
+
+ // Fill kernel data with random data
+ {
+ std::normal_distribution<float> kernel_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < kernel_size; ++off)
+ {
+ kernel_data[off] = kernel_dist(random);
+ }
+ }
+
+ // Configure Bias Data
+ const auto bias_size = KER_H;
+ float bias_data[bias_size] = {
+ 0.0f,
+ };
+
+ // Fill bias data with random data
+ {
+ std::normal_distribution<float> bias_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < bias_size; ++off)
+ {
+ bias_data[off] = bias_dist(random);
+ }
+ }
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(4);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {1 /*N*/, KER_H} /* dims */, quantization);
+
+ // Configure IFM
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // NOTE kernel_data & bias_data should live longer than interpreter!
+ interp.SetTensorParametersReadOnly(
+ 2, kTfLiteFloat32 /* type */, "filter" /* name */, {KER_H, KER_W} /* dims */, quantization,
+ reinterpret_cast<const char *>(kernel_data), kernel_size * sizeof(float));
+
+ interp.SetTensorParametersReadOnly(
+ 3, kTfLiteFloat32 /* type */, "bias" /* name */, {bias_size} /* dims */, quantization,
+ reinterpret_cast<const char *>(bias_data), bias_size * sizeof(float));
+
+ // Add Fully Connected Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_malloc<TfLiteFullyConnectedParams>();
+
+ param->activation = kTfLiteActRelu;
+
+ // Run Fully-Connected and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ // - Read Filter from Tensor #2,
+ // - Read Bias from Tensor #3
+ interp.AddNodeWithParameters({1, 2, 3}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_FULLY_CONNECTED, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/fully_connected_1.lst b/tools/nnapi_quickcheck/tests/fully_connected_1.lst
new file mode 100644
index 000000000..22acb9f7f
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/fully_connected_1.lst
@@ -0,0 +1,9 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
+
+INT_VALUE(KER_H, 1)
diff --git a/tools/nnapi_quickcheck/tests/fully_connected_quan_1.cpp b/tools/nnapi_quickcheck/tests/fully_connected_quan_1.cpp
new file mode 100644
index 000000000..1cb75fea0
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/fully_connected_quan_1.cpp
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+template <typename T> T *make_malloc(void) { return reinterpret_cast<T *>(malloc(sizeof(T))); }
+
+TEST(NNAPI_Quickcheck_fully_connected_quan_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "fully_connected_quan_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
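+ // FULLY_CONNECTED flattens the IFM, so each of the KER_H output units has IFM_C * IFM_H * IFM_W weights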
+ const int32_t KER_H = KER_H_Value();
+ const int32_t KER_W = IFM_C_Value() * IFM_H_Value() * IFM_W_Value();
+
+ const int32_t OUT_LEN = KER_H;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(KER_H);
+ PRINT_VALUE(KER_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OUT_LEN);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ // Configure Kernel Data
+ const uint32_t kernel_size = KER_H * KER_W;
+ float kernel_data[kernel_size] = {
+ 0.0f,
+ };
+
+ // Fill kernel data with random data
+ {
+ std::normal_distribution<float> kernel_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < kernel_size; ++off)
+ {
+ kernel_data[off] = kernel_dist(random);
+ }
+ }
+
+ // Configure Bias Data
+ const auto bias_size = KER_H;
+ int32_t bias_data[bias_size] = {
+ 0,
+ };
+
+ // Fill bias data with random data
+ {
+ std::normal_distribution<float> bias_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < bias_size; ++off)
+ {
+ bias_data[off] = static_cast<int32_t>(bias_dist(random));
+ }
+ }
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+ quantization.scale = FLOAT_NEAREST_TO_1;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(4);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {1 /*N*/, KER_H} /* dims */, quantization);
+
+ // Configure IFM
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // NOTE kernel_data & bias_data should live longer than interpreter!
+ interp.SetTensorParametersReadOnly(
+ 2, kTfLiteUInt8 /* type */, "filter" /* name */, {KER_H, KER_W} /* dims */, quantization,
+ reinterpret_cast<const char *>(kernel_data), kernel_size * sizeof(uint8_t));
+
+ interp.SetTensorParametersReadOnly(
+ 3, kTfLiteInt32 /* type */, "bias" /* name */, {bias_size} /* dims */, quantization,
+ reinterpret_cast<const char *>(bias_data), bias_size * sizeof(int32_t));
+
+ // Add Fully Connected Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_malloc<TfLiteFullyConnectedParams>();
+
+ param->activation = kTfLiteActRelu;
+
+ // Run Fully-Connected and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ // - Read Filter from Tensor #2,
+ // - Read Bias from Tensor #3
+ interp.AddNodeWithParameters({1, 2, 3}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_FULLY_CONNECTED, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/fully_connected_quan_1.lst b/tools/nnapi_quickcheck/tests/fully_connected_quan_1.lst
new file mode 100644
index 000000000..22acb9f7f
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/fully_connected_quan_1.lst
@@ -0,0 +1,9 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
+
+INT_VALUE(KER_H, 1)
diff --git a/tools/nnapi_quickcheck/tests/gather_1.cpp b/tools/nnapi_quickcheck/tests/gather_1.cpp
new file mode 100644
index 000000000..0d5b30eb6
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/gather_1.cpp
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <iostream>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_gather_1, simple_test)
+{
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Set random test parameters
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "gather_1.lst"
+#undef INT_VALUE
+
+ const int32_t INPUT_DATA = INPUT_DATA_Value();
+ const int32_t INDEX_DATA = INDEX_DATA_Value();
+
+ const int32_t OUTPUT_DATA = INDEX_DATA;
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(INPUT_DATA);
+ PRINT_VALUE(INDEX_DATA);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OUTPUT_DATA);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+
+ quantization.scale = 1;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure INPUT_DATA
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "input" /* name */,
+ {INPUT_DATA} /* dims */, quantization);
+
+ // Configure INDEX_DATA
+ interp.SetTensorParametersReadWrite(1, kTfLiteInt32 /* type */, "index" /* name */,
+ {INDEX_DATA} /* dims */, quantization);
+
+ // Configure OUTPUT_VALUES
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "output_data" /* name */,
+ {OUTPUT_DATA} /* dims */, quantization);
+
+ auto *param = reinterpret_cast<TfLiteGatherParams *>(malloc(sizeof(TfLiteGatherParams)));
+
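+ // Gather along axis 0 of the 1-D input: output[i] = input[index[i]]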
+ param->axis = 0;
+
+ // Add GATHER Node
+ // Run GATHER and store its result into Tensor #2
+ // - Read input data and index_data from Tensor #0 and #1, respectively
+ interp.AddNodeWithParameters({0, 1}, {2}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_GATHER, 1));
+
+ // Set Tensor #0 and #1 as Input, and Tensor #2 as Output
+ interp.SetInputs({0, 1});
+ interp.SetOutputs({2});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/gather_1.lst b/tools/nnapi_quickcheck/tests/gather_1.lst
new file mode 100644
index 000000000..923a05677
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/gather_1.lst
@@ -0,0 +1,6 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(INPUT_DATA, 8192)
+INT_VALUE(INDEX_DATA, 300)
diff --git a/tools/nnapi_quickcheck/tests/gather_2.cpp b/tools/nnapi_quickcheck/tests/gather_2.cpp
new file mode 100644
index 000000000..b3cb3c6ef
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/gather_2.cpp
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <iostream>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_gather_2, simple_test)
+{
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Set random test parameters
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "gather_2.lst"
+#undef INT_VALUE
+
+ const int32_t INPUT_DATA_H = INPUT_DATA_H_Value();
+ const int32_t INPUT_DATA_W = INPUT_DATA_W_Value();
+ const int32_t INDEX_DATA = INDEX_DATA_Value();
+
+ const int32_t OUTPUT_DATA_H = INPUT_DATA_H;
+ const int32_t OUTPUT_DATA_W = INDEX_DATA;
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(INPUT_DATA_H);
+ PRINT_VALUE(INPUT_DATA_W);
+ PRINT_VALUE(INDEX_DATA);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OUTPUT_DATA_H);
+ PRINT_VALUE(OUTPUT_DATA_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+
+ quantization.scale = 1;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure INPUT_DATA
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "input" /* name */,
+ {INPUT_DATA_H, INPUT_DATA_W} /* dims */, quantization);
+
+ // Configure INDEX_DATA
+ interp.SetTensorParametersReadWrite(1, kTfLiteInt32 /* type */, "index" /* name */,
+ {INDEX_DATA} /* dims */, quantization);
+
+ // Configure OUTPUT_VALUES
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "output_data" /* name */,
+ {OUTPUT_DATA_H, OUTPUT_DATA_W} /* dims */, quantization);
+
+ auto *param = reinterpret_cast<TfLiteGatherParams *>(malloc(sizeof(TfLiteGatherParams)));
+
+ param->axis = 0;
+
+ // Add GATHER Node
+ // Run GATHER and store its result into Tensor #2
+ // - Read input data and index_data from Tensor #0 and #1, respectively
+ interp.AddNodeWithParameters({0, 1}, {2}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_GATHER, 1));
+
+ // Set Tensor #0 and #1 as Input, and Tensor #2 as Output
+ interp.SetInputs({0, 1});
+ interp.SetOutputs({2});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/gather_2.lst b/tools/nnapi_quickcheck/tests/gather_2.lst
new file mode 100644
index 000000000..5bf6bd33a
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/gather_2.lst
@@ -0,0 +1,7 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(INPUT_DATA_H, 128192)
+INT_VALUE(INPUT_DATA_W, 4)
+INT_VALUE(INDEX_DATA, 300)
diff --git a/tools/nnapi_quickcheck/tests/logistic_quan_1.cpp b/tools/nnapi_quickcheck/tests/logistic_quan_1.cpp
new file mode 100644
index 000000000..dc6902d66
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/logistic_quan_1.cpp
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_logistic_quan_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "logistic_quan_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_N = IFM_N_Value();
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t OFM_N = IFM_N;
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_N);
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ TfLiteQuantizationParams in_quantization;
+ in_quantization.scale = 0.5f;
+ in_quantization.zero_point = 0;
+
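+ // LOGISTIC output lies in [0, 1), so scale 1/256 with zero_point 0 covers the uint8 range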
+ TfLiteQuantizationParams out_quantization;
+ out_quantization.scale = 1.f / 256;
+ out_quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(2);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, out_quantization);
+
+ // Configure input
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
+ {IFM_N, IFM_H, IFM_W, IFM_C} /* dims */, in_quantization);
+
+ // Add Logistic Node
+ // Run Logistic and store the result into Tensor #0
+ // - Read input from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_LOGISTIC, 1));
+
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/logistic_quan_1.lst b/tools/nnapi_quickcheck/tests/logistic_quan_1.lst
new file mode 100644
index 000000000..9b3d8ebcf
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/logistic_quan_1.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_N, 1)
+INT_VALUE(IFM_C, 1)
+INT_VALUE(IFM_H, 2)
+INT_VALUE(IFM_W, 2)
diff --git a/tools/nnapi_quickcheck/tests/max_pool_1.cpp b/tools/nnapi_quickcheck/tests/max_pool_1.cpp
new file mode 100644
index 000000000..bb538141d
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/max_pool_1.cpp
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <iostream>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_max_pool_1, simple_test)
+{
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Set random test parameters
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "max_pool_1.lst"
+#undef INT_VALUE
+
+ const TfLitePadding PADDING_TYPE = static_cast<TfLitePadding>(PADDING_TYPE_Value());
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t KER_H = KER_H_Value();
+ const int32_t KER_W = KER_W_Value();
+
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = OFM_H_Value();
+ const int32_t OFM_W = OFM_W_Value();
+
+ assert((OFM_H >= (IFM_H - KER_H)));
+ assert((OFM_W >= (IFM_W - KER_W)));
+ assert((kTfLitePaddingSame == PADDING_TYPE) || (kTfLitePaddingValid == PADDING_TYPE));
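+ // With stride 1, VALID padding yields OFM_H = IFM_H - KER_H + 1 and SAME padding
+ // yields OFM_H = IFM_H, so the asserts above only reject output sizes that neither
+ // padding mode could produce.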
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(PADDING_TYPE);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(KER_H);
+ PRINT_VALUE(KER_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(2);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure IFM
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Add Max Pooling Node
+ //
+ // NOTE AddNodeWithParameters takes the ownership of param and deallocates it with free(),
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLitePoolParams>();
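+ // make_alloc (from "memory.h") presumably returns malloc-allocated storage, so the
+ // interpreter's later free() on this pointer is well defined.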
+
+ param->padding = PADDING_TYPE;
+ param->stride_width = 1;
+ param->stride_height = 1;
+ param->filter_width = KER_W;
+ param->filter_height = KER_H;
+ param->activation = kTfLiteActNone;
+
+ // Run Max Pooling and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_MAX_POOL_2D, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/max_pool_1.lst b/tools/nnapi_quickcheck/tests/max_pool_1.lst
new file mode 100644
index 000000000..4b5c1304e
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/max_pool_1.lst
@@ -0,0 +1,17 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
+
+INT_VALUE(KER_N, 1)
+INT_VALUE(KER_H, 3)
+INT_VALUE(KER_W, 4)
+
+INT_VALUE(OFM_H, 1)
+INT_VALUE(OFM_W, 1)
+
+// Default is kTfLitePaddingValid (= 2)
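+// (in the TfLitePadding enum, kTfLitePaddingSame == 1 and kTfLitePaddingValid == 2)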
+INT_VALUE(PADDING_TYPE, 2)
diff --git a/tools/nnapi_quickcheck/tests/max_pool_quan_1.cpp b/tools/nnapi_quickcheck/tests/max_pool_quan_1.cpp
new file mode 100644
index 000000000..5768ddde8
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/max_pool_quan_1.cpp
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <iostream>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_max_pool_quan_1, simple_test)
+{
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Set random test parameters
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "max_pool_quan_1.lst"
+#undef INT_VALUE
+
+ const TfLitePadding PADDING_TYPE = static_cast<TfLitePadding>(PADDING_TYPE_Value());
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t KER_H = KER_H_Value();
+ const int32_t KER_W = KER_W_Value();
+
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = OFM_H_Value();
+ const int32_t OFM_W = OFM_W_Value();
+
+ assert((OFM_H >= (IFM_H - KER_H)));
+ assert((OFM_W >= (IFM_W - KER_W)));
+ assert((kTfLitePaddingSame == PADDING_TYPE) || (kTfLitePaddingValid == PADDING_TYPE));
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(PADDING_TYPE);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(KER_H);
+ PRINT_VALUE(KER_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+ quantization.scale = 1.0f;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(2);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure IFM
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Add Max Pooling Node
+ //
+ // NOTE AddNodeWithParameters takes the ownership of param and deallocates it with free(),
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLitePoolParams>();
+
+ param->padding = PADDING_TYPE;
+ param->stride_width = 1;
+ param->stride_height = 1;
+ param->filter_width = KER_W;
+ param->filter_height = KER_H;
+ param->activation = kTfLiteActNone;
+
+ // Run Max Pooling and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_MAX_POOL_2D, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/max_pool_quan_1.lst b/tools/nnapi_quickcheck/tests/max_pool_quan_1.lst
new file mode 100644
index 000000000..4b5c1304e
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/max_pool_quan_1.lst
@@ -0,0 +1,17 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
+
+INT_VALUE(KER_N, 1)
+INT_VALUE(KER_H, 3)
+INT_VALUE(KER_W, 4)
+
+INT_VALUE(OFM_H, 1)
+INT_VALUE(OFM_W, 1)
+
+// Default is kTfLitePaddingValid (= 2)
+INT_VALUE(PADDING_TYPE, 2)
diff --git a/tools/nnapi_quickcheck/tests/mul_1.cpp b/tools/nnapi_quickcheck/tests/mul_1.cpp
new file mode 100644
index 000000000..3a4ae5c8e
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/mul_1.cpp
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_mul_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "mul_1.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_1D = LEFT_1D_Value();
+ const int32_t LEFT_2D = LEFT_2D_Value();
+ const int32_t LEFT_3D = LEFT_3D_Value();
+
+ const int32_t RIGHT_W = RIGHT_W_Value();
+
+ const int32_t OFM_1D = LEFT_1D;
+ const int32_t OFM_2D = LEFT_2D;
+ const int32_t OFM_3D = LEFT_3D;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_1D);
+ PRINT_VALUE(LEFT_2D);
+ PRINT_VALUE(LEFT_3D);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_1D);
+ PRINT_VALUE(OFM_2D);
+ PRINT_VALUE(OFM_3D);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+
+ quantization.scale = 1;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_1D, OFM_2D, OFM_3D} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_1D, LEFT_2D, LEFT_3D} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT_W} /* dims */, quantization);
+
+ // Add MUL Node
+ //
+ // NOTE AddNodeWithParameters takes the ownership of param and deallocates it with free(),
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteMulParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run MUL and store the result into Tensor #0
+ // - Read Left from Tensor #1
+ // - Read Right from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_MUL, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+ param.tensor_logging = 1;
+ param.log_path = "report/tensor_mul_1.log";
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/mul_1.lst b/tools/nnapi_quickcheck/tests/mul_1.lst
new file mode 100644
index 000000000..1d42159de
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/mul_1.lst
@@ -0,0 +1,10 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+// (3, 1, 4)
+INT_VALUE(LEFT_1D, 3)
+INT_VALUE(LEFT_2D, 1)
+INT_VALUE(LEFT_3D, 4)
+
+INT_VALUE(RIGHT_W, 4)
diff --git a/tools/nnapi_quickcheck/tests/mul_2.cpp b/tools/nnapi_quickcheck/tests/mul_2.cpp
new file mode 100644
index 000000000..b117cd602
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/mul_2.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_mul_2, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "mul_2.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_D1 = LEFT_D1_Value();
+ const int32_t LEFT_D2 = LEFT_D2_Value();
+ const int32_t LEFT_D3 = LEFT_D3_Value();
+
+ const int32_t RIGHT_D1 = RIGHT_D1_Value();
+
+ const int32_t OFM_D1 = LEFT_D1;
+ const int32_t OFM_D2 = LEFT_D2;
+ const int32_t OFM_D3 = LEFT_D3;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_D1);
+ PRINT_VALUE(LEFT_D2);
+ PRINT_VALUE(LEFT_D3);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_D1);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_D1);
+ PRINT_VALUE(OFM_D2);
+ PRINT_VALUE(OFM_D3);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+
+ quantization.scale = 1;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_D1, OFM_D2, OFM_D3} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_D1, LEFT_D2, LEFT_D3} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT_D1} /* dims */, quantization);
+
+ // Add MUL Node
+ //
+ // NOTE AddNodeWithParameters takes the ownership of param and deallocates it with free(),
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteMulParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run MUL and store the result into Tensor #0
+ // - Read Left from Tensor #1
+ // - Read Right from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_MUL, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/mul_2.lst b/tools/nnapi_quickcheck/tests/mul_2.lst
new file mode 100644
index 000000000..da53e7eee
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/mul_2.lst
@@ -0,0 +1,9 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_D1, 5)
+INT_VALUE(LEFT_D2, 3)
+INT_VALUE(LEFT_D3, 12)
+
+INT_VALUE(RIGHT_D1, 12)
diff --git a/tools/nnapi_quickcheck/tests/mul_quan_1.cpp b/tools/nnapi_quickcheck/tests/mul_quan_1.cpp
new file mode 100644
index 000000000..7207a90fb
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/mul_quan_1.cpp
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_mul_quan_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "mul_quan_1.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_1D = LEFT_1D_Value();
+ const int32_t LEFT_2D = LEFT_2D_Value();
+ const int32_t LEFT_3D = LEFT_3D_Value();
+
+ const int32_t RIGHT_W = RIGHT_W_Value();
+
+ const int32_t OFM_1D = LEFT_1D;
+ const int32_t OFM_2D = LEFT_2D;
+ const int32_t OFM_3D = LEFT_3D;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_1D);
+ PRINT_VALUE(LEFT_2D);
+ PRINT_VALUE(LEFT_3D);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_1D);
+ PRINT_VALUE(OFM_2D);
+ PRINT_VALUE(OFM_3D);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ float max_scale =
+ std::numeric_limits<uint8_t>::max(); // * input1_scale(1.0f) * input2_scale(1.0f)
+ quantization.scale = max_scale;
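+ // Using 255 (uint8 max) as the output scale keeps the effective multiplier
+ // input1_scale * input2_scale / output_scale = 1 / 255 well below 1, which the
+ // quantized MUL kernel presumably requires for its fixed-point rescaling.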
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {OFM_1D, OFM_2D, OFM_3D} /* dims */, quantization);
+
+ // Configure input(s)
+ quantization.scale = 1.0f;
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "left" /* name */,
+ {LEFT_1D, LEFT_2D, LEFT_3D} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteUInt8 /* type */, "right" /* name */,
+ {RIGHT_W} /* dims */, quantization);
+
+ // Add MUL Node
+ //
+ // NOTE AddNodeWithParameters takes the ownership of param and deallocates it with free(),
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteMulParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run MUL and store the result into Tensor #0
+ // - Read Left from Tensor #1
+ // - Read Right from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_MUL, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/mul_quan_1.lst b/tools/nnapi_quickcheck/tests/mul_quan_1.lst
new file mode 100644
index 000000000..d850f375a
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/mul_quan_1.lst
@@ -0,0 +1,10 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+// (300, 1, 4)
+INT_VALUE(LEFT_1D, 300)
+INT_VALUE(LEFT_2D, 1)
+INT_VALUE(LEFT_3D, 4)
+
+INT_VALUE(RIGHT_W, 4)
diff --git a/tools/nnapi_quickcheck/tests/relu1_1.cpp b/tools/nnapi_quickcheck/tests/relu1_1.cpp
new file mode 100644
index 000000000..aeefe2f06
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu1_1.cpp
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+int main(int argc, char **argv)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "relu1_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(2);
+
+ // Configure Output Tensor
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_H, OFM_W} /* dims */, quantization);
+
+ // Configure Input Tensor
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {IFM_H, IFM_W} /* dims */, quantization);
+
+ // Add ReLU Node
+ // Run ReLU and store its result into Tensor #0
+ // - Read IFM from Tensor #1
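+ // - RELU_N1_TO_1 clamps each element to the range [-1, 1]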
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_RELU_N1_TO_1, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ return RandomTestRunner{SEED, param}.run(builder);
+}
diff --git a/tools/nnapi_quickcheck/tests/relu1_1.lst b/tools/nnapi_quickcheck/tests/relu1_1.lst
new file mode 100644
index 000000000..4f61845a7
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu1_1.lst
@@ -0,0 +1,6 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_H, 16)
+INT_VALUE(IFM_W, 16)
diff --git a/tools/nnapi_quickcheck/tests/relu6_1.cpp b/tools/nnapi_quickcheck/tests/relu6_1.cpp
new file mode 100644
index 000000000..76bbe954c
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu6_1.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_relu6_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "relu6_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(2);
+
+ // Configure Output Tensor
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_H, OFM_W} /* dims */, quantization);
+
+ // Configure Input Tensor
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {IFM_H, IFM_W} /* dims */, quantization);
+
+ // Add ReLU Node
+ // Run ReLU and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_RELU6, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/relu6_1.lst b/tools/nnapi_quickcheck/tests/relu6_1.lst
new file mode 100644
index 000000000..4f61845a7
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu6_1.lst
@@ -0,0 +1,6 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_H, 16)
+INT_VALUE(IFM_W, 16)
diff --git a/tools/nnapi_quickcheck/tests/relu6_quan_1.cpp b/tools/nnapi_quickcheck/tests/relu6_quan_1.cpp
new file mode 100644
index 000000000..fe849f955
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu6_quan_1.cpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+int main(int argc, char **argv)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "relu6_quan_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+ quantization.scale = 1.0f;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(2);
+
+ // Configure Output Tensor
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {OFM_H, OFM_W} /* dims */, quantization);
+
+ // Configure Input Tensor
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
+ {IFM_H, IFM_W} /* dims */, quantization);
+
+ // Add ReLU Node
+ // Run ReLU and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_RELU6, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ return RandomTestRunner{SEED, param}.run(builder);
+}
diff --git a/tools/nnapi_quickcheck/tests/relu6_quan_1.lst b/tools/nnapi_quickcheck/tests/relu6_quan_1.lst
new file mode 100644
index 000000000..4f61845a7
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu6_quan_1.lst
@@ -0,0 +1,6 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_H, 16)
+INT_VALUE(IFM_W, 16)
diff --git a/tools/nnapi_quickcheck/tests/relu_1.cpp b/tools/nnapi_quickcheck/tests/relu_1.cpp
new file mode 100644
index 000000000..f754c3d8e
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu_1.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_relu_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "relu_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(2);
+
+ // Configure Output Tensor
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_H, OFM_W} /* dims */, quantization);
+
+ // Configure Input Tensor
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {IFM_H, IFM_W} /* dims */, quantization);
+
+ // Add ReLU Node
+ // Run ReLU and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_RELU, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/relu_1.lst b/tools/nnapi_quickcheck/tests/relu_1.lst
new file mode 100644
index 000000000..4f61845a7
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu_1.lst
@@ -0,0 +1,6 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_H, 16)
+INT_VALUE(IFM_W, 16)
diff --git a/tools/nnapi_quickcheck/tests/relu_2.cpp b/tools/nnapi_quickcheck/tests/relu_2.cpp
new file mode 100644
index 000000000..c08764520
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu_2.cpp
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_relu_2, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "relu_2.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(2);
+
+ // Configure Output Tensor
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure Input Tensor
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Add ReLU Node
+ // Run ReLU and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_RELU, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/relu_2.lst b/tools/nnapi_quickcheck/tests/relu_2.lst
new file mode 100644
index 000000000..343bff819
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu_2.lst
@@ -0,0 +1,7 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_H, 16)
+INT_VALUE(IFM_W, 16)
+INT_VALUE(IFM_C, 3)
diff --git a/tools/nnapi_quickcheck/tests/relu_3.cpp b/tools/nnapi_quickcheck/tests/relu_3.cpp
new file mode 100644
index 000000000..6c41bc12a
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu_3.cpp
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_relu_3, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "relu_3.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_N = IFM_N_Value();
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_N);
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t OFM_N = IFM_N;
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(2);
+
+ // Configure Output Tensor
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure Input Tensor
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {IFM_N, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Add ReLU Node
+ // Run ReLU and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_RELU, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/relu_3.lst b/tools/nnapi_quickcheck/tests/relu_3.lst
new file mode 100644
index 000000000..a3a405c10
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu_3.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_H, 16)
+INT_VALUE(IFM_W, 16)
+INT_VALUE(IFM_C, 3)
+INT_VALUE(IFM_N, 1)
diff --git a/tools/nnapi_quickcheck/tests/relu_quan_1.cpp b/tools/nnapi_quickcheck/tests/relu_quan_1.cpp
new file mode 100644
index 000000000..59fe5d254
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu_quan_1.cpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+int main(int argc, char **argv)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "relu_quan_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+ quantization.scale = 1.0f;
+ quantization.zero_point = 0;
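+ // With scale 1.0 and zero_point 0, real_value == quantized_value >= 0, so RELU is
+ // effectively an identity copy on this quantized input.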
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(2);
+
+ // Configure Output Tensor
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {OFM_H, OFM_W} /* dims */, quantization);
+
+ // Configure Input Tensor
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
+ {IFM_H, IFM_W} /* dims */, quantization);
+
+ // Add ReLU Node
+ // Run ReLU and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_RELU, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ return RandomTestRunner{SEED, param}.run(builder);
+}
diff --git a/tools/nnapi_quickcheck/tests/relu_quan_1.lst b/tools/nnapi_quickcheck/tests/relu_quan_1.lst
new file mode 100644
index 000000000..4f61845a7
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu_quan_1.lst
@@ -0,0 +1,6 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_H, 16)
+INT_VALUE(IFM_W, 16)
diff --git a/tools/nnapi_quickcheck/tests/reshape_1.cpp b/tools/nnapi_quickcheck/tests/reshape_1.cpp
new file mode 100644
index 000000000..21d35a5ca
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/reshape_1.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <iostream>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_reshape_1, simple_test)
+{
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Set random test parameters
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "max_pool_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t OUT_L = IFM_C * IFM_H * IFM_W;
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OUT_L);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t dims[2] = {1, OUT_L};
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+    // A: This may be necessary, because quantization values (scale, zero_point) of TENSOR_INT32 and
+ // TENSOR_QUANT8_ASYMM are passed on to the runtime.
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {1 /*N*/, OUT_L} /* dims */, quantization);
+
+ // Configure IFM
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Configure Shape
+ interp.SetTensorParametersReadOnly(2, kTfLiteInt32 /* type */, "shape" /* name */,
+ {2} /* dims */, quantization,
+ reinterpret_cast<const char *>(dims), 2 * sizeof(int32_t));
+
+ // Add Reshape Node
+ //
+    // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free().
+    //      So, param should be allocated with malloc.
+ auto param = make_alloc<TfLiteReshapeParams>();
+
+ param->num_dimensions = 2;
+ param->shape[0] = 1;
+ param->shape[1] = OUT_L;
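+    // NOTE The target shape is provided twice: as the constant Tensor #2 above and
+    //      through TfLiteReshapeParams here; both describe the same {1, OUT_L} shape.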
+
+    // Run Reshape and store its result into Tensor #0
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_RESHAPE, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/reshape_1.lst b/tools/nnapi_quickcheck/tests/reshape_1.lst
new file mode 100644
index 000000000..fcaaff016
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/reshape_1.lst
@@ -0,0 +1,7 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 4)
+INT_VALUE(IFM_W, 8)
diff --git a/tools/nnapi_quickcheck/tests/reshape_quan_1.cpp b/tools/nnapi_quickcheck/tests/reshape_quan_1.cpp
new file mode 100644
index 000000000..7f852fd80
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/reshape_quan_1.cpp
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <iostream>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_reshape_quan_1, simple_test)
+{
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Set random test parameters
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "reshape_quan_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t OUT_L = IFM_C * IFM_H * IFM_W;
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OUT_L);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t dims[2] = {1, OUT_L};
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+    // A: This may be necessary, because quantization values (scale, zero_point) of TENSOR_INT32 and
+ // TENSOR_QUANT8_ASYMM are passed on to the runtime.
+ TfLiteQuantizationParams quantization;
+ quantization.scale = 1.0f;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {1 /*N*/, OUT_L} /* dims */, quantization);
+
+ // Configure IFM
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Configure Shape
+ interp.SetTensorParametersReadOnly(2, kTfLiteInt32 /* type */, "shape" /* name */,
+ {2} /* dims */, quantization,
+ reinterpret_cast<const char *>(dims), 2 * sizeof(int32_t));
+
+ // Add Reshape Node
+ //
+    // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free().
+    //      So, param should be allocated with malloc.
+ auto param = make_alloc<TfLiteReshapeParams>();
+
+ param->num_dimensions = 2;
+ param->shape[0] = 1;
+ param->shape[1] = OUT_L;
+
+    // Run Reshape and store its result into Tensor #0
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_RESHAPE, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/reshape_quan_1.lst b/tools/nnapi_quickcheck/tests/reshape_quan_1.lst
new file mode 100644
index 000000000..fcaaff016
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/reshape_quan_1.lst
@@ -0,0 +1,7 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 4)
+INT_VALUE(IFM_W, 8)
diff --git a/tools/nnapi_quickcheck/tests/resize_bilinear_1.cpp b/tools/nnapi_quickcheck/tests/resize_bilinear_1.cpp
new file mode 100644
index 000000000..37d8ab525
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/resize_bilinear_1.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <iostream>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_resize_bilinear_1, simple_test)
+{
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Set random test parameters
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "resize_bilinear_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = OFM_H_Value();
+ const int32_t OFM_W = OFM_W_Value();
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ int32_t size_data[2] = {OFM_H, OFM_W};
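+  // The 'size' tensor (Tensor #2 below) carries the target spatial dimensions
+  // {OFM_H, OFM_W} that RESIZE_BILINEAR reads as its second input.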
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+    // A: This may be necessary, because quantization values (scale, zero_point) of TENSOR_INT32 and
+ // TENSOR_QUANT8_ASYMM are passed on to the runtime.
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure IFM
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Configure Size
+ interp.SetTensorParametersReadOnly(
+ 2, kTfLiteInt32 /* type */, "size" /* name */, {2} /* dims */, quantization,
+ reinterpret_cast<const char *>(size_data), 2 * sizeof(int32_t));
+
+    // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free().
+    //      So, param should be allocated with malloc.
+ auto param = make_alloc<TfLiteResizeBilinearParams>();
+
+    // NOTE 'align_corners' controls whether the centers of the corner pixels of the
+    //      input and output are aligned during interpolation; this test keeps the
+    //      default (false) behavior.
+    param->align_corners = false;
+
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_RESIZE_BILINEAR, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/resize_bilinear_1.lst b/tools/nnapi_quickcheck/tests/resize_bilinear_1.lst
new file mode 100644
index 000000000..cc3dbd5cc
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/resize_bilinear_1.lst
@@ -0,0 +1,10 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
+
+INT_VALUE(OFM_H, 30)
+INT_VALUE(OFM_W, 40)
diff --git a/tools/nnapi_quickcheck/tests/softmax_1.cpp b/tools/nnapi_quickcheck/tests/softmax_1.cpp
new file mode 100644
index 000000000..5e15b6169
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/softmax_1.cpp
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_softmax_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "softmax_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_C = 1;
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ const nnfw::util::feature::Shape ifm_shape{IFM_C, IFM_H, IFM_W};
+
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(2);
+
+ // Configure Output Tensor
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {1, IFM_H * IFM_W} /* dims */, quantization);
+
+ // Configure Input Tensor
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {1, IFM_H * IFM_W} /* batch_size, input_size */,
+ quantization);
+
+ // Add Softmax Node
+ //
+    // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free().
+    //      So, param should be allocated with malloc.
+ auto param = make_alloc<TfLiteSoftmaxParams>();
+
+ param->beta = 1.0;
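+    // With beta, Softmax computes exp(beta * x_i) / sum_j exp(beta * x_j);
+    // beta = 1.0 gives the standard softmax.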
+
+ // Run Softmax and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SOFTMAX, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/softmax_1.lst b/tools/nnapi_quickcheck/tests/softmax_1.lst
new file mode 100644
index 000000000..1ef9da075
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/softmax_1.lst
@@ -0,0 +1,6 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_H, 2)
+INT_VALUE(IFM_W, 2)
diff --git a/tools/nnapi_quickcheck/tests/softmax_2.cpp b/tools/nnapi_quickcheck/tests/softmax_2.cpp
new file mode 100644
index 000000000..489016af5
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/softmax_2.cpp
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_softmax_2, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define FLOAT_VALUE(NAME, VALUE) FloatVar NAME##_Value(#NAME, VALUE);
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "softmax_2.lst"
+#undef INT_VALUE
+#undef FLOAT_VALUE
+
+ const int32_t IFM_C = 1;
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+ const float BETA = BETA_Value();
+
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_VALUE(BETA);
+ PRINT_NEWLINE();
+
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ const nnfw::util::feature::Shape ifm_shape{IFM_C, IFM_H, IFM_W};
+
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(2);
+
+ // Configure Output Tensor
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {1, IFM_H * IFM_W} /* dims */, quantization);
+
+ // Configure Input Tensor
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {1, IFM_H * IFM_W} /* batch_size, input_size */,
+ quantization);
+
+ // Add Softmax Node
+ //
+    // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free().
+    //      So, param should be allocated with malloc.
+ auto param = make_alloc<TfLiteSoftmaxParams>();
+
+ param->beta = BETA;
+
+ // Run Softmax and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SOFTMAX, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/softmax_2.lst b/tools/nnapi_quickcheck/tests/softmax_2.lst
new file mode 100644
index 000000000..1c381bf49
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/softmax_2.lst
@@ -0,0 +1,11 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+#ifndef FLOAT_VALUE
+#error "FLOAT_VALUE should be defined"
+#endif // FLOAT_VALUE
+
+INT_VALUE(IFM_H, 2)
+INT_VALUE(IFM_W, 2)
+FLOAT_VALUE(BETA, 0.1)
diff --git a/tools/nnapi_quickcheck/tests/softmax_quan_1.cpp b/tools/nnapi_quickcheck/tests/softmax_quan_1.cpp
new file mode 100644
index 000000000..347262fa6
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/softmax_quan_1.cpp
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_softmax_quan_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "softmax_quan_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_C = 1;
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ const nnfw::util::feature::Shape ifm_shape{IFM_C, IFM_H, IFM_W};
+
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+ quantization.scale = 1.0f / 256;
+ quantization.zero_point = 0;
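+    // NOTE T/F Lite's quantized Softmax kernel expects the output tensor to use
+    //      scale = 1/256 and zero_point = 0 so that the uint8 range covers the
+    //      [0, 1) range of softmax outputs.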
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(2);
+
+ // Configure Output Tensor
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {1, IFM_H * IFM_W} /* dims */, quantization);
+
+ // Configure Input Tensor
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
+ {1, IFM_H * IFM_W} /* batch_size, input_size */,
+ quantization);
+
+ // Add Softmax Node
+ //
+    // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free().
+    //      So, param should be allocated with malloc.
+ auto param = make_alloc<TfLiteSoftmaxParams>();
+
+ param->beta = 1.0;
+
+ // Run Softmax and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SOFTMAX, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/softmax_quan_1.lst b/tools/nnapi_quickcheck/tests/softmax_quan_1.lst
new file mode 100644
index 000000000..1ef9da075
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/softmax_quan_1.lst
@@ -0,0 +1,6 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_H, 2)
+INT_VALUE(IFM_W, 2)
diff --git a/tools/nnapi_quickcheck/tests/split_1.cpp b/tools/nnapi_quickcheck/tests/split_1.cpp
new file mode 100644
index 000000000..742c5dbed
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/split_1.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_split_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "split_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_N = IFM_N_Value();
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+ const int32_t NUM_SPLIT = NUM_SPLIT_Value();
+ const int32_t AXIS = AXIS_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_N);
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_VALUE(NUM_SPLIT);
+ PRINT_VALUE(AXIS);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t OFM_N = IFM_N;
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+ const int32_t axis[1] = {AXIS};
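+  // SPLIT consumes the split axis as a constant 1-element int32 tensor
+  // (configured below as Tensor #0).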
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(NUM_SPLIT + 2);
+
+ // Configure Input Tensor(s)
+ interp.SetTensorParametersReadOnly(0, kTfLiteInt32 /* type */, "axis" /* name */,
+ {1} /* dims */, quantization,
+ reinterpret_cast<const char *>(axis), 1 * sizeof(int32_t));
+
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {IFM_N, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Configure Output Tensor
+ std::vector<int> ofm_indexes;
+
+ for (uint32_t n = 0; n < NUM_SPLIT; ++n)
+ {
+ const auto ofm_index = 2 + n;
+
+ interp.SetTensorParametersReadWrite(ofm_index, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ ofm_indexes.emplace_back(ofm_index);
+ }
+
+ auto *param = reinterpret_cast<TfLiteSplitParams *>(malloc(sizeof(TfLiteSplitParams)));
+
+ param->num_splits = NUM_SPLIT;
+
+ // Add SPLIT Node
+    // Run SPLIT and store its results into Tensors #2 ~ #(NUM_SPLIT+1)
+ // - Read axis and IFM from Tensor #0 and #1, respectively
+ interp.AddNodeWithParameters({0, 1}, ofm_indexes, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SPLIT, 1));
+
+    // Set Tensor #1 as Input #0, and Tensors #2 ~ #(NUM_SPLIT+1) as Outputs #0 ~ #(NUM_SPLIT-1)
+ interp.SetInputs({1});
+ interp.SetOutputs(ofm_indexes);
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/split_1.lst b/tools/nnapi_quickcheck/tests/split_1.lst
new file mode 100644
index 000000000..823bf24fa
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/split_1.lst
@@ -0,0 +1,10 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_N, 1)
+INT_VALUE(IFM_C, 1)
+INT_VALUE(IFM_H, 5)
+INT_VALUE(IFM_W, 30)
+INT_VALUE(NUM_SPLIT, 5)
+INT_VALUE(AXIS, 1)
diff --git a/tools/nnapi_quickcheck/tests/split_2.cpp b/tools/nnapi_quickcheck/tests/split_2.cpp
new file mode 100644
index 000000000..d70e35ca7
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/split_2.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_split_2, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "split_2.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_N = IFM_N_Value();
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+ const int32_t NUM_SPLIT = NUM_SPLIT_Value();
+ const int32_t AXIS = AXIS_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_N);
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_VALUE(NUM_SPLIT);
+ PRINT_VALUE(AXIS);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t OFM_N = IFM_N;
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+ const int32_t axis[1] = {AXIS};
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(NUM_SPLIT + 2);
+
+ // Configure Input Tensor(s)
+ interp.SetTensorParametersReadOnly(0, kTfLiteInt32 /* type */, "axis" /* name */,
+ {1} /* dims */, quantization,
+ reinterpret_cast<const char *>(axis), 1 * sizeof(int32_t));
+
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {IFM_N, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Configure Output Tensor
+ std::vector<int> ofm_indexes;
+
+ for (uint32_t n = 0; n < NUM_SPLIT; ++n)
+ {
+ const auto ofm_index = 2 + n;
+
+ interp.SetTensorParametersReadWrite(ofm_index, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ ofm_indexes.emplace_back(ofm_index);
+ }
+
+ auto *param = reinterpret_cast<TfLiteSplitParams *>(malloc(sizeof(TfLiteSplitParams)));
+
+ param->num_splits = NUM_SPLIT;
+
+ // Add SPLIT Node
+    // Run SPLIT and store its results into Tensors #2 ~ #(NUM_SPLIT+1)
+ // - Read axis and IFM from Tensor #0 and #1, respectively
+ interp.AddNodeWithParameters({0, 1}, ofm_indexes, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SPLIT, 1));
+
+    // Set Tensor #1 as Input #0, and Tensors #2 ~ #(NUM_SPLIT+1) as Outputs #0 ~ #(NUM_SPLIT-1)
+ interp.SetInputs({1});
+ interp.SetOutputs(ofm_indexes);
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/split_2.lst b/tools/nnapi_quickcheck/tests/split_2.lst
new file mode 100644
index 000000000..ebfbab2d5
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/split_2.lst
@@ -0,0 +1,10 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_N, 1)
+INT_VALUE(IFM_C, 1)
+INT_VALUE(IFM_H, 5)
+INT_VALUE(IFM_W, 30)
+INT_VALUE(NUM_SPLIT, 3)
+INT_VALUE(AXIS, 2)
diff --git a/tools/nnapi_quickcheck/tests/split_3.cpp b/tools/nnapi_quickcheck/tests/split_3.cpp
new file mode 100644
index 000000000..47359642d
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/split_3.cpp
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_split_3, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "split_3.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+ const int32_t NUM_SPLIT = NUM_SPLIT_Value();
+ const int32_t AXIS = AXIS_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_VALUE(NUM_SPLIT);
+ PRINT_VALUE(AXIS);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+ const int32_t axis[1] = {AXIS};
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(NUM_SPLIT + 2);
+
+ // Configure Input Tensor(s)
+ interp.SetTensorParametersReadOnly(0, kTfLiteInt32 /* type */, "axis" /* name */,
+ {1} /* dims */, quantization,
+ reinterpret_cast<const char *>(axis), 1 * sizeof(int32_t));
+
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {IFM_H, IFM_W} /* dims */, quantization);
+
+ // Configure Output Tensor
+ std::vector<int> ofm_indexes;
+
+ for (uint32_t n = 0; n < NUM_SPLIT; ++n)
+ {
+ const auto ofm_index = 2 + n;
+
+ interp.SetTensorParametersReadWrite(ofm_index, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_H, OFM_W} /* dims */, quantization);
+
+ ofm_indexes.emplace_back(ofm_index);
+ }
+
+ auto *param = reinterpret_cast<TfLiteSplitParams *>(malloc(sizeof(TfLiteSplitParams)));
+
+ param->num_splits = NUM_SPLIT;
+
+ // Add SPLIT Node
+    // Run SPLIT and store its results into Tensors #2 ~ #(NUM_SPLIT+1)
+ // - Read axis and IFM from Tensor #0 and #1, respectively
+ interp.AddNodeWithParameters({0, 1}, ofm_indexes, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SPLIT, 1));
+
+    // Set Tensor #1 as Input #0, and Tensors #2 ~ #(NUM_SPLIT+1) as Outputs #0 ~ #(NUM_SPLIT-1)
+ interp.SetInputs({1});
+ interp.SetOutputs(ofm_indexes);
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/split_3.lst b/tools/nnapi_quickcheck/tests/split_3.lst
new file mode 100644
index 000000000..300bb02b7
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/split_3.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_H, 5)
+INT_VALUE(IFM_W, 30)
+INT_VALUE(NUM_SPLIT, 3)
+INT_VALUE(AXIS, 1)
diff --git a/tools/nnapi_quickcheck/tests/split_4.cpp b/tools/nnapi_quickcheck/tests/split_4.cpp
new file mode 100644
index 000000000..d16e75d5c
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/split_4.cpp
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_split_4, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "split_4.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+ const int32_t NUM_SPLIT = NUM_SPLIT_Value();
+ const int32_t AXIS = AXIS_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_VALUE(NUM_SPLIT);
+ PRINT_VALUE(AXIS);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+ const int32_t axis[1] = {AXIS};
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(NUM_SPLIT + 2);
+
+ // Configure Input Tensor(s)
+ interp.SetTensorParametersReadOnly(0, kTfLiteInt32 /* type */, "axis" /* name */,
+ {1} /* dims */, quantization,
+ reinterpret_cast<const char *>(axis), 1 * sizeof(int32_t));
+
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {IFM_H, IFM_W} /* dims */, quantization);
+
+ // Configure Output Tensor
+ std::vector<int> ofm_indexes;
+
+ for (uint32_t n = 0; n < NUM_SPLIT; ++n)
+ {
+ const auto ofm_index = 2 + n;
+
+ interp.SetTensorParametersReadWrite(ofm_index, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_H, OFM_W} /* dims */, quantization);
+
+ ofm_indexes.emplace_back(ofm_index);
+ }
+
+ auto *param = reinterpret_cast<TfLiteSplitParams *>(malloc(sizeof(TfLiteSplitParams)));
+
+ param->num_splits = NUM_SPLIT;
+
+ // Add SPLIT Node
+    // Run SPLIT and store its results into Tensors #2 ~ #(NUM_SPLIT+1)
+ // - Read axis and IFM from Tensor #0 and #1, respectively
+ interp.AddNodeWithParameters({0, 1}, ofm_indexes, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SPLIT, 1));
+
+    // Set Tensor #1 as Input #0, and Tensors #2 ~ #(NUM_SPLIT+1) as Outputs #0 ~ #(NUM_SPLIT-1)
+ interp.SetInputs({1});
+ interp.SetOutputs(ofm_indexes);
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/split_4.lst b/tools/nnapi_quickcheck/tests/split_4.lst
new file mode 100644
index 000000000..5b2882828
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/split_4.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_H, 5)
+INT_VALUE(IFM_W, 30)
+INT_VALUE(NUM_SPLIT, 5)
+INT_VALUE(AXIS, 0)
diff --git a/tools/nnapi_quickcheck/tests/sub_1.cpp b/tools/nnapi_quickcheck/tests/sub_1.cpp
new file mode 100644
index 000000000..2734f525e
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/sub_1.cpp
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_sub_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "sub_1.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT_N = RIGHT_N_Value();
+ const int32_t RIGHT_C = RIGHT_C_Value();
+ const int32_t RIGHT_H = RIGHT_H_Value();
+ const int32_t RIGHT_W = RIGHT_W_Value();
+
+ const int32_t OFM_N = std::max(LEFT_N, RIGHT_N);
+ const int32_t OFM_C = std::max(LEFT_C, RIGHT_C);
+ const int32_t OFM_H = std::max(LEFT_H, RIGHT_H);
+ const int32_t OFM_W = std::max(LEFT_W, RIGHT_W);
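+  // Each output extent is the larger of the two operand extents; with broadcasting,
+  // the smaller operand would need extent 1 in that dimension (here both operands
+  // have identical shapes, so no broadcasting actually occurs).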
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_C);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_N);
+ PRINT_VALUE(RIGHT_C);
+ PRINT_VALUE(RIGHT_H);
+ PRINT_VALUE(RIGHT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_N, LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT_N, RIGHT_H, RIGHT_W, RIGHT_C} /* dims */,
+ quantization);
+
+ // Add Subtraction Node
+ //
+    // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free().
+    //      So, param should be allocated with malloc.
+    auto param = make_alloc<TfLiteSubParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Sub and store the result into Tensor #0
+ // - Read Left from Tensor #1
+    // - Read Right from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SUB, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/sub_1.lst b/tools/nnapi_quickcheck/tests/sub_1.lst
new file mode 100644
index 000000000..fa17caebb
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/sub_1.lst
@@ -0,0 +1,13 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_C, 3)
+INT_VALUE(LEFT_H, 16)
+INT_VALUE(LEFT_W, 16)
+
+INT_VALUE(RIGHT_N, 1)
+INT_VALUE(RIGHT_C, 3)
+INT_VALUE(RIGHT_H, 16)
+INT_VALUE(RIGHT_W, 16)
diff --git a/tools/nnapi_quickcheck/tests/sub_2.cpp b/tools/nnapi_quickcheck/tests/sub_2.cpp
new file mode 100644
index 000000000..88e060847
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/sub_2.cpp
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_sub_2, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "sub_2.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT = RIGHT_Value();
+
+ const int32_t OFM_N = LEFT_N;
+ const int32_t OFM_C = LEFT_C;
+ const int32_t OFM_H = LEFT_H;
+ const int32_t OFM_W = LEFT_W;
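+  // The right operand is a 1-D tensor of RIGHT element(s) that is broadcast against
+  // the 4-D left operand, so the output shape follows the left operand.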
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_C);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_N, LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT} /* dims */, quantization);
+
+ // Add Subtraction Node
+ //
+    // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free().
+    //      So, param should be allocated with malloc.
+    auto param = make_alloc<TfLiteSubParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Sub and store the result into Tensor #0
+ // - Read Left from Tensor #1
+ // - Read Right from Tensor #2,
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SUB, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
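
Each of these quickcheck tests hands the setup lambda above to a FunctionBuilder and then to RandomTestRunner, which, judging by Diff.h and the TOLERANCE knob, presumably fills the writable inputs with random data, runs the graph on both the plain T/F Lite path and the NNAPI path, and compares the outputs within the given tolerance. A minimal sketch of that kind of element-wise check, assuming a simple relative-error budget (the real metric in support/tflite/Diff.h may differ):

#include <cmath>

// Hypothetical comparison in the spirit of RandomTestRunner's TOLERANCE knob
static bool close_enough(float expected, float obtained, int tolerance)
{
  const float budget = 1e-5f * static_cast<float>(tolerance);
  const float diff = std::fabs(expected - obtained);
  const float scale = std::fmax(1.0f, std::fmax(std::fabs(expected), std::fabs(obtained)));
  return diff <= budget * scale;
}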
diff --git a/tools/nnapi_quickcheck/tests/sub_2.lst b/tools/nnapi_quickcheck/tests/sub_2.lst
new file mode 100644
index 000000000..cd36ac199
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/sub_2.lst
@@ -0,0 +1,10 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_C, 3)
+INT_VALUE(LEFT_H, 16)
+INT_VALUE(LEFT_W, 16)
+
+INT_VALUE(RIGHT, 1)
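
The .lst files are consumed through an X-macro: each test defines INT_VALUE, includes the list, and undefines it again, so every INT_VALUE(NAME, VALUE) line turns into an IntVar object. For sub_2.lst the include expands, illustratively, to:

IntVar LEFT_N_Value("LEFT_N", 1);
IntVar LEFT_C_Value("LEFT_C", 3);
IntVar LEFT_H_Value("LEFT_H", 16);
IntVar LEFT_W_Value("LEFT_W", 16);
IntVar RIGHT_Value("RIGHT", 1);

Each value is then read back through NAME_Value(); presumably (see inc/env.h) the constructor also consults an environment variable of the same name, so the shapes can be changed at run time without recompiling.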
diff --git a/tools/nnapi_quickcheck/tests/sub_3.cpp b/tools/nnapi_quickcheck/tests/sub_3.cpp
new file mode 100644
index 000000000..fd2d4aaea
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/sub_3.cpp
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_sub_3, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "sub_3.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT = RIGHT_Value();
+
+ const int32_t OFM_H = LEFT_H;
+ const int32_t OFM_W = LEFT_W;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+    // An AddTensors(N) call makes the T/F Lite interpreter create N tensors with indices in [0, N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_H, OFM_W} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_H, LEFT_W} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT, LEFT_W} /* dims */, quantization);
+
+    // Add Subtraction Node
+    //
+    // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+    //      so param should be allocated with malloc
+    auto param = make_alloc<TfLiteAddParams>();
+
+    param->activation = kTfLiteActNone;
+
+    // Run Sub and store the result into Tensor #0
+    // - Read Left from Tensor #1
+    // - Read Right from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SUB, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/sub_3.lst b/tools/nnapi_quickcheck/tests/sub_3.lst
new file mode 100644
index 000000000..c56875048
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/sub_3.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_H, 8)
+INT_VALUE(LEFT_W, 16)
+
+INT_VALUE(RIGHT, 1)
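
Given how SEED, VERBOSE, and TOLERANCE are read through nnfw::util::env::IntAccessor, IntVar most likely wraps the same mechanism: keep the default from the .lst file unless an identically named environment variable overrides it. A hedged sketch of such a class (the real one in inc/env.h and lib/env.cpp may differ):

#include <cstdint>
#include <string>

#include "util/environment.h"

class IntVar
{
public:
  IntVar(const std::string &name, int32_t value) : _value{value}
  {
    // Override the default when an environment variable with the same name is set
    nnfw::util::env::IntAccessor(name).access(_value);
  }

  int32_t operator()(void) const { return _value; }

private:
  int _value;
};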
diff --git a/tools/nnapi_quickcheck/tests/sub_4.cpp b/tools/nnapi_quickcheck/tests/sub_4.cpp
new file mode 100644
index 000000000..993acddce
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/sub_4.cpp
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_sub_4, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "sub_1.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT_C = RIGHT_C_Value();
+ const int32_t RIGHT_H = RIGHT_H_Value();
+ const int32_t RIGHT_W = RIGHT_W_Value();
+
+ const int32_t OFM_C = std::max(LEFT_C, RIGHT_C);
+ const int32_t OFM_H = std::max(LEFT_H, RIGHT_H);
+ const int32_t OFM_W = std::max(LEFT_W, RIGHT_W);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_C);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_C);
+ PRINT_VALUE(RIGHT_H);
+ PRINT_VALUE(RIGHT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+    // An AddTensors(N) call makes the T/F Lite interpreter create N tensors with indices in [0, N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT_H, RIGHT_W, RIGHT_C} /* dims */, quantization);
+
+    // Add Subtraction Node
+    //
+    // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+    //      so param should be allocated with malloc
+    auto param = make_alloc<TfLiteAddParams>();
+
+    param->activation = kTfLiteActNone;
+
+    // Run Sub and store the result into Tensor #0
+    // - Read Left from Tensor #1
+    // - Read Right from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SUB, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
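
The NOTE above about AddNodeWithParameters is why these tests allocate their parameter block with make_alloc from inc/memory.h rather than with new: the interpreter releases it with free(). A hypothetical sketch of a malloc-backed helper compatible with that contract (the real make_alloc may differ):

#include <cstdlib>
#include <new>

template <typename T> T *make_alloc(void)
{
  // Allocate with malloc so the caller may legally hand the pointer to free()
  auto *p = static_cast<T *>(std::malloc(sizeof(T)));
  if (p == nullptr)
  {
    throw std::bad_alloc{};
  }
  return p;
}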
diff --git a/tools/nnapi_quickcheck/tests/sub_4.lst b/tools/nnapi_quickcheck/tests/sub_4.lst
new file mode 100644
index 000000000..ce6128f83
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/sub_4.lst
@@ -0,0 +1,11 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_C, 3)
+INT_VALUE(LEFT_H, 8)
+INT_VALUE(LEFT_W, 16)
+
+INT_VALUE(RIGHT_C, 3)
+INT_VALUE(RIGHT_H, 1)
+INT_VALUE(RIGHT_W, 16)
diff --git a/tools/nnapi_quickcheck/tests/sub_5.cpp b/tools/nnapi_quickcheck/tests/sub_5.cpp
new file mode 100644
index 000000000..610be5754
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/sub_5.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_sub_5, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "sub_5.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+
+ const int32_t RIGHT_N = RIGHT_N_Value();
+ const int32_t RIGHT_H = RIGHT_H_Value();
+ const int32_t RIGHT_W = RIGHT_W_Value();
+ const int32_t RIGHT_C = RIGHT_C_Value();
+
+ const int32_t OFM_N = std::max(LEFT_N, RIGHT_N);
+ const int32_t OFM_H = std::max(LEFT_H, RIGHT_H);
+ const int32_t OFM_W = std::max(LEFT_W, RIGHT_W);
+ const int32_t OFM_C = std::max(LEFT_C, RIGHT_C);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_VALUE(LEFT_C);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_N);
+ PRINT_VALUE(RIGHT_H);
+ PRINT_VALUE(RIGHT_W);
+ PRINT_VALUE(RIGHT_C);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+ PRINT_VALUE(OFM_C);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+  // Configure left and right operand data
+ const uint32_t left_size = LEFT_N * LEFT_C * LEFT_H * LEFT_W;
+ const uint32_t right_size = RIGHT_N * RIGHT_C * RIGHT_H * RIGHT_W;
+ float left_data[left_size] = {
+ 0.0f,
+ };
+ float right_data[right_size] = {
+ 0.0f,
+ };
+
+  // Fill the left tensor with a constant value and the right tensor with increasing values
+  {
+    std::normal_distribution<float> left_dist(-1.0f, +1.0f); // NOTE declared but currently unused
+ float value = 10.0f;
+ for (uint32_t off = 0; off < left_size; ++off)
+ {
+ left_data[off] = value;
+ }
+ value = 1.0f;
+ for (uint32_t off = 0; off < right_size; ++off)
+ {
+ right_data[off] = value++;
+ }
+ }
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+    // An AddTensors(N) call makes the T/F Lite interpreter create N tensors with indices in [0, N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadOnly(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_N, LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization,
+ reinterpret_cast<const char *>(left_data),
+ left_size * sizeof(float));
+
+ // Configure input(s)
+ interp.SetTensorParametersReadOnly(2, kTfLiteFloat32 /* type */, "right" /* name */,
+                                       {RIGHT_W, RIGHT_C} /* dims: lower rank to exercise broadcasting */,
+ quantization, reinterpret_cast<const char *>(right_data),
+ right_size * sizeof(float));
+
+    // Add Subtraction Node
+    //
+    // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+    //      so param should be allocated with malloc
+    auto param = make_alloc<TfLiteAddParams>();
+
+    param->activation = kTfLiteActNone;
+
+    // Run Sub and store the result into Tensor #0
+    // - Read Left from Tensor #1
+    // - Read Right from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SUB, 1));
+
+ interp.SetInputs({});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
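
sub_5 differs from the earlier sub tests in two ways: both operands are baked in as read-only tensors (hence SetInputs({})), and the right operand has a lower-rank shape, so the SUB kernel must broadcast it over the N and H axes. Under the shapes from sub_5.lst, the expected output can be sketched with the following reference loop (illustrative only; it reuses the test's left_data/right_data buffers and dimension names):

#include <vector>

std::vector<float> expected(LEFT_N * LEFT_H * LEFT_W * LEFT_C);

for (int n = 0; n < LEFT_N; ++n)
  for (int h = 0; h < LEFT_H; ++h)
    for (int w = 0; w < LEFT_W; ++w)
      for (int c = 0; c < LEFT_C; ++c)
      {
        const int out_off = ((n * LEFT_H + h) * LEFT_W + w) * LEFT_C + c;
        const int right_off = w * RIGHT_C + c; // right has no N/H axes to index
        expected[out_off] = left_data[out_off] - right_data[right_off];
      }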
diff --git a/tools/nnapi_quickcheck/tests/sub_5.lst b/tools/nnapi_quickcheck/tests/sub_5.lst
new file mode 100644
index 000000000..0327e6b73
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/sub_5.lst
@@ -0,0 +1,13 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_H, 2)
+INT_VALUE(LEFT_W, 3)
+INT_VALUE(LEFT_C, 4)
+
+INT_VALUE(RIGHT_N, 1)
+INT_VALUE(RIGHT_H, 1)
+INT_VALUE(RIGHT_W, 3)
+INT_VALUE(RIGHT_C, 4)
diff --git a/tools/nnapi_quickcheck/tests/sub_6.cpp b/tools/nnapi_quickcheck/tests/sub_6.cpp
new file mode 100644
index 000000000..b9e37c8d7
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/sub_6.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_sub_6, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "sub_6.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+
+ const int32_t RIGHT_N = RIGHT_N_Value();
+ const int32_t RIGHT_H = RIGHT_H_Value();
+ const int32_t RIGHT_W = RIGHT_W_Value();
+ const int32_t RIGHT_C = RIGHT_C_Value();
+
+ const int32_t OFM_N = std::max(LEFT_N, RIGHT_N);
+ const int32_t OFM_H = std::max(LEFT_H, RIGHT_H);
+ const int32_t OFM_W = std::max(LEFT_W, RIGHT_W);
+ const int32_t OFM_C = std::max(LEFT_C, RIGHT_C);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_VALUE(LEFT_C);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_N);
+ PRINT_VALUE(RIGHT_H);
+ PRINT_VALUE(RIGHT_W);
+ PRINT_VALUE(RIGHT_C);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+ PRINT_VALUE(OFM_C);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+  // Configure left and right operand data
+ const uint32_t left_size = LEFT_N * LEFT_C * LEFT_H * LEFT_W;
+ const uint32_t right_size = RIGHT_N * RIGHT_C * RIGHT_H * RIGHT_W;
+ float left_data[left_size] = {
+ 0.0f,
+ };
+ float right_data[right_size] = {
+ 0.0f,
+ };
+
+  // Fill the left tensor with a constant value and the right tensor with increasing values
+  {
+    std::normal_distribution<float> left_dist(-1.0f, +1.0f); // NOTE declared but currently unused
+ float value = 10.0f;
+ for (uint32_t off = 0; off < left_size; ++off)
+ {
+ left_data[off] = value;
+ }
+ value = 1.0f;
+ for (uint32_t off = 0; off < right_size; ++off)
+ {
+ right_data[off] = value++;
+ }
+ }
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+    // An AddTensors(N) call makes the T/F Lite interpreter create N tensors with indices in [0, N)
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadOnly(1, kTfLiteFloat32 /* type */, "left" /* name */,
+                                       {LEFT_W, LEFT_C} /* dims: lower rank to exercise broadcasting */,
+ quantization, reinterpret_cast<const char *>(left_data),
+ left_size * sizeof(float));
+
+ // Configure input(s)
+ interp.SetTensorParametersReadOnly(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT_N, RIGHT_H, RIGHT_W, RIGHT_C} /* dims */,
+ quantization, reinterpret_cast<const char *>(right_data),
+ right_size * sizeof(float));
+
+    // Add Subtraction Node
+    //
+    // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+    //      so param should be allocated with malloc
+    auto param = make_alloc<TfLiteAddParams>();
+
+    param->activation = kTfLiteActNone;
+
+    // Run Sub and store the result into Tensor #0
+    // - Read Left from Tensor #1
+    // - Read Right from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SUB, 1));
+
+ interp.SetInputs({});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/sub_6.lst b/tools/nnapi_quickcheck/tests/sub_6.lst
new file mode 100644
index 000000000..52a1f1acc
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/sub_6.lst
@@ -0,0 +1,13 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_H, 1)
+INT_VALUE(LEFT_W, 3)
+INT_VALUE(LEFT_C, 4)
+
+INT_VALUE(RIGHT_N, 1)
+INT_VALUE(RIGHT_H, 2)
+INT_VALUE(RIGHT_W, 3)
+INT_VALUE(RIGHT_C, 4)
diff --git a/tools/nnapi_quickcheck/tests/tanh_1.cpp b/tools/nnapi_quickcheck/tests/tanh_1.cpp
new file mode 100644
index 000000000..67847eceb
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/tanh_1.cpp
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_tanh_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "tanh_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_N = IFM_N_Value();
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t OFM_N = IFM_N;
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_N);
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+    // An AddTensors(N) call makes the T/F Lite interpreter create N tensors with indices in [0, N)
+ interp.AddTensors(2);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {IFM_N, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Add Tanh Node
+ // Run Tanh and store the result into Tensor #0
+ // - Read input from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_TANH, 1));
+
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/tanh_1.lst b/tools/nnapi_quickcheck/tests/tanh_1.lst
new file mode 100644
index 000000000..a0077cb95
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/tanh_1.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_N, 1)
+INT_VALUE(IFM_C, 3)
+INT_VALUE(IFM_H, 320)
+INT_VALUE(IFM_W, 320)
diff --git a/tools/nnapi_quickcheck/tests/topk_v2_1.cpp b/tools/nnapi_quickcheck/tests/topk_v2_1.cpp
new file mode 100644
index 000000000..bb9d8535e
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/topk_v2_1.cpp
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <iostream>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_topk_v2_1, simple_test)
+{
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Set random test parameters
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "topk_v2_1.lst"
+#undef INT_VALUE
+
+ const int32_t INPUT_DATA = INPUT_DATA_Value();
+ const int32_t K = K_Value();
+
+ const int32_t OUTPUT_VALUES = K;
+ const int32_t OUTPUT_INDICES = K;
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(INPUT_DATA);
+ PRINT_VALUE(K);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OUTPUT_VALUES);
+ PRINT_VALUE(OUTPUT_INDICES);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ // Fill the K data
+ int32_t k_data[1] = {K};
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+    // A: This may be necessary, because quantization values (scale, zero_point) of TENSOR_INT32 and
+ // TENSOR_QUANT8_ASYMM are passed on to the runtime.
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+    // An AddTensors(N) call makes the T/F Lite interpreter create N tensors with indices in [0, N)
+ interp.AddTensors(4);
+
+ // Configure INPUT_DATA
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "input" /* name */,
+ {INPUT_DATA} /* dims */, quantization);
+
+ // Configure K
+ interp.SetTensorParametersReadOnly(1, kTfLiteInt32 /* type */, "k" /* name */, {1} /* dims */,
+ quantization, reinterpret_cast<const char *>(k_data),
+ sizeof(k_data));
+
+ // Configure OUTPUT_VALUES
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "output_values" /* name */,
+ {OUTPUT_VALUES} /* dims */, quantization);
+
+ // Configure OUTPUT_INDICES
+ interp.SetTensorParametersReadWrite(3, kTfLiteInt32 /* type */, "output_indices" /* name */,
+ {OUTPUT_INDICES} /* dims */, quantization);
+
+ // Add TopK_V2 Node
+ // Run TopK_V2 and store its result into Tensor #2 and #3
+ // - Read input data and K from Tensor #0 and #1, respectively
+ interp.AddNodeWithParameters({0, 1}, {2, 3}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_TOPK_V2, 1));
+
+ // Set Tensor #0 as Input, and Tensor #2 and #3 as Output
+ interp.SetInputs({0});
+ interp.SetOutputs({2, 3});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/topk_v2_1.lst b/tools/nnapi_quickcheck/tests/topk_v2_1.lst
new file mode 100644
index 000000000..a40ee3c57
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/topk_v2_1.lst
@@ -0,0 +1,6 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(INPUT_DATA, 8192)
+INT_VALUE(K, 16)
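
topk_v2_1 wires K in as a read-only one-element tensor and expects two outputs: the K largest values and their indices. A self-contained reference of that behaviour (tie ordering may differ from the TF Lite/NNAPI kernels):

#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

void topk_reference(const std::vector<float> &input, int k, std::vector<float> &values,
                    std::vector<int32_t> &indices)
{
  std::vector<int32_t> order(input.size());
  std::iota(order.begin(), order.end(), 0);

  // Order candidate indices so that the k largest inputs come first
  std::partial_sort(order.begin(), order.begin() + k, order.end(),
                    [&input](int32_t a, int32_t b) { return input[a] > input[b]; });

  indices.assign(order.begin(), order.begin() + k);
  values.resize(k);
  for (int i = 0; i < k; ++i)
  {
    values[i] = input[indices[i]];
  }
}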