Diffstat (limited to 'tools')
-rw-r--r--  tools/CMakeLists.txt | 8
-rwxr-xr-x  tools/cross/build_android_rootfs.sh | 62
-rwxr-xr-x  tools/cross/build_rootfs.sh | 25
-rwxr-xr-x  tools/extract_weights_from_tflite/extract.py | 90
-rwxr-xr-x  tools/extract_weights_from_tflite/extract_from_tflite.sh | 31
-rwxr-xr-x  tools/extract_weights_from_tflite/print_op.py | 58
-rw-r--r--  tools/image_importer/README.md | 15
-rwxr-xr-x  tools/image_importer/image_importer.py | 33
-rwxr-xr-x  tools/image_importer/imagegen.py | 40
-rw-r--r--  tools/modelgen/CONV_2D.template.json | 102
-rwxr-xr-x  tools/modelgen/modelgen.py | 98
-rwxr-xr-x  tools/modelgen/modelgen.sh | 31
-rw-r--r--  tools/nnapi_quickcheck/CMakeLists.txt | 82
-rw-r--r--  tools/nnapi_quickcheck/inc/env.h | 60
-rw-r--r--  tools/nnapi_quickcheck/inc/memory.h | 34
-rw-r--r--  tools/nnapi_quickcheck/lib/env.cpp | 50
-rw-r--r--  tools/nnapi_quickcheck/lib/env.test.cpp | 45
-rw-r--r--  tools/nnapi_quickcheck/tests/add_1.cpp | 159
-rw-r--r--  tools/nnapi_quickcheck/tests/add_1.lst | 13
-rw-r--r--  tools/nnapi_quickcheck/tests/add_2.cpp | 177
-rw-r--r--  tools/nnapi_quickcheck/tests/add_2.lst | 13
-rw-r--r--  tools/nnapi_quickcheck/tests/add_3.cpp | 137
-rw-r--r--  tools/nnapi_quickcheck/tests/add_3.lst | 6
-rw-r--r--  tools/nnapi_quickcheck/tests/add_4.cpp | 159
-rw-r--r--  tools/nnapi_quickcheck/tests/add_4.lst | 13
-rw-r--r--  tools/nnapi_quickcheck/tests/add_5.cpp | 152
-rw-r--r--  tools/nnapi_quickcheck/tests/add_5.lst | 10
-rw-r--r--  tools/nnapi_quickcheck/tests/add_6.cpp | 144
-rw-r--r--  tools/nnapi_quickcheck/tests/add_6.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/add_7.cpp | 152
-rw-r--r--  tools/nnapi_quickcheck/tests/add_7.lst | 11
-rw-r--r--  tools/nnapi_quickcheck/tests/add_8.cpp | 190
-rw-r--r--  tools/nnapi_quickcheck/tests/add_8.lst | 13
-rw-r--r--  tools/nnapi_quickcheck/tests/add_9.cpp | 187
-rw-r--r--  tools/nnapi_quickcheck/tests/add_9.lst | 13
-rw-r--r--  tools/nnapi_quickcheck/tests/add_quan_1.cpp | 162
-rw-r--r--  tools/nnapi_quickcheck/tests/add_quan_1.lst | 13
-rw-r--r--  tools/nnapi_quickcheck/tests/avg_pool_1.cpp | 150
-rw-r--r--  tools/nnapi_quickcheck/tests/avg_pool_1.lst | 10
-rw-r--r--  tools/nnapi_quickcheck/tests/avg_pool_quan_1.cpp | 149
-rw-r--r--  tools/nnapi_quickcheck/tests/avg_pool_quan_1.lst | 10
-rw-r--r--  tools/nnapi_quickcheck/tests/cast_1.cpp | 136
-rw-r--r--  tools/nnapi_quickcheck/tests/cast_1.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/cast_2.cpp | 134
-rw-r--r--  tools/nnapi_quickcheck/tests/cast_2.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/cast_q_to_f_1.cpp | 136
-rw-r--r--  tools/nnapi_quickcheck/tests/cast_q_to_f_1.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/concat_1.cpp | 161
-rw-r--r--  tools/nnapi_quickcheck/tests/concat_1.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/concat_quan_1.cpp | 163
-rw-r--r--  tools/nnapi_quickcheck/tests/concat_quan_1.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/conv_1.cpp | 207
-rw-r--r--  tools/nnapi_quickcheck/tests/conv_1.lst | 14
-rw-r--r--  tools/nnapi_quickcheck/tests/conv_quan_1.cpp | 211
-rw-r--r--  tools/nnapi_quickcheck/tests/conv_quan_1.lst | 14
-rw-r--r--  tools/nnapi_quickcheck/tests/dconv_1.cpp | 205
-rw-r--r--  tools/nnapi_quickcheck/tests/dconv_1.lst | 16
-rw-r--r--  tools/nnapi_quickcheck/tests/dconv_quan_1.cpp | 209
-rw-r--r--  tools/nnapi_quickcheck/tests/dconv_quan_1.lst | 16
-rw-r--r--  tools/nnapi_quickcheck/tests/dequantize_1.cpp | 136
-rw-r--r--  tools/nnapi_quickcheck/tests/dequantize_1.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/div_1.cpp | 159
-rw-r--r--  tools/nnapi_quickcheck/tests/div_1.lst | 13
-rw-r--r--  tools/nnapi_quickcheck/tests/div_2.cpp | 152
-rw-r--r--  tools/nnapi_quickcheck/tests/div_2.lst | 10
-rw-r--r--  tools/nnapi_quickcheck/tests/fully_connected_1.cpp | 187
-rw-r--r--  tools/nnapi_quickcheck/tests/fully_connected_1.lst | 9
-rw-r--r--  tools/nnapi_quickcheck/tests/fully_connected_quan_1.cpp | 189
-rw-r--r--  tools/nnapi_quickcheck/tests/fully_connected_quan_1.lst | 9
-rw-r--r--  tools/nnapi_quickcheck/tests/gather_1.cpp | 132
-rw-r--r--  tools/nnapi_quickcheck/tests/gather_1.lst | 6
-rw-r--r--  tools/nnapi_quickcheck/tests/gather_2.cpp | 136
-rw-r--r--  tools/nnapi_quickcheck/tests/gather_2.lst | 7
-rw-r--r--  tools/nnapi_quickcheck/tests/logistic_quan_1.cpp | 140
-rw-r--r--  tools/nnapi_quickcheck/tests/logistic_quan_1.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/max_pool_1.cpp | 156
-rw-r--r--  tools/nnapi_quickcheck/tests/max_pool_1.lst | 17
-rw-r--r--  tools/nnapi_quickcheck/tests/max_pool_quan_1.cpp | 158
-rw-r--r--  tools/nnapi_quickcheck/tests/max_pool_quan_1.lst | 17
-rw-r--r--  tools/nnapi_quickcheck/tests/mul_1.cpp | 152
-rw-r--r--  tools/nnapi_quickcheck/tests/mul_1.lst | 10
-rw-r--r--  tools/nnapi_quickcheck/tests/mul_2.cpp | 150
-rw-r--r--  tools/nnapi_quickcheck/tests/mul_2.lst | 9
-rw-r--r--  tools/nnapi_quickcheck/tests/mul_quan_1.cpp | 152
-rw-r--r--  tools/nnapi_quickcheck/tests/mul_quan_1.lst | 10
-rw-r--r--  tools/nnapi_quickcheck/tests/relu1_1.cpp | 121
-rw-r--r--  tools/nnapi_quickcheck/tests/relu1_1.lst | 6
-rw-r--r--  tools/nnapi_quickcheck/tests/relu6_1.cpp | 125
-rw-r--r--  tools/nnapi_quickcheck/tests/relu6_1.lst | 6
-rw-r--r--  tools/nnapi_quickcheck/tests/relu6_quan_1.cpp | 123
-rw-r--r--  tools/nnapi_quickcheck/tests/relu6_quan_1.lst | 6
-rw-r--r--  tools/nnapi_quickcheck/tests/relu_1.cpp | 125
-rw-r--r--  tools/nnapi_quickcheck/tests/relu_1.lst | 6
-rw-r--r--  tools/nnapi_quickcheck/tests/relu_2.cpp | 128
-rw-r--r--  tools/nnapi_quickcheck/tests/relu_2.lst | 7
-rw-r--r--  tools/nnapi_quickcheck/tests/relu_3.cpp | 131
-rw-r--r--  tools/nnapi_quickcheck/tests/relu_3.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/relu_quan_1.cpp | 123
-rw-r--r--  tools/nnapi_quickcheck/tests/relu_quan_1.lst | 6
-rw-r--r--  tools/nnapi_quickcheck/tests/reshape_1.cpp | 141
-rw-r--r--  tools/nnapi_quickcheck/tests/reshape_1.lst | 7
-rw-r--r--  tools/nnapi_quickcheck/tests/reshape_quan_1.cpp | 143
-rw-r--r--  tools/nnapi_quickcheck/tests/reshape_quan_1.lst | 7
-rw-r--r--  tools/nnapi_quickcheck/tests/resize_bilinear_1.cpp | 141
-rw-r--r--  tools/nnapi_quickcheck/tests/resize_bilinear_1.lst | 10
-rw-r--r--  tools/nnapi_quickcheck/tests/softmax_1.cpp | 120
-rw-r--r--  tools/nnapi_quickcheck/tests/softmax_1.lst | 6
-rw-r--r--  tools/nnapi_quickcheck/tests/softmax_2.cpp | 139
-rw-r--r--  tools/nnapi_quickcheck/tests/softmax_2.lst | 11
-rw-r--r--  tools/nnapi_quickcheck/tests/softmax_quan_1.cpp | 122
-rw-r--r--  tools/nnapi_quickcheck/tests/softmax_quan_1.lst | 6
-rw-r--r--  tools/nnapi_quickcheck/tests/split_1.cpp | 153
-rw-r--r--  tools/nnapi_quickcheck/tests/split_1.lst | 10
-rw-r--r--  tools/nnapi_quickcheck/tests/split_2.cpp | 153
-rw-r--r--  tools/nnapi_quickcheck/tests/split_2.lst | 10
-rw-r--r--  tools/nnapi_quickcheck/tests/split_3.cpp | 147
-rw-r--r--  tools/nnapi_quickcheck/tests/split_3.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/split_4.cpp | 147
-rw-r--r--  tools/nnapi_quickcheck/tests/split_4.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/sub_1.cpp | 159
-rw-r--r--  tools/nnapi_quickcheck/tests/sub_1.lst | 13
-rw-r--r--  tools/nnapi_quickcheck/tests/sub_2.cpp | 152
-rw-r--r--  tools/nnapi_quickcheck/tests/sub_2.lst | 10
-rw-r--r--  tools/nnapi_quickcheck/tests/sub_3.cpp | 144
-rw-r--r--  tools/nnapi_quickcheck/tests/sub_3.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/sub_4.cpp | 152
-rw-r--r--  tools/nnapi_quickcheck/tests/sub_4.lst | 11
-rw-r--r--  tools/nnapi_quickcheck/tests/sub_5.cpp | 188
-rw-r--r--  tools/nnapi_quickcheck/tests/sub_5.lst | 13
-rw-r--r--  tools/nnapi_quickcheck/tests/sub_6.cpp | 188
-rw-r--r--  tools/nnapi_quickcheck/tests/sub_6.lst | 13
-rw-r--r--  tools/nnapi_quickcheck/tests/tanh_1.cpp | 134
-rw-r--r--  tools/nnapi_quickcheck/tests/tanh_1.lst | 8
-rw-r--r--  tools/nnapi_quickcheck/tests/topk_v2_1.cpp | 138
-rw-r--r--  tools/nnapi_quickcheck/tests/topk_v2_1.lst | 6
-rw-r--r--  tools/nnapi_test/src/nnapi_test.cc | 26
-rw-r--r--  tools/opencl_tool/CMakeLists.txt | 12
-rw-r--r--  tools/opencl_tool/src/opencl_info.cc | 154
-rw-r--r--  tools/pbfile_tool/convert_ckpt_to_pb.py | 80
-rwxr-xr-x  tools/pbfile_tool/pb_info.py | 158
-rw-r--r--  tools/pbfile_tool/readme.md | 17
-rw-r--r--  tools/tensorflow_model_freezer/__init__.py | 15
-rw-r--r--  tools/tensorflow_model_freezer/base_freezer.py | 201
-rw-r--r--  tools/tensorflow_model_freezer/model_freezer_util.py | 233
-rw-r--r--  tools/tensorflow_model_freezer/readme.md | 20
-rwxr-xr-x  tools/tensorflow_model_freezer/sample/DIV_gen.py | 148
-rwxr-xr-x  tools/tensorflow_model_freezer/sample/MUL_gen.py | 128
-rw-r--r--  tools/tensorflow_model_freezer/sample/Operation_gen.py | 214
-rwxr-xr-x  tools/tensorflow_model_freezer/sample/SQUEEZE_gen.py | 127
-rwxr-xr-x  tools/tensorflow_model_freezer/sample/TOPK_gen.py | 119
-rw-r--r--  tools/tensorflow_model_freezer/sample/__init__.py | 15
-rw-r--r--  tools/test_driver/README.md | 63
-rw-r--r--  tools/test_driver/benchmark_op_list.txt | 11
-rwxr-xr-x  tools/test_driver/common.sh | 34
-rw-r--r--  tools/test_driver/neurun_frameworktest_list.txt | 10
-rwxr-xr-x  tools/test_driver/print_to_json.sh | 35
-rwxr-xr-x  tools/test_driver/py/common.py | 39
-rwxr-xr-x  tools/test_driver/py/run_frameworktest.py | 199
-rwxr-xr-x  tools/test_driver/py/run_unittest.py | 187
-rwxr-xr-x  tools/test_driver/py/test_driver.py | 398
-rwxr-xr-x  tools/test_driver/run_benchmark.sh | 146
-rwxr-xr-x  tools/test_driver/run_benchmark_acl.sh | 113
-rwxr-xr-x  tools/test_driver/run_benchmark_op.sh | 209
-rw-r--r--  tools/test_driver/run_benchmark_tflite_model.in | 1
-rwxr-xr-x  tools/test_driver/run_benchmark_tflite_model.sh | 125
-rwxr-xr-x  tools/test_driver/run_frameworktest.sh | 95
-rwxr-xr-x  tools/test_driver/run_unittest.sh | 109
-rwxr-xr-x  tools/test_driver/test_driver.sh | 372
-rw-r--r--  tools/tflite_benchmark/CMakeLists.txt | 5
-rw-r--r--  tools/tflite_benchmark/src/tflite_benchmark.cc | 231
-rw-r--r--  tools/tflite_benchmark_model/.FORMATDENY (renamed from tools/cross/apt_proxy) | 0
-rw-r--r--  tools/tflite_benchmark_model/CMakeLists.txt | 6
-rw-r--r--  tools/tflite_benchmark_model/README.md | 209
-rw-r--r--  tools/tflite_benchmark_model/benchmark_main.cc | 53
-rw-r--r--  tools/tflite_benchmark_model/benchmark_model.cc | 175
-rw-r--r--  tools/tflite_benchmark_model/benchmark_model.h | 177
-rw-r--r--  tools/tflite_benchmark_model/benchmark_params.cc | 73
-rw-r--r--  tools/tflite_benchmark_model/benchmark_params.h | 118
-rw-r--r--  tools/tflite_benchmark_model/benchmark_tflite_model.cc | 360
-rw-r--r--  tools/tflite_benchmark_model/benchmark_tflite_model.h | 95
-rw-r--r--  tools/tflite_benchmark_model/command_line_flags.cc | 214
-rw-r--r--  tools/tflite_benchmark_model/command_line_flags.h | 141
-rw-r--r--  tools/tflite_benchmark_model/logging.h | 92
-rw-r--r--  tools/tflite_benchmark_model/profile_summarizer.cc | 164
-rw-r--r--  tools/tflite_benchmark_model/profile_summarizer.h | 55
-rw-r--r--  tools/tflite_examples/CMakeLists.txt | 2
-rw-r--r--  tools/tflite_examples/src/conv.cpp | 330
-rw-r--r--  tools/tflite_run/CMakeLists.txt | 26
-rw-r--r--  tools/tflite_run/README.md | 91
-rw-r--r--  tools/tflite_run/src/args.cc | 125
-rw-r--r--  tools/tflite_run/src/args.h | 55
-rw-r--r--  tools/tflite_run/src/bin_image.cc | 71
-rw-r--r--  tools/tflite_run/src/bin_image.h | 43
-rw-r--r--  tools/tflite_run/src/tensor_dumper.cc | 54
-rw-r--r--  tools/tflite_run/src/tensor_dumper.h | 38
-rw-r--r--  tools/tflite_run/src/tensor_loader.cc | 67
-rw-r--r--  tools/tflite_run/src/tensor_loader.h | 35
-rw-r--r--  tools/tflite_run/src/tflite_run.cc | 253
-rw-r--r--  tools/tflite_run/src/tflite_test.cc | 19
-rw-r--r--  tools/tflitefile_tool/README.md | 81
-rwxr-xr-x  tools/tflitefile_tool/model_parser.py | 110
-rwxr-xr-x  tools/tflitefile_tool/operation.py | 199
-rwxr-xr-x  tools/tflitefile_tool/operator_parser.py | 113
-rwxr-xr-x  tools/tflitefile_tool/operator_wrapping.py | 120
-rwxr-xr-x  tools/tflitefile_tool/perf_predictor.py | 15
-rwxr-xr-x  tools/tflitefile_tool/select_operator.py | 825
-rwxr-xr-x  tools/tflitefile_tool/tensor_wrapping.py | 54
-rw-r--r--  tools/tflitefile_tool/tflite/ActivationFunctionType.py | 12
-rw-r--r--  tools/tflitefile_tool/tflite/AddOptions.py | 39
-rw-r--r--  tools/tflitefile_tool/tflite/ArgMaxOptions.py | 39
-rw-r--r--  tools/tflitefile_tool/tflite/ArgMinOptions.py | 39
-rw-r--r--  tools/tflitefile_tool/tflite/BatchToSpaceNDOptions.py | 28
-rw-r--r--  tools/tflitefile_tool/tflite/BidirectionalSequenceRNNOptions.py | 51
-rw-r--r--  tools/tflitefile_tool/tflite/Buffer.py | 61
-rw-r--r--  tools/tflitefile_tool/tflite/BuiltinOperator.py | 86
-rw-r--r--  tools/tflitefile_tool/tflite/BuiltinOptions.py | 65
-rw-r--r--  tools/tflitefile_tool/tflite/CallOptions.py | 39
-rw-r--r--  tools/tflitefile_tool/tflite/CastOptions.py | 50
-rw-r--r--  tools/tflitefile_tool/tflite/CombinerType.py | 9
-rw-r--r--  tools/tflitefile_tool/tflite/ConcatEmbeddingsOptions.py | 105
-rw-r--r--  tools/tflitefile_tool/tflite/ConcatenationOptions.py | 50
-rw-r--r--  tools/tflitefile_tool/tflite/Conv2DOptions.py | 94
-rw-r--r--  tools/tflitefile_tool/tflite/CustomOptionsFormat.py | 7
-rw-r--r--  tools/tflitefile_tool/tflite/DepthwiseConv2DOptions.py | 83
-rw-r--r--  tools/tflitefile_tool/tflite/DequantizeOptions.py | 28
-rw-r--r--  tools/tflitefile_tool/tflite/DivOptions.py | 39
-rw-r--r--  tools/tflitefile_tool/tflite/EmbeddingLookupSparseOptions.py | 39
-rw-r--r--  tools/tflitefile_tool/tflite/EqualOptions.py | 28
-rw-r--r--  tools/tflitefile_tool/tflite/ExpOptions.py | 28
-rw-r--r--  tools/tflitefile_tool/tflite/ExpandDimsOptions.py | 28
-rw-r--r--  tools/tflitefile_tool/tflite/FakeQuantOptions.py | 72
-rw-r--r--  tools/tflitefile_tool/tflite/FullyConnectedOptions.py | 50
-rw-r--r--  tools/tflitefile_tool/tflite/FullyConnectedOptionsWeightsFormat.py | 8
-rw-r--r--  tools/tflitefile_tool/tflite/GatherOptions.py | 39
-rw-r--r--  tools/tflitefile_tool/tflite/GreaterEqualOptions.py | 28
-rw-r--r--  tools/tflitefile_tool/tflite/GreaterOptions.py | 28
-rw-r--r--  tools/tflitefile_tool/tflite/L2NormOptions.py | 39
-rw-r--r--  tools/tflitefile_tool/tflite/LSHProjectionOptions.py | 39
-rw-r--r--  tools/tflitefile_tool/tflite/LSHProjectionType.py | 9
-rw-r--r--  tools/tflitefile_tool/tflite/LSTMKernelType.py | 8
-rw-r--r--  tools/tflitefile_tool/tflite/LSTMOptions.py | 72
-rw-r--r--  tools/tflitefile_tool/tflite/LessEqualOptions.py | 28
-rw-r--r--  tools/tflitefile_tool/tflite/LessOptions.py | 28
-rw-r--r--  tools/tflitefile_tool/tflite/LocalResponseNormalizationOptions.py | 72
-rw-r--r--  tools/tflitefile_tool/tflite/LogSoftmaxOptions.py | 28
-rw-r--r--  tools/tflitefile_tool/tflite/MaximumMinimumOptions.py | 28
-rw-r--r--  tools/tflitefile_tool/tflite/MeanOptions.py | 39
-rw-r--r--  tools/tflitefile_tool/tflite/Model.py | 171
-rw-r--r--  tools/tflitefile_tool/tflite/MulOptions.py | 39
-rw-r--r--  tools/tflitefile_tool/tflite/NegOptions.py | 28
-rw-r--r--  tools/tflitefile_tool/tflite/NotEqualOptions.py | 28
-rw-r--r--  tools/tflitefile_tool/tflite/Operator.py | 208
-rw-r--r--  tools/tflitefile_tool/tflite/OperatorCode.py | 62
-rw-r--r--  tools/tflitefile_tool/tflite/PadOptions.py | 28
-rw-r--r--  tools/tflitefile_tool/tflite/PadV2Options.py | 28
-rw-r--r--  tools/tflitefile_tool/tflite/Padding.py | 8
-rw-r--r--  tools/tflitefile_tool/tflite/Pool2DOptions.py | 94
-rw-r--r--  tools/tflitefile_tool/tflite/PowOptions.py | 28
-rw-r--r--  tools/tflitefile_tool/tflite/QuantizationParameters.py | 160
-rw-r--r--  tools/tflitefile_tool/tflite/RNNOptions.py | 39
-rw-r--r--  tools/tflitefile_tool/tflite/ReducerOptions.py | 39
-rw-r--r--  tools/tflitefile_tool/tflite/ReshapeOptions.py | 61
-rw-r--r--  tools/tflitefile_tool/tflite/ResizeBilinearOptions.py | 39
-rw-r--r--  tools/tflitefile_tool/tflite/SVDFOptions.py | 50
-rw-r--r--  tools/tflitefile_tool/tflite/SelectOptions.py | 28
-rw-r--r--  tools/tflitefile_tool/tflite/SequenceRNNOptions.py | 50
-rw-r--r--  tools/tflitefile_tool/tflite/ShapeOptions.py | 39
-rw-r--r--  tools/tflitefile_tool/tflite/SkipGramOptions.py | 61
-rw-r--r--  tools/tflitefile_tool/tflite/SliceOptions.py | 28
-rw-r--r--  tools/tflitefile_tool/tflite/SoftmaxOptions.py | 39
-rw-r--r--  tools/tflitefile_tool/tflite/SpaceToBatchNDOptions.py | 28
-rw-r--r--  tools/tflitefile_tool/tflite/SpaceToDepthOptions.py | 39
-rw-r--r--  tools/tflitefile_tool/tflite/SparseToDenseOptions.py | 39
-rw-r--r--  tools/tflitefile_tool/tflite/SplitOptions.py | 39
-rw-r--r--  tools/tflitefile_tool/tflite/SqueezeOptions.py | 61
-rw-r--r--  tools/tflitefile_tool/tflite/StridedSliceOptions.py | 83
-rw-r--r--  tools/tflitefile_tool/tflite/SubGraph.py | 164
-rw-r--r--  tools/tflitefile_tool/tflite/SubOptions.py | 39
-rw-r--r--  tools/tflitefile_tool/tflite/Tensor.py | 122
-rw-r--r--  tools/tflitefile_tool/tflite/TensorType.py | 15
-rw-r--r--  tools/tflitefile_tool/tflite/TileOptions.py | 28
-rw-r--r--  tools/tflitefile_tool/tflite/TopKV2Options.py | 28
-rw-r--r--  tools/tflitefile_tool/tflite/TransposeConvOptions.py | 61
-rw-r--r--  tools/tflitefile_tool/tflite/TransposeOptions.py | 28
-rw-r--r--  tools/tflitefile_tool/tflite/__init__.py | 0
285 files changed, 22685 insertions, 269 deletions
diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt
index 9b82628cb..f57262f29 100644
--- a/tools/CMakeLists.txt
+++ b/tools/CMakeLists.txt
@@ -1 +1,9 @@
+add_subdirectory(tflite_run)
+add_subdirectory(tflite_benchmark)
+if (BUILD_TFLITE_BENCHMARK_MODEL)
+ add_subdirectory(tflite_benchmark_model)
+endif()
+add_subdirectory(tflite_examples)
add_subdirectory(nnapi_test)
+add_subdirectory(nnapi_quickcheck)
+add_subdirectory(opencl_tool)
diff --git a/tools/cross/build_android_rootfs.sh b/tools/cross/build_android_rootfs.sh
index 3baea2e2c..d0b4afd7b 100755
--- a/tools/cross/build_android_rootfs.sh
+++ b/tools/cross/build_android_rootfs.sh
@@ -7,10 +7,11 @@ set -x
usage()
{
- echo "Usage: $0 [BuildArch] [NDKVersion] [APILevel]"
+ echo "Usage: $0 [BuildArch] [NDKVersion] [APILevel] [ACL]"
echo "BuildArch : arm or arm64"
echo "NDKVersion : r16b or higher (Must start with 'r')"
echo "APILevel : 27 or higher"
+ echo "ACL : acl (default), noacl (exclude ACL)"
exit 1
}
@@ -18,6 +19,7 @@ __CrossDir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
__BuildArch=arm64
__NDKVersion=r16b
__APILevel="27"
+__ACL=1
__UnprocessedBuildArgs=
for i in "$@" ; do
@@ -39,24 +41,74 @@ for i in "$@" ; do
27)
__APILevel="27"
;;
+ acl)
+ __ACL=1
+ ;;
+ noacl)
+ __ACL=0
+ ;;
*)
__UnprocessedBuildArgs="$__UnprocessedBuildArgs $i"
;;
esac
done
+__ToolchainDir=${TOOLCHAIN_DIR:-"${__CrossDir}/ndk/${__NDKVersion}"}
__RootfsDir=${ROOTFS_DIR:-"${__CrossDir}/rootfs/${__BuildArch}.android"}
-__TempDir=${TEMP_DIR:-"/tmp"}
NDK_DIR=android-ndk-${__NDKVersion}
NDK_ZIP=${NDK_DIR}-linux-x86_64.zip
+if [[ -e $__RootfsDir ]]; then
+ echo "ERROR: $__RootfsDir already exists"
+ exit 255
+fi
+if [[ -e $__RootfsDir.gnustl ]]; then
+ echo "ERROR: $__RootfsDir.gnustl already exists"
+ exit 255
+fi
+
echo "Downloading Android NDK"
-wget -nv -nc https://dl.google.com/android/repository/$NDK_ZIP -O $__TempDir/$NDK_ZIP
+rm -rf "$__ToolchainDir"
+mkdir -p "$__ToolchainDir"
+wget -nv -nc https://dl.google.com/android/repository/$NDK_ZIP -O $__ToolchainDir/$NDK_ZIP
echo "Unzipping Android NDK"
-unzip -qq -o $__TempDir/$NDK_ZIP -d $__TempDir
+unzip -qq -o $__ToolchainDir/$NDK_ZIP -d $__ToolchainDir
+rm $__ToolchainDir/$NDK_ZIP
+mv $__ToolchainDir/${NDK_DIR} "$__ToolchainDir/ndk"
echo "Generating standalone toolchain and rootfs to $__RootfsDir"
-$__TempDir/$NDK_DIR/build/tools/make-standalone-toolchain.sh --arch=$__BuildArch --platform=android-$__APILevel --install-dir=$__RootfsDir
+$__ToolchainDir/ndk/build/tools/make-standalone-toolchain.sh --arch=$__BuildArch --platform=android-$__APILevel --install-dir=$__RootfsDir
+
+# ACL build from source needs --stl=gnustl
+echo "Generating standalone toolchain and rootfs with to $__RootfsDir.gnustl"
+$__ToolchainDir/ndk/build/tools/make-standalone-toolchain.sh --arch=$__BuildArch --platform=android-$__APILevel --install-dir=$__RootfsDir.gnustl --stl=gnustl
+
+# Install boost
+
+# NOTE This copies only headers, so only header-only libraries will work
+echo "Installing boost library (HEADER-ONLY)"
+
+BOOST_VERSION=1_67_0
+BOOST_BASENAME=boost_$BOOST_VERSION
+wget -nv -nc https://dl.bintray.com/boostorg/release/1.67.0/source/$BOOST_BASENAME.tar.gz -O $__ToolchainDir/$BOOST_BASENAME.tar.gz
+
+tar xzf $__ToolchainDir/$BOOST_BASENAME.tar.gz -C $__ToolchainDir
+cp -rv $__ToolchainDir/$BOOST_BASENAME/boost $__RootfsDir/sysroot/usr/include
+
+if [[ "$__ACL" == 1 ]]; then
+ echo "Installing arm compute library"
+
+ ACL_VERSION=18.03
+ ACL_BASENAME=arm_compute-v$ACL_VERSION-bin-android
+ wget -nv -nc https://github.com/ARM-software/ComputeLibrary/releases/download/v$ACL_VERSION/$ACL_BASENAME.tar.gz -O $__ToolchainDir/$ACL_BASENAME.tar.gz
+
+ tar xzf $__ToolchainDir/$ACL_BASENAME.tar.gz -C $__ToolchainDir
+ cp -rv $__ToolchainDir/$ACL_BASENAME/arm_compute $__RootfsDir/sysroot/usr/include
+ cp -rv $__ToolchainDir/$ACL_BASENAME/include/* $__RootfsDir/sysroot/usr/include
+ cp -rv $__ToolchainDir/$ACL_BASENAME/support $__RootfsDir/sysroot/usr/include
+ cp -rv $__ToolchainDir/$ACL_BASENAME/util $__RootfsDir/sysroot/usr/include
+ cp -rv $__ToolchainDir/$ACL_BASENAME/lib/android-arm64-v8a-cl/* $__RootfsDir/sysroot/usr/lib # TODO hardcoded path "arm64-v8a"
+fi
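
For example, running ./build_android_rootfs.sh arm64 r16b 27 noacl generates the standalone toolchain plus both rootfs variants under tools/cross/rootfs/arm64.android and tools/cross/rootfs/arm64.android.gnustl, while skipping the prebuilt ACL install step.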
diff --git a/tools/cross/build_rootfs.sh b/tools/cross/build_rootfs.sh
index f52ca6338..11e5eff76 100755
--- a/tools/cross/build_rootfs.sh
+++ b/tools/cross/build_rootfs.sh
@@ -3,12 +3,13 @@ set -x
usage()
{
- echo "Usage: $0 [BuildArch] [LinuxCodeName] [--skipunmount] [--skipproxy]"
+ echo "Usage: $0 [BuildArch] [LinuxCodeName] [--setproxy=IP] [--skipunmount]"
echo "BuildArch can be: arm(default), arm64 and armel"
echo "LinuxCodeName - optional, Code name for Linux, can be: xenial(default), trusty"
echo " If BuildArch is armel, this can be tizen(default)"
+ echo "--setproxy=IP - optional, IP is the proxy server IP address or url with portnumber"
+ echo " default no proxy. Example: --setproxy=127.1.2.3:8080"
echo "--skipunmount - optional, will skip the unmount of rootfs folder."
- echo "--skipproxy - optional, will skip the adding proxy information for Seoul R&D Campus."
exit 1
}
@@ -20,13 +21,13 @@ __BuildArch=arm
__UbuntuArch=armhf
__LinuxCodeName=xenial
__SkipUnmount=0
-__SkipProxy=0
-
+__IsProxySet=0
+__Apt=""
# base development support
__UbuntuPackages="build-essential"
# other development supports
-__UbuntuPackages+=" libboost-all-dev"
+__UbuntuPackages+=" libboost-all-dev ocl-icd-opencl-dev"
# symlinks fixer
__UbuntuPackages+=" symlinks"
@@ -72,12 +73,16 @@ for i in "$@" ; do
xenial)
__LinuxCodeName=xenial
;;
+ --setproxy*)
+ proxyip="${i#*=}"
+ __Apt="Acquire::http::proxy \"http://$proxyip/\";\n"
+ __Apt+="Acquire::https::proxy \"http://$proxyip/\";\n"
+ __Apt+="Acquire::ftp::proxy \"ftp://$proxyip/\";"
+ __IsProxySet=1
+ ;;
--skipunmount)
__SkipUnmount=1
;;
- --skipproxy)
- __SkipProxy=1
- ;;
*)
__UnprocessedBuildArgs="$__UnprocessedBuildArgs $i"
;;
@@ -97,9 +102,9 @@ if [ -d "$__RootfsDir" ]; then
rm -rf $__RootfsDir
fi
-if [ $__SkipProxy == 0 ] && [ "$__Tizen" != "tizen" ]; then
+if [ $__IsProxySet == 1 ] && [ "$__Tizen" != "tizen" ]; then
mkdir -p $__RootfsDir/etc/apt/apt.conf.d
- cp $__CrossDir/apt_proxy $__RootfsDir/etc/apt/apt.conf.d/90proxy
+ echo -e "$__Apt" >> $__RootfsDir/etc/apt/apt.conf.d/90proxy
fi
if [[ -n $__LinuxCodeName ]]; then
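
For reference, invoking the script with --setproxy=127.1.2.3:8080 makes the echo -e above write the following apt configuration into $__RootfsDir/etc/apt/apt.conf.d/90proxy:

    Acquire::http::proxy "http://127.1.2.3:8080/";
    Acquire::https::proxy "http://127.1.2.3:8080/";
    Acquire::ftp::proxy "ftp://127.1.2.3:8080/";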
diff --git a/tools/extract_weights_from_tflite/extract.py b/tools/extract_weights_from_tflite/extract.py
new file mode 100755
index 000000000..afde08c69
--- /dev/null
+++ b/tools/extract_weights_from_tflite/extract.py
@@ -0,0 +1,90 @@
+#!/usr/bin/python
+
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import sys
+import json
+import struct
+
+
+def printUsage(progname):
+ print("%s <.json>" % (progname))
+ print(" This program extracts weight and bias values in TFLite format [N,H,W,C]")
+ print(" to .npy files in ACL format [N,C,H,W]")
+ print(" .npy filenames is set according to the layer's name")
+
+
+if len(sys.argv) < 2:
+ printUsage(sys.argv[0])
+ exit()
+
+filename = sys.argv[1]
+f = open(filename)
+j = json.loads(f.read())
+
+tensors = j['subgraphs'][0]['tensors']
+buffer_name_map = {}
+
+for t in tensors:
+ if 'buffer' in t:
+ if t['buffer'] in buffer_name_map:
+ print 'Found a buffer index conflict!'
+ print t
+ print buffer_name_map
+ comps = t['name'].split('/')
+ names = []
+ if len(comps) > 1 and comps[0] == comps[1]:
+ names = comps[2:]
+ else:
+ names = comps[1:]
+
+ layername = '_'.join(names)
+
+ shape = t['shape']
+ buffer_name_map[t['buffer']] = {'name': layername, "shape": shape}
+
+for i in range(len(j['buffers'])):
+ b = j['buffers'][i]
+ if 'data' in b:
+ if i not in buffer_name_map:
+ print "buffer %d is not found in buffer_name_map. skip printing the buffer..."
+ continue
+
+ filename = "%s.npy" % (buffer_name_map[i]['name'])
+ shape = buffer_name_map[i]['shape']
+ buf = struct.pack('%sB' % len(b['data']), *b['data'])
+
+ elem_size = 1
+ for s in shape:
+ elem_size *= s
+
+ l = struct.unpack('%sf' % elem_size, buf)
+ n = np.array(l, dtype='f')
+ n = n.reshape(shape)
+ if len(shape) == 4:
+ # [N,H,W,C] -> [N,C,H,W]
+ n = np.rollaxis(n, 3, 1)
+ elif len(shape) == 3:
+ # [H,W,C] -> [C,H,W]
+ n = np.rollaxis(n, 2, 0)
+ elif len(shape) == 1:
+ pass
+ else:
+ print "Undefined length: conversion skipped. shape=", shape
+ #print shape, filename, n.shape
+ np.save(filename, n)
+
+print "Done."
diff --git a/tools/extract_weights_from_tflite/extract_from_tflite.sh b/tools/extract_weights_from_tflite/extract_from_tflite.sh
new file mode 100755
index 000000000..be84f25f3
--- /dev/null
+++ b/tools/extract_weights_from_tflite/extract_from_tflite.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+SCRIPT_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+ROOT_PATH=$SCRIPT_PATH/../..
+FLATC=$ROOT_PATH/Product/out/bin/flatc
+
+if [ ! -e "$1" ]; then
+ echo "file not exists: $1"
+ exit 1
+fi
+
+TFLITE_FILE=$1
+TFLITE_FILENAME=${TFLITE_FILE##*\/}
+TFLITE_JSON=${TFLITE_FILENAME%\.tflite}.json
+
+$FLATC --json --strict-json $ROOT_PATH/externals/tensorflow/tensorflow/contrib/lite/schema/schema.fbs -- $TFLITE_FILE
+$SCRIPT_PATH/extract.py $TFLITE_JSON
diff --git a/tools/extract_weights_from_tflite/print_op.py b/tools/extract_weights_from_tflite/print_op.py
new file mode 100755
index 000000000..16aff9720
--- /dev/null
+++ b/tools/extract_weights_from_tflite/print_op.py
@@ -0,0 +1,58 @@
+#!/usr/bin/python
+
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import sys
+import json
+import struct
+
+
+def printUsage(progname):
+ print("%s <.json>" % (progname))
+ print(" This program shows TFLite operations with its input/output shapes.")
+
+
+if len(sys.argv) < 2:
+ printUsage(sys.argv[0])
+ exit()
+
+filename = sys.argv[1]
+f = open(filename)
+j = json.loads(f.read())
+
+tensors = j['subgraphs'][0]['tensors']
+operators = j['subgraphs'][0]['operators']
+opcodes = j['operator_codes']
+
+for o in operators:
+ op_name = "Undefined"
+ if 'opcode_index' in o:
+ op = opcodes[o['opcode_index']]
+ if 'custom_code' in op:
+ op_name = op['custom_code']
+ elif 'builtin_code' in op:
+ op_name = op['builtin_code']
+ elif 'builtin_options_type' in o:
+ # if we cannot find opcode_index, print option type instead.
+ op_name = o['builtin_options_type']
+ print "Layer:", op_name
+
+ print " Input shapes ---"
+ for inp in o['inputs']:
+ print " ", tensors[inp]['shape']
+ print " Output shapes ---"
+ for outp in o['outputs']:
+ print " ", tensors[outp]['shape']
diff --git a/tools/image_importer/README.md b/tools/image_importer/README.md
new file mode 100644
index 000000000..abc088fcc
--- /dev/null
+++ b/tools/image_importer/README.md
@@ -0,0 +1,15 @@
+# A simple image generator
+
+## How to execute:
+`$ ./imagegen.py`
+Two output images are generated: `image.bin` and `image.ppm`.
+
+## Changing the size of the output image
+Change variable `image_size` at the beginning of `imagegen.py`.
+
+## Changing output image format
+To output another image format, save with the corresponding extension as below:
+`im.save("image.jpg")`
+
+# An image importer
+Need description.
diff --git a/tools/image_importer/image_importer.py b/tools/image_importer/image_importer.py
new file mode 100755
index 000000000..77508e1b6
--- /dev/null
+++ b/tools/image_importer/image_importer.py
@@ -0,0 +1,33 @@
+#!/usr/bin/python
+
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from PIL import Image
+import sys
+import struct
+
+if (len(sys.argv) < 3):
+ print("Usage: %s <input image file> <output bin file>" % (sys.argv[0]))
+ exit(0)
+
+img = Image.open(sys.argv[1])
+outfile = sys.argv[2]
+
+print "Image format = ", img.bits, img.size, img.format
+
+with open(outfile, 'wb') as f:
+ f.write(img.tobytes())
+
+print "Done."
diff --git a/tools/image_importer/imagegen.py b/tools/image_importer/imagegen.py
new file mode 100755
index 000000000..3c7af0d7f
--- /dev/null
+++ b/tools/image_importer/imagegen.py
@@ -0,0 +1,40 @@
+#!/usr/bin/python
+
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from PIL import Image
+import numpy as np
+
+image_size = {
+ "H": 10,
+ "W": 10,
+ "C": 3 # C is fixed as 3 for R,G,B channels
+}
+
+rgb = np.zeros([image_size['H'], image_size['W'], image_size["C"]], dtype=np.uint8)
+for y in range(image_size["H"]):
+ for x in range(image_size["W"]):
+ for c in range(image_size["C"]):
+ rgb[y][x][c] = 255 #value range = [0~255]
+
+im = Image.fromarray(rgb)
+im.save("image.ppm")
+
+# image can be saved as .jpg or .png
+# im.save("image.jpg")
+# im.save("image.png")
+
+with open("image.bin", "wb") as f:
+ f.write(im.tobytes())
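
The image.bin written above is just H*W*C raw uint8 values in row-major order, so it can be read straight back; a sketch assuming the default 10x10x3 size:

    import numpy as np

    with open("image.bin", "rb") as f:
        raw = f.read()

    rgb = np.frombuffer(raw, dtype=np.uint8).reshape(10, 10, 3)  # H, W, C
    assert rgb[0][0][0] == 255  # imagegen.py fills every channel with 255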
diff --git a/tools/modelgen/CONV_2D.template.json b/tools/modelgen/CONV_2D.template.json
new file mode 100644
index 000000000..34ce09f35
--- /dev/null
+++ b/tools/modelgen/CONV_2D.template.json
@@ -0,0 +1,102 @@
+{
+ "version":3,
+ "operator_codes": [
+ {
+ "builtin_code": "CONV_2D"
+ }
+ ],
+ "subgraphs": [
+ {
+ "tensors": [
+ {
+ "shape": [
+ 1,
+ 3,
+ 3,
+ 3
+ ],
+ "buffer": 0,
+ "name": "input",
+ "quantization": {
+ "min": [
+ 0.0
+ ],
+ "max": [
+ 255.0
+ ]
+ }
+ },
+ {
+ "shape": [
+ 1,
+ 3,
+ 3,
+ 3
+ ],
+ "buffer": 1,
+ "name": "weights",
+ "quantization": {
+ }
+ },
+ {
+ "shape": [
+ 1
+ ],
+ "buffer": 2,
+ "name": "convolution_bias",
+ "quantization": {
+ }
+ },
+ {
+ "shape": [
+ 1,
+ 3,
+ 3,
+ 1
+ ],
+ "buffer": 3,
+ "name": "output",
+ "quantization": {
+ }
+ }
+ ],
+ "inputs": [
+ 0
+ ],
+ "outputs": [
+ 3
+ ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [
+ 0,
+ 1,
+ 2
+ ],
+ "outputs": [
+ 3
+ ],
+ "builtin_options_type": "Conv2DOptions",
+ "builtin_options": {
+ "padding": "SAME",
+ "stride_w": 1,
+ "stride_h": 1,
+ "fused_activation_function": "RELU"
+ }
+ }
+ ]
+ }
+ ],
+ "description": "TOCO Converted.",
+ "buffers": [
+ {},
+ {
+ "data": []
+ },
+ {
+ "data": []
+ },
+ {}
+ ]
+}
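
In this template the tensor index and buffer index happen to coincide (input, weights, convolution_bias, and output use buffers 0 through 3), and only the weights and bias buffers carry data. A small sketch that prints the mapping, assuming the template file is in the current directory:

    import json

    with open('CONV_2D.template.json') as f:
        graph = json.load(f)

    for t in graph['subgraphs'][0]['tensors']:
        # e.g. "weights -> buffer 1"
        print("%s -> buffer %d" % (t['name'], t['buffer']))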
diff --git a/tools/modelgen/modelgen.py b/tools/modelgen/modelgen.py
new file mode 100755
index 000000000..112a1e8a1
--- /dev/null
+++ b/tools/modelgen/modelgen.py
@@ -0,0 +1,98 @@
+#!/usr/bin/python
+
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import numpy as np
+import os
+import struct
+
+dir_path = os.path.dirname(os.path.realpath(__file__))
+
+builtin_ops = [
+ "CONV_2D",
+]
+
+# N,H,W,C
+input_shape = [1, 6, 6, 3]
+kernel_shape = [1, 3, 3, 3]
+
+# load template file.
+with open(dir_path + "/CONV_2D.template.json", 'r') as f:
+ graph = json.loads(f.read())
+ f.close()
+
+tensors = graph['subgraphs'][0]['tensors']
+buffers = graph['buffers']
+
+buffer_map = {}
+
+# shape setup
+for t in tensors:
+ if t['name'] == 'input':
+ t['shape'] = input_shape
+ elif t['name'] == 'weights':
+ t['shape'] = kernel_shape
+ elif t['name'] == 'convolution_bias':
+ # bias size = N of weight
+ t['shape'] = [kernel_shape[0]]
+ elif t['name'] == 'output':
+ # For now, assume the SAME padding algorithm with stride 1,
+ # so the output H/W equal the input H/W.
+ t['shape'][0] = 1 # N
+ t['shape'][1] = input_shape[1] # H
+ t['shape'][2] = input_shape[2] # W
+ t['shape'][3] = kernel_shape[0] # C
+
+ buffer_map[t['buffer']] = {'name': t['name'], 'shape': t['shape']}
+
+# buffer setup
+for i in range(len(buffers)):
+ if buffer_map[i]['name'] == 'weights':
+ shape = buffer_map[i]['shape']
+
+ weight = np.ones(shape)
+ n = shape[0]
+ h = shape[1]
+ w = shape[2]
+ c = shape[3]
+ for nn in range(n):
+ for hh in range(h):
+ for ww in range(w):
+ for cc in range(c):
+ if cc == 0:
+ weight[nn][hh][ww][cc] = 1.0
+ else:
+ weight[nn][hh][ww][cc] = 0.0
+
+ weight_list = weight.flatten()
+ weight_bytes = struct.pack('%sf' % (len(weight_list)), *weight_list)
+ weight_uints = struct.unpack('%sB' % (len(weight_list) * 4), weight_bytes)
+
+ buffers[i]['data'] = list(weight_uints)
+
+ elif buffer_map[i]['name'] == 'convolution_bias':
+ # bias length = N of weight
+ shape = buffer_map[i]['shape']
+
+ bias = np.zeros(shape)
+ bias_list = bias.flatten()
+ bias_bytes = struct.pack('%sf' % (len(bias_list)), *bias_list)
+ bias_uints = struct.unpack('%sB' % (len(bias_list) * 4), bias_bytes)
+
+ buffers[i]['data'] = list(bias_uints)
+
+with open('model.json', 'w') as f:
+ f.write(json.dumps(graph, indent=2))
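
The pack/unpack pair in modelgen.py reinterprets float32 values as raw bytes because the TFLite JSON schema stores buffer data as a list of uint8. A round-trip sketch of that trick:

    import struct

    floats = [1.0, 0.0, 0.5]
    raw = struct.pack('%sf' % len(floats), *floats)  # 12 bytes of native-endian float32
    as_uints = list(struct.unpack('%sB' % (len(floats) * 4), raw))  # goes into "data": [...]
    assert len(as_uints) == 12

    back = struct.unpack('%sf' % len(floats), struct.pack('%sB' % len(as_uints), *as_uints))
    assert list(back) == floats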
diff --git a/tools/modelgen/modelgen.sh b/tools/modelgen/modelgen.sh
new file mode 100755
index 000000000..563240da9
--- /dev/null
+++ b/tools/modelgen/modelgen.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+SCRIPT_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+ROOT_PATH=$SCRIPT_PATH/../..
+FLATC=$ROOT_PATH/Product/out/bin/flatc
+
+if [ ! -e "$1" ]; then
+ echo "file not exists: $1"
+ exit 1
+fi
+
+JSON_FILE=$1
+JSON_FILENAME=${JSON_FILE##*\/}
+TFLITE_FILENAME=${JSON_FILENAME%\.json}.tflite
+
+$FLATC -b $ROOT_PATH/externals/tensorflow/tensorflow/contrib/lite/schema/schema.fbs $JSON_FILE
+
diff --git a/tools/nnapi_quickcheck/CMakeLists.txt b/tools/nnapi_quickcheck/CMakeLists.txt
new file mode 100644
index 000000000..9dd7f5b3b
--- /dev/null
+++ b/tools/nnapi_quickcheck/CMakeLists.txt
@@ -0,0 +1,82 @@
+if(NOT BUILD_NNAPI_QUICKCHECK)
+ return()
+endif(NOT BUILD_NNAPI_QUICKCHECK)
+
+file(GLOB_RECURSE NNAPI_QUICKCHECK_LIB_SOURCES "lib/*.cpp")
+file(GLOB_RECURSE NNAPI_QUICKCHECK_LIB_TESTS "lib/*.test.cpp")
+list(REMOVE_ITEM NNAPI_QUICKCHECK_LIB_SOURCES ${NNAPI_QUICKCHECK_LIB_TESTS})
+
+add_library(nnapi_quickcheck_common ${NNAPI_QUICKCHECK_LIB_SOURCES})
+target_include_directories(nnapi_quickcheck_common PUBLIC "inc")
+target_link_libraries(nnapi_quickcheck_common nnfw_util)
+target_link_libraries(nnapi_quickcheck_common nnfw_support_tflite)
+
+add_executable(nnapi_quickcheck_lib_env_test "lib/env.test.cpp")
+target_link_libraries(nnapi_quickcheck_lib_env_test nnapi_quickcheck_common)
+
+function(add_nnapi_quickcheck NAME)
+ add_executable(nnapi_quickcheck_${NAME} "tests/${NAME}.cpp")
+ nnfw_find_package(GTest)
+ target_link_libraries(nnapi_quickcheck_${NAME} gtest gtest_main pthread)
+ target_link_libraries(nnapi_quickcheck_${NAME} nnapi_quickcheck_common)
+endfunction(add_nnapi_quickcheck)
+
+add_nnapi_quickcheck(add_1)
+add_nnapi_quickcheck(add_2)
+add_nnapi_quickcheck(add_3)
+add_nnapi_quickcheck(add_4)
+add_nnapi_quickcheck(add_5)
+add_nnapi_quickcheck(add_6)
+add_nnapi_quickcheck(add_7)
+add_nnapi_quickcheck(add_8)
+add_nnapi_quickcheck(add_9)
+add_nnapi_quickcheck(add_quan_1)
+add_nnapi_quickcheck(div_1)
+add_nnapi_quickcheck(div_2)
+add_nnapi_quickcheck(sub_1)
+add_nnapi_quickcheck(sub_2)
+add_nnapi_quickcheck(sub_3)
+add_nnapi_quickcheck(sub_4)
+add_nnapi_quickcheck(sub_5)
+add_nnapi_quickcheck(sub_6)
+add_nnapi_quickcheck(mul_1)
+add_nnapi_quickcheck(mul_2)
+add_nnapi_quickcheck(mul_quan_1)
+add_nnapi_quickcheck(relu_1)
+add_nnapi_quickcheck(relu_quan_1)
+add_nnapi_quickcheck(relu_2)
+add_nnapi_quickcheck(relu_3)
+add_nnapi_quickcheck(relu6_1)
+add_nnapi_quickcheck(relu6_quan_1)
+add_nnapi_quickcheck(relu1_1)
+add_nnapi_quickcheck(conv_1)
+add_nnapi_quickcheck(conv_quan_1)
+add_nnapi_quickcheck(dconv_1)
+add_nnapi_quickcheck(dconv_quan_1)
+add_nnapi_quickcheck(max_pool_1)
+add_nnapi_quickcheck(max_pool_quan_1)
+add_nnapi_quickcheck(avg_pool_1)
+add_nnapi_quickcheck(avg_pool_quan_1)
+add_nnapi_quickcheck(concat_1)
+add_nnapi_quickcheck(concat_quan_1)
+add_nnapi_quickcheck(reshape_1)
+add_nnapi_quickcheck(reshape_quan_1)
+add_nnapi_quickcheck(fully_connected_1)
+add_nnapi_quickcheck(fully_connected_quan_1)
+add_nnapi_quickcheck(softmax_1)
+add_nnapi_quickcheck(softmax_2)
+add_nnapi_quickcheck(softmax_quan_1)
+add_nnapi_quickcheck(resize_bilinear_1)
+add_nnapi_quickcheck(topk_v2_1)
+add_nnapi_quickcheck(cast_1)
+add_nnapi_quickcheck(cast_q_to_f_1)
+add_nnapi_quickcheck(cast_2)
+add_nnapi_quickcheck(gather_1)
+add_nnapi_quickcheck(gather_2)
+add_nnapi_quickcheck(dequantize_1)
+add_nnapi_quickcheck(tanh_1)
+add_nnapi_quickcheck(logistic_quan_1)
+add_nnapi_quickcheck(split_1)
+add_nnapi_quickcheck(split_2)
+add_nnapi_quickcheck(split_3)
+add_nnapi_quickcheck(split_4)
diff --git a/tools/nnapi_quickcheck/inc/env.h b/tools/nnapi_quickcheck/inc/env.h
new file mode 100644
index 000000000..c2efcebc9
--- /dev/null
+++ b/tools/nnapi_quickcheck/inc/env.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENV_UTILS_H__
+#define __ENV_UTILS_H__
+
+#include <string>
+
+#include <cstdint>
+
+class IntVar
+{
+public:
+ IntVar(const std::string &name, int32_t value);
+
+public:
+ int32_t operator()(void) const { return _value; }
+
+private:
+ int32_t _value;
+};
+
+class FloatVar
+{
+public:
+ FloatVar(const std::string &name, float value);
+
+public:
+ float operator()(void) const { return _value; }
+
+private:
+ float _value;
+};
+
+class StrVar
+{
+public:
+ StrVar(const std::string &name, const std::string &value);
+
+public:
+ const std::string &operator()(void) const { return _value; }
+
+private:
+ std::string _value;
+};
+
+#endif // __ENV_UTILS_H__
diff --git a/tools/nnapi_quickcheck/inc/memory.h b/tools/nnapi_quickcheck/inc/memory.h
new file mode 100644
index 000000000..3f1bca8a4
--- /dev/null
+++ b/tools/nnapi_quickcheck/inc/memory.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MEMORY_H__
+#define __MEMORY_H__
+
+#include <cstdlib>
+
+template <typename T> inline T *make_alloc(void)
+{
+ auto ptr = malloc(sizeof(T));
+
+ if (ptr == nullptr)
+ {
+ throw std::bad_alloc{};
+ }
+
+ return reinterpret_cast<T *>(ptr);
+}
+
+#endif // __MEMORY_H__
diff --git a/tools/nnapi_quickcheck/lib/env.cpp b/tools/nnapi_quickcheck/lib/env.cpp
new file mode 100644
index 000000000..758516752
--- /dev/null
+++ b/tools/nnapi_quickcheck/lib/env.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "env.h"
+
+#include "util/environment.h"
+
+//
+// Integer variable
+//
+IntVar::IntVar(const std::string &name, int32_t value) : _value{value}
+{
+ nnfw::util::env::IntAccessor{name}.access(_value);
+}
+
+//
+// Float variable
+//
+FloatVar::FloatVar(const std::string &name, float value) : _value{value}
+{
+ nnfw::util::env::FloatAccessor{name}.access(_value);
+}
+
+//
+// String variable
+//
+#include <cstdlib>
+
+StrVar::StrVar(const std::string &name, const std::string &value) : _value{value}
+{
+ auto env = std::getenv(name.c_str());
+
+ if (env)
+ {
+ _value = std::string{env};
+ }
+}
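
For comparison, the same default-unless-overridden pattern that IntVar, FloatVar, and StrVar implement, sketched in Python:

    import os

    def int_var(name, default):
        # mirrors IntVar: keep the default unless the environment overrides it
        try:
            return int(os.environ[name])
        except (KeyError, ValueError):
            return default

    assert int_var('LEFT_N', 1) == 1  # unset -> default (assumes LEFT_N is not already set)
    os.environ['LEFT_N'] = '4'
    assert int_var('LEFT_N', 1) == 4  # set -> override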
diff --git a/tools/nnapi_quickcheck/lib/env.test.cpp b/tools/nnapi_quickcheck/lib/env.test.cpp
new file mode 100644
index 000000000..dd9ac8be5
--- /dev/null
+++ b/tools/nnapi_quickcheck/lib/env.test.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "env.h"
+
+#include <string>
+
+#include <cstdlib>
+#include <cassert>
+
+inline void ensure(int err) { assert(err == 0); }
+
+int main(int argc, char **argv)
+{
+ const std::string key{"TEST"};
+ const int num{3};
+
+ const auto str = std::to_string(num);
+
+ ensure(unsetenv(key.c_str()));
+ ensure(setenv(key.c_str(), str.c_str(), 0));
+
+ int value = 0;
+
+ assert(value != num);
+
+ IntVar buffer(key, value);
+
+ assert(buffer() == num);
+
+ return 0;
+}
diff --git a/tools/nnapi_quickcheck/tests/add_1.cpp b/tools/nnapi_quickcheck/tests/add_1.cpp
new file mode 100644
index 000000000..52aa2afa0
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_1.cpp
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_add_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "add_1.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT_N = RIGHT_N_Value();
+ const int32_t RIGHT_C = RIGHT_C_Value();
+ const int32_t RIGHT_H = RIGHT_H_Value();
+ const int32_t RIGHT_W = RIGHT_W_Value();
+
+ const int32_t OFM_N = std::max(LEFT_N, RIGHT_N);
+ const int32_t OFM_C = std::max(LEFT_C, RIGHT_C);
+ const int32_t OFM_H = std::max(LEFT_H, RIGHT_H);
+ const int32_t OFM_W = std::max(LEFT_W, RIGHT_W);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_C);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_N);
+ PRINT_VALUE(RIGHT_C);
+ PRINT_VALUE(RIGHT_H);
+ PRINT_VALUE(RIGHT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_N, LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT_N, RIGHT_H, RIGHT_W, RIGHT_C} /* dims */,
+ quantization);
+
+ // Add an ADD node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteAddParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Add and store the result into Tensor #0
+ // - Read LHS from Tensor #1
+ // - Read RHS from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_ADD, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
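
The OFM_* values above are the element-wise max of the two operand shapes, which agrees with numpy-style broadcasting as long as each dimension pair is either equal or contains a 1. A standalone check using the defaults from add_1.lst:

    import numpy as np

    left = np.zeros([1, 16, 16, 3])   # {LEFT_N, LEFT_H, LEFT_W, LEFT_C}
    right = np.zeros([1, 16, 16, 3])  # {RIGHT_N, RIGHT_H, RIGHT_W, RIGHT_C}

    ofm = tuple(max(l, r) for l, r in zip(left.shape, right.shape))
    assert np.broadcast(left, right).shape == ofm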
diff --git a/tools/nnapi_quickcheck/tests/add_1.lst b/tools/nnapi_quickcheck/tests/add_1.lst
new file mode 100644
index 000000000..fa17caebb
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_1.lst
@@ -0,0 +1,13 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_C, 3)
+INT_VALUE(LEFT_H, 16)
+INT_VALUE(LEFT_W, 16)
+
+INT_VALUE(RIGHT_N, 1)
+INT_VALUE(RIGHT_C, 3)
+INT_VALUE(RIGHT_H, 16)
+INT_VALUE(RIGHT_W, 16)
diff --git a/tools/nnapi_quickcheck/tests/add_2.cpp b/tools/nnapi_quickcheck/tests/add_2.cpp
new file mode 100644
index 000000000..9b5b19c06
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_2.cpp
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_add_2, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "add_2.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT_N = RIGHT_N_Value();
+ const int32_t RIGHT_C = RIGHT_C_Value();
+ const int32_t RIGHT_H = RIGHT_H_Value();
+ const int32_t RIGHT_W = RIGHT_W_Value();
+
+ const int32_t OFM_N = std::max(LEFT_N, RIGHT_N);
+ const int32_t OFM_C = std::max(LEFT_C, RIGHT_C);
+ const int32_t OFM_H = std::max(LEFT_H, RIGHT_H);
+ const int32_t OFM_W = std::max(LEFT_W, RIGHT_W);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_C);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_N);
+ PRINT_VALUE(RIGHT_C);
+ PRINT_VALUE(RIGHT_H);
+ PRINT_VALUE(RIGHT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ // Configure left data
+ const uint32_t left_size = LEFT_N * LEFT_C * LEFT_H * LEFT_W;
+ float left_data[left_size]; // NOTE a VLA cannot take an initializer; the fill loop below writes every element
+
+ // Fill left data with random data
+ {
+ std::normal_distribution<float> left_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < left_size; ++off)
+ {
+ left_data[off] = left_dist(random);
+ }
+ }
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadOnly(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_N, LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization,
+ reinterpret_cast<const char *>(left_data),
+ left_size * sizeof(float));
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT_N, RIGHT_H, RIGHT_W, RIGHT_C} /* dims */,
+ quantization);
+
+ // Add an ADD node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteAddParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Add and store the result into Tensor #0
+ // - Read LHS from Tensor #1
+ // - Read RHS from Tensor #2,
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_ADD, 1));
+
+ interp.SetInputs({2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/add_2.lst b/tools/nnapi_quickcheck/tests/add_2.lst
new file mode 100644
index 000000000..fa17caebb
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_2.lst
@@ -0,0 +1,13 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_C, 3)
+INT_VALUE(LEFT_H, 16)
+INT_VALUE(LEFT_W, 16)
+
+INT_VALUE(RIGHT_N, 1)
+INT_VALUE(RIGHT_C, 3)
+INT_VALUE(RIGHT_H, 16)
+INT_VALUE(RIGHT_W, 16)
diff --git a/tools/nnapi_quickcheck/tests/add_3.cpp b/tools/nnapi_quickcheck/tests/add_3.cpp
new file mode 100644
index 000000000..e692fe314
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_3.cpp
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/TensorShapeUtils.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_add_3, simple_test)
+{
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+#define STR_VALUE(NAME, VALUE) StrVar NAME##_Value(#NAME, VALUE);
+#include "add_3.lst"
+#undef STR_VALUE
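+ // The .lst file acts as an X-macro table: each STR_VALUE(NAME, VALUE) entry
+ // expands through the #define above into a StrVar named NAME_Value, holding
+ // the default VALUE and, presumably via env.h, an environment-variable override.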
+
+ const auto LHS_SHAPE = nnfw::util::tensor::Shape::from(LHS_SHAPE_Value());
+ const auto RHS_SHAPE = nnfw::util::tensor::Shape::from(RHS_SHAPE_Value());
+ const auto OUT_SHAPE = nnfw::support::tflite::broadcast(LHS_SHAPE, RHS_SHAPE);
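+ // broadcast() is assumed to follow NumPy-style broadcasting: shapes are matched
+ // from the trailing axis and a dimension of 1 stretches to match the other
+ // operand, so "1,3,16,16" + "1,3,16,16" simply yields "1,3,16,16".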
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LHS_SHAPE);
+ PRINT_VALUE(RHS_SHAPE);
+ PRINT_VALUE(OUT_SHAPE);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ using nnfw::support::tflite::as_dims;
+
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+
+ quantization.scale = 1;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ as_dims(OUT_SHAPE), quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ as_dims(LHS_SHAPE), quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ as_dims(RHS_SHAPE), quantization);
+
+ // Add ADD Node
+ //
+ // NOTE AddNodeWithParameters takes the ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteAddParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Add and store the result into Tensor #0
+ // - Read LHS from Tensor #1
+ // - Read RHS from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_ADD, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = 0;
+ param.tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(param.verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(param.tolerance);
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/add_3.lst b/tools/nnapi_quickcheck/tests/add_3.lst
new file mode 100644
index 000000000..1981db4e1
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_3.lst
@@ -0,0 +1,6 @@
+#ifndef STR_VALUE
+#error "STR_VALUE should be defined"
+#endif // STR_VALUE
+
+STR_VALUE(LHS_SHAPE, "1,3,16,16")
+STR_VALUE(RHS_SHAPE, "1,3,16,16")
diff --git a/tools/nnapi_quickcheck/tests/add_4.cpp b/tools/nnapi_quickcheck/tests/add_4.cpp
new file mode 100644
index 000000000..e519f1731
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_4.cpp
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_add_4, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "add_4.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT_N = RIGHT_N_Value();
+ const int32_t RIGHT_C = RIGHT_C_Value();
+ const int32_t RIGHT_H = RIGHT_H_Value();
+ const int32_t RIGHT_W = RIGHT_W_Value();
+
+ const int32_t OFM_N = std::max(LEFT_N, RIGHT_N);
+ const int32_t OFM_C = std::max(LEFT_C, RIGHT_C);
+ const int32_t OFM_H = std::max(LEFT_H, RIGHT_H);
+ const int32_t OFM_W = std::max(LEFT_W, RIGHT_W);
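+ // For a broadcasting ADD, each output dimension is the larger of the two input
+ // dimensions; this is only well-defined when they are equal or the smaller one
+ // is 1 (add_4.lst: LEFT_H = 16 against RIGHT_H = 1 gives OFM_H = 16).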
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_C);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_N);
+ PRINT_VALUE(RIGHT_C);
+ PRINT_VALUE(RIGHT_H);
+ PRINT_VALUE(RIGHT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_N, LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT_N, RIGHT_H, RIGHT_W, RIGHT_C} /* dims */,
+ quantization);
+
+ // Add ADD Node
+ //
+ // NOTE AddNodeWithParameters takes the ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteAddParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Add and store the result into Tensor #0
+ // - Read LHS from Tensor #1
+ // - Read RHS from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_ADD, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/add_4.lst b/tools/nnapi_quickcheck/tests/add_4.lst
new file mode 100644
index 000000000..6b289007f
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_4.lst
@@ -0,0 +1,13 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_C, 2)
+INT_VALUE(LEFT_H, 16)
+INT_VALUE(LEFT_W, 8)
+
+INT_VALUE(RIGHT_N, 1)
+INT_VALUE(RIGHT_C, 2)
+INT_VALUE(RIGHT_H, 1)
+INT_VALUE(RIGHT_W, 8)
diff --git a/tools/nnapi_quickcheck/tests/add_5.cpp b/tools/nnapi_quickcheck/tests/add_5.cpp
new file mode 100644
index 000000000..cacb5e42d
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_5.cpp
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_add_5, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "add_5.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT = RIGHT_Value();
+
+ const int32_t OFM_N = LEFT_N;
+ const int32_t OFM_C = LEFT_C;
+ const int32_t OFM_H = LEFT_H;
+ const int32_t OFM_W = LEFT_W;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_C);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_N, LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT} /* dims */, quantization);
+
+ // Add ADD Node
+ //
+ // NOTE AddNodeWithParameters takes the ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteAddParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Add and store the result into Tensor #0
+ // - Read LHS from Tensor #1
+ // - Read RHS from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_ADD, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/add_5.lst b/tools/nnapi_quickcheck/tests/add_5.lst
new file mode 100644
index 000000000..eb316b6ad
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_5.lst
@@ -0,0 +1,10 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_C, 3)
+INT_VALUE(LEFT_H, 8)
+INT_VALUE(LEFT_W, 16)
+
+INT_VALUE(RIGHT, 1)
diff --git a/tools/nnapi_quickcheck/tests/add_6.cpp b/tools/nnapi_quickcheck/tests/add_6.cpp
new file mode 100644
index 000000000..245b7ad39
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_6.cpp
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_add_6, simple_test)
+{
+ int verbose = 1;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "add_6.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT = RIGHT_Value();
+
+ const int32_t OFM_H = LEFT_H;
+ const int32_t OFM_W = LEFT_W;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_H, OFM_W} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_H, LEFT_W} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT} /* dims */, quantization);
+
+ // Add ADD Node
+ //
+ // NOTE AddNodeWithParameters takes the ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteAddParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Add and store the result into Tensor #0
+ // - Read LHS from Tensor #1
+ // - Read RHS from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_ADD, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/add_6.lst b/tools/nnapi_quickcheck/tests/add_6.lst
new file mode 100644
index 000000000..75db4c8d0
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_6.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_H, 8)
+INT_VALUE(LEFT_W, 2)
+
+INT_VALUE(RIGHT, 1)
diff --git a/tools/nnapi_quickcheck/tests/add_7.cpp b/tools/nnapi_quickcheck/tests/add_7.cpp
new file mode 100644
index 000000000..43d285c72
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_7.cpp
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_add_7, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "add_7.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT_C = RIGHT_C_Value();
+ const int32_t RIGHT_H = RIGHT_H_Value();
+ const int32_t RIGHT_W = RIGHT_W_Value();
+
+ const int32_t OFM_C = LEFT_C;
+ const int32_t OFM_H = LEFT_H;
+ const int32_t OFM_W = LEFT_W;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_C);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_C);
+ PRINT_VALUE(RIGHT_H);
+ PRINT_VALUE(RIGHT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT_H, RIGHT_W, RIGHT_C} /* dims */, quantization);
+
+ // Add ADD Node
+ //
+ // NOTE AddNodeWithParameters takes the ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteAddParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Add and store the result into Tensor #0
+ // - Read LHS from Tensor #1
+ // - Read RHS from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_ADD, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/add_7.lst b/tools/nnapi_quickcheck/tests/add_7.lst
new file mode 100644
index 000000000..1dc8b6147
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_7.lst
@@ -0,0 +1,11 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_C, 3)
+INT_VALUE(LEFT_H, 8)
+INT_VALUE(LEFT_W, 16)
+
+INT_VALUE(RIGHT_C, 3)
+INT_VALUE(RIGHT_H, 8)
+INT_VALUE(RIGHT_W, 1)
diff --git a/tools/nnapi_quickcheck/tests/add_8.cpp b/tools/nnapi_quickcheck/tests/add_8.cpp
new file mode 100644
index 000000000..ec11c3969
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_8.cpp
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_add_8, simple_test)
+{
+ int verbose = 1;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "add_8.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT_N = RIGHT_N_Value();
+ const int32_t RIGHT_C = RIGHT_C_Value();
+ const int32_t RIGHT_H = RIGHT_H_Value();
+ const int32_t RIGHT_W = RIGHT_W_Value();
+
+ const int32_t OFM_N = std::max(LEFT_N, RIGHT_N);
+ const int32_t OFM_C = std::max(LEFT_C, RIGHT_C);
+ const int32_t OFM_H = std::max(LEFT_H, RIGHT_H);
+ const int32_t OFM_W = std::max(LEFT_W, RIGHT_W);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_C);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_N);
+ PRINT_VALUE(RIGHT_C);
+ PRINT_VALUE(RIGHT_H);
+ PRINT_VALUE(RIGHT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ // Configure input data
+ const uint32_t left_size = LEFT_N * LEFT_C * LEFT_H * LEFT_W;
+ const uint32_t right_size = RIGHT_N * RIGHT_C * RIGHT_H * RIGHT_W;
+ // NOTE left_size/right_size are runtime values, so these are variable-length
+ // arrays (a compiler extension); a VLA cannot take an initializer, and both
+ // buffers are fully overwritten by the fill loops below.
+ float left_data[left_size];
+ float right_data[right_size];
+
+ // Fill the operands with deterministic values (left: all 10s; right: 1, 2,
+ // 3, ...) so broadcasting mistakes are easy to spot, and dump every element
+ {
+ float value = 10.0f;
+ for (uint32_t off = 0; off < left_size; ++off)
+ {
+ left_data[off] = value;
+ std::cout << left_data[off] << std::endl;
+ }
+ value = 1.0f;
+ for (uint32_t off = 0; off < right_size; ++off)
+ {
+ right_data[off] = value++;
+ std::cout << right_data[off] << std::endl;
+ }
+ }
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadOnly(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_N, LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization,
+ reinterpret_cast<const char *>(left_data),
+ left_size * sizeof(float));
+
+ // Configure input(s)
+ interp.SetTensorParametersReadOnly(
+ 2, kTfLiteFloat32 /* type */, "right" /* name */, {RIGHT_C} /* dims */, quantization,
+ //{RIGHT_W, RIGHT_C} /* dims */, quantization,
+ reinterpret_cast<const char *>(right_data), right_size * sizeof(float));
+
+ // Add ADD Node
+ //
+ // NOTE AddNodeWithParameters takes the ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteAddParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Add and store the result into Tensor #0
+ // - Read LHS from Tensor #1
+ // - Read RHS from Tensor #2,
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_ADD, 1));
+
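+ // Both operands were registered as read-only (constant) tensors above, so the
+ // model declares no runtime inputs and the test only compares the ADD output.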
+ interp.SetInputs({});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/add_8.lst b/tools/nnapi_quickcheck/tests/add_8.lst
new file mode 100644
index 000000000..3119c7f65
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_8.lst
@@ -0,0 +1,13 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_H, 3)
+INT_VALUE(LEFT_W, 2)
+INT_VALUE(LEFT_C, 4)
+
+INT_VALUE(RIGHT_N, 1)
+INT_VALUE(RIGHT_H, 1)
+INT_VALUE(RIGHT_W, 1)
+INT_VALUE(RIGHT_C, 4)
diff --git a/tools/nnapi_quickcheck/tests/add_9.cpp b/tools/nnapi_quickcheck/tests/add_9.cpp
new file mode 100644
index 000000000..f3cf02875
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_9.cpp
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_add_9, simple_test)
+{
+ int verbose = 1;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "add_9.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT_N = RIGHT_N_Value();
+ const int32_t RIGHT_C = RIGHT_C_Value();
+ const int32_t RIGHT_H = RIGHT_H_Value();
+ const int32_t RIGHT_W = RIGHT_W_Value();
+
+ const int32_t OFM_N = std::max(LEFT_N, RIGHT_N);
+ const int32_t OFM_C = std::max(LEFT_C, RIGHT_C);
+ const int32_t OFM_H = std::max(LEFT_H, RIGHT_H);
+ const int32_t OFM_W = std::max(LEFT_W, RIGHT_W);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_VALUE(LEFT_C);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_N);
+ PRINT_VALUE(RIGHT_H);
+ PRINT_VALUE(RIGHT_W);
+ PRINT_VALUE(RIGHT_C);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+ PRINT_VALUE(OFM_C);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ // Configure input data
+ const uint32_t left_size = LEFT_N * LEFT_C * LEFT_H * LEFT_W;
+ const uint32_t right_size = RIGHT_N * RIGHT_C * RIGHT_H * RIGHT_W;
+ // NOTE left_size/right_size are runtime values, so these are variable-length
+ // arrays (a compiler extension); a VLA cannot take an initializer, and both
+ // buffers are fully overwritten by the fill loops below.
+ float left_data[left_size];
+ float right_data[right_size];
+
+ // Fill the operands with deterministic values (left: all 10s; right: 1, 2, 3, ...)
+ {
+ float value = 10.0f;
+ for (uint32_t off = 0; off < left_size; ++off)
+ {
+ left_data[off] = value;
+ }
+ value = 1.0f;
+ for (uint32_t off = 0; off < right_size; ++off)
+ {
+ right_data[off] = value++;
+ }
+ }
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadOnly(
+ 1, kTfLiteFloat32 /* type */, "left" /* name */, {LEFT_W, LEFT_C} /* dims */, quantization,
+ reinterpret_cast<const char *>(left_data), left_size * sizeof(float));
+
+ // Configure input(s)
+ interp.SetTensorParametersReadOnly(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT_N, RIGHT_H, RIGHT_W, RIGHT_C} /* dims */,
+ quantization, reinterpret_cast<const char *>(right_data),
+ right_size * sizeof(float));
+
+ // Add ADD Node
+ //
+ // NOTE AddNodeWithParameters takes the ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteAddParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Add and store the result into Tensor #0
+ // - Read LHS from Tensor #1
+ // - Read RHS from Tensor #2,
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_ADD, 1));
+
+ interp.SetInputs({});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/add_9.lst b/tools/nnapi_quickcheck/tests/add_9.lst
new file mode 100644
index 000000000..52a1f1acc
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_9.lst
@@ -0,0 +1,13 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_H, 1)
+INT_VALUE(LEFT_W, 3)
+INT_VALUE(LEFT_C, 4)
+
+INT_VALUE(RIGHT_N, 1)
+INT_VALUE(RIGHT_H, 2)
+INT_VALUE(RIGHT_W, 3)
+INT_VALUE(RIGHT_C, 4)
diff --git a/tools/nnapi_quickcheck/tests/add_quan_1.cpp b/tools/nnapi_quickcheck/tests/add_quan_1.cpp
new file mode 100644
index 000000000..45f0ba681
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_quan_1.cpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_add_quan_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "add_quan_1.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT_N = RIGHT_N_Value();
+ const int32_t RIGHT_C = RIGHT_C_Value();
+ const int32_t RIGHT_H = RIGHT_H_Value();
+ const int32_t RIGHT_W = RIGHT_W_Value();
+
+ const int32_t OFM_N = std::max(LEFT_N, RIGHT_N);
+ const int32_t OFM_C = std::max(LEFT_C, RIGHT_C);
+ const int32_t OFM_H = std::max(LEFT_H, RIGHT_H);
+ const int32_t OFM_W = std::max(LEFT_W, RIGHT_W);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_C);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_N);
+ PRINT_VALUE(RIGHT_C);
+ PRINT_VALUE(RIGHT_H);
+ PRINT_VALUE(RIGHT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(3);
+
+ // Configure output
+ quantization.scale = 2.0f;
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
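+ // The output uses scale 2.0 while the inputs below use scale 1.0, so the ADD
+ // kernel must requantize: quantized inputs 3 and 5 represent 3.0 + 5.0 = 8.0,
+ // which is stored in the output as 8.0 / 2.0 = 4.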
+
+ // Configure input(s)
+ quantization.scale = 1.0f;
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "left" /* name */,
+ {LEFT_N, LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteUInt8 /* type */, "right" /* name */,
+ {RIGHT_N, RIGHT_H, RIGHT_W, RIGHT_C} /* dims */,
+ quantization);
+
+ // Add ADD Node
+ //
+ // NOTE AddNodeWithParameters takes the ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteAddParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Add and store the result into Tensor #0
+ // - Read LHS from Tensor #1
+ // - Read RHS from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_ADD, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/add_quan_1.lst b/tools/nnapi_quickcheck/tests/add_quan_1.lst
new file mode 100644
index 000000000..fa17caebb
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/add_quan_1.lst
@@ -0,0 +1,13 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_C, 3)
+INT_VALUE(LEFT_H, 16)
+INT_VALUE(LEFT_W, 16)
+
+INT_VALUE(RIGHT_N, 1)
+INT_VALUE(RIGHT_C, 3)
+INT_VALUE(RIGHT_H, 16)
+INT_VALUE(RIGHT_W, 16)
diff --git a/tools/nnapi_quickcheck/tests/avg_pool_1.cpp b/tools/nnapi_quickcheck/tests/avg_pool_1.cpp
new file mode 100644
index 000000000..c938ed690
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/avg_pool_1.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <iostream>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_avg_pool_1, simple_test)
+{
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Set random test parameters
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "avg_pool_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t KER_H = KER_H_Value();
+ const int32_t KER_W = KER_W_Value();
+
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = (IFM_H - KER_H) + 1;
+ const int32_t OFM_W = (IFM_W - KER_W) + 1;
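+ // VALID padding with stride 1: OFM = (IFM - KER) + 1 per axis. With the
+ // defaults in avg_pool_1.lst (IFM_H = 3, KER_H = 3; IFM_W = 4, KER_W = 4)
+ // this yields a single 1x1 output position.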
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(KER_H);
+ PRINT_VALUE(KER_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ quantization.scale = 1;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(2);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure IFM
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Add Average Pooling Node
+ //
+ // NOTE AddNodeWithParameters takes the ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLitePoolParams>();
+
+ param->padding = kTfLitePaddingValid;
+ param->stride_width = 1;
+ param->stride_height = 1;
+ param->filter_width = KER_W;
+ param->filter_height = KER_H;
+ param->activation = kTfLiteActNone;
+
+ // Run Average Pooling and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_AVERAGE_POOL_2D, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/avg_pool_1.lst b/tools/nnapi_quickcheck/tests/avg_pool_1.lst
new file mode 100644
index 000000000..02d86d470
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/avg_pool_1.lst
@@ -0,0 +1,10 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
+
+INT_VALUE(KER_H, 3)
+INT_VALUE(KER_W, 4)
diff --git a/tools/nnapi_quickcheck/tests/avg_pool_quan_1.cpp b/tools/nnapi_quickcheck/tests/avg_pool_quan_1.cpp
new file mode 100644
index 000000000..ba41c030c
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/avg_pool_quan_1.cpp
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <iostream>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_avg_pool_quan_1, simple_test)
+{
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Set random test parameters
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "avg_pool_quan_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t KER_H = KER_H_Value();
+ const int32_t KER_W = KER_W_Value();
+
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = (IFM_H - KER_H) + 1;
+ const int32_t OFM_W = (IFM_W - KER_W) + 1;
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(KER_H);
+ PRINT_VALUE(KER_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+ quantization.scale = 1.0f;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(2);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure IFM
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Add Average Pooling Node
+ //
+ // NOTE AddNodeWithParameters takes the ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLitePoolParams>();
+
+ param->padding = kTfLitePaddingValid;
+ param->stride_width = 1;
+ param->stride_height = 1;
+ param->filter_width = KER_W;
+ param->filter_height = KER_H;
+ param->activation = kTfLiteActNone;
+
+ // Run Average Pooling and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_AVERAGE_POOL_2D, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/avg_pool_quan_1.lst b/tools/nnapi_quickcheck/tests/avg_pool_quan_1.lst
new file mode 100644
index 000000000..02d86d470
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/avg_pool_quan_1.lst
@@ -0,0 +1,10 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
+
+INT_VALUE(KER_H, 3)
+INT_VALUE(KER_W, 4)
diff --git a/tools/nnapi_quickcheck/tests/cast_1.cpp b/tools/nnapi_quickcheck/tests/cast_1.cpp
new file mode 100644
index 000000000..01d49cd59
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/cast_1.cpp
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_cast_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "cast_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_N = IFM_N_Value();
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t OFM_N = IFM_N;
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_N);
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ TfLiteQuantizationParams quantization;
+
+ quantization.scale = 1;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
+ interp.AddTensors(2);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
+ {IFM_N, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Add Cast Node
+ // Run CAST and store the result into Tensor #0
+ // - Read input from Tensor #1
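+ // CAST takes no builtin params, so nullptr is passed where the other tests
+ // hand over a malloc'ed TfLite*Params struct.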
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_CAST, 1));
+
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/cast_1.lst b/tools/nnapi_quickcheck/tests/cast_1.lst
new file mode 100644
index 000000000..a0077cb95
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/cast_1.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_N, 1)
+INT_VALUE(IFM_C, 3)
+INT_VALUE(IFM_H, 320)
+INT_VALUE(IFM_W, 320)
diff --git a/tools/nnapi_quickcheck/tests/cast_2.cpp b/tools/nnapi_quickcheck/tests/cast_2.cpp
new file mode 100644
index 000000000..b0032210d
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/cast_2.cpp
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_cast_2, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "cast_2.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_N = IFM_N_Value();
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t OFM_N = IFM_N;
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_N);
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // An AddTensors(N) call makes the T/F Lite interpreter create N tensors with indices in [0, N)
+ interp.AddTensors(2);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input
+ interp.SetTensorParametersReadWrite(1, kTfLiteInt32 /* type */, "input" /* name */,
+ {IFM_N, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Add Cast Node
+ // Run CAST and store the result into Tensor #0
+ // - Read input from Tensor #1
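+ // - Here that means casting the kTfLiteInt32 input to the kTfLiteFloat32 output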
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_CAST, 1));
+
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/cast_2.lst b/tools/nnapi_quickcheck/tests/cast_2.lst
new file mode 100644
index 000000000..a0077cb95
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/cast_2.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_N, 1)
+INT_VALUE(IFM_C, 3)
+INT_VALUE(IFM_H, 320)
+INT_VALUE(IFM_W, 320)
diff --git a/tools/nnapi_quickcheck/tests/cast_q_to_f_1.cpp b/tools/nnapi_quickcheck/tests/cast_q_to_f_1.cpp
new file mode 100644
index 000000000..763ca940c
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/cast_q_to_f_1.cpp
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_cast_q_to_f_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "cast_q_to_f_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_N = IFM_N_Value();
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t OFM_N = IFM_N;
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_N);
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ TfLiteQuantizationParams quantization;
+
+ quantization.scale = 1;
+ quantization.zero_point = 0;
+
+ // An AddTensors(N) call makes the T/F Lite interpreter create N tensors with indices in [0, N)
+ interp.AddTensors(2);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {IFM_N, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Add Cast Node
+ // Run CAST and store the result into Tensor #0
+ // - Read input from Tensor #1
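+ // - Here that means casting the kTfLiteFloat32 input to the kTfLiteUInt8 output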
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_CAST, 1));
+
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/cast_q_to_f_1.lst b/tools/nnapi_quickcheck/tests/cast_q_to_f_1.lst
new file mode 100644
index 000000000..a0077cb95
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/cast_q_to_f_1.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_N, 1)
+INT_VALUE(IFM_C, 3)
+INT_VALUE(IFM_H, 320)
+INT_VALUE(IFM_W, 320)
diff --git a/tools/nnapi_quickcheck/tests/concat_1.cpp b/tools/nnapi_quickcheck/tests/concat_1.cpp
new file mode 100644
index 000000000..77d670fed
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/concat_1.cpp
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_concat_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "concat_1.lst"
+#undef INT_VALUE
+
+ // TODO Allow users to set concat axis!
+ const int32_t CONCAT_COUNT = CONCAT_COUNT_Value();
+
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ int32_t OFM_C = 0;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(CONCAT_COUNT);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ // Randomize IFM depth
+ std::default_random_engine generator(SEED);
+ std::uniform_int_distribution<int> distribution(1, 8);
+
+ std::vector<int32_t> depths;
+
+ for (int32_t n = 0; n < CONCAT_COUNT; ++n)
+ {
+ const auto depth = distribution(generator);
+
+ OFM_C += depth;
+ depths.emplace_back(depth);
+ }
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // An AddTensors(N) call makes the T/F Lite interpreter create N tensors with indices in [0, N)
+ interp.AddTensors(depths.size() + 1);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure IFM(s)
+ std::vector<int> ifm_indexes;
+
+ for (uint32_t n = 0; n < depths.size(); ++n)
+ {
+ const auto ifm_index = 1 + n;
+ const auto IFM_C = depths.at(n);
+
+ interp.SetTensorParametersReadWrite(ifm_index, kTfLiteFloat32 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ ifm_indexes.emplace_back(ifm_index);
+ }
+
+ // Add Concat Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free(),
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteConcatenationParams>();
+
+ param->activation = kTfLiteActNone;
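+ // Concatenate along axis 3, the channel axis of the NHWC tensors configured above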
+ param->axis = 3;
+
+ // Run Concatenation and store its result into Tensor #0
+ // - Read IFMs from Tensor #1 and onward
+ interp.AddNodeWithParameters(ifm_indexes, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_CONCATENATION, 1));
+
+ // Set every IFM tensor as an input, and Tensor #0 as Output #0
+ interp.SetInputs(ifm_indexes);
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/concat_1.lst b/tools/nnapi_quickcheck/tests/concat_1.lst
new file mode 100644
index 000000000..db70d4c8b
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/concat_1.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(CONCAT_COUNT, 3)
+
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
diff --git a/tools/nnapi_quickcheck/tests/concat_quan_1.cpp b/tools/nnapi_quickcheck/tests/concat_quan_1.cpp
new file mode 100644
index 000000000..cd522b049
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/concat_quan_1.cpp
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_concat_quan_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "concat_quan_1.lst"
+#undef INT_VALUE
+
+ // TODO Allow users to set concat axis!
+ const int32_t CONCAT_COUNT = CONCAT_COUNT_Value();
+
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ int32_t OFM_C = 0;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(CONCAT_COUNT);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ // Randomize IFM depth
+ std::default_random_engine generator(SEED);
+ std::uniform_int_distribution<int> distribution(1, 8);
+
+ std::vector<int32_t> depths;
+
+ for (int32_t n = 0; n < CONCAT_COUNT; ++n)
+ {
+ const auto depth = distribution(generator);
+
+ OFM_C += depth;
+ depths.emplace_back(depth);
+ }
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+ quantization.scale = 1.0f;
+ quantization.zero_point = 0;
+
+ // An AddTensors(N) call makes the T/F Lite interpreter create N tensors with indices in [0, N)
+ interp.AddTensors(depths.size() + 1);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure IFM(s)
+ std::vector<int> ifm_indexes;
+
+ for (uint32_t n = 0; n < depths.size(); ++n)
+ {
+ const auto ifm_index = 1 + n;
+ const auto IFM_C = depths.at(n);
+
+ interp.SetTensorParametersReadWrite(ifm_index, kTfLiteUInt8 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ ifm_indexes.emplace_back(ifm_index);
+ }
+
+ // Add Concat Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free(),
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteConcatenationParams>();
+
+ param->activation = kTfLiteActNone;
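+ // Concatenate along axis 3, the channel axis of the NHWC tensors configured above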
+ param->axis = 3;
+
+ // Run Concatenation and store its result into Tensor #0
+ // - Read IFMs from Tensor #1 and onward
+ interp.AddNodeWithParameters(ifm_indexes, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_CONCATENATION, 1));
+
+ // Set every IFM tensor as an input, and Tensor #0 as Output #0
+ interp.SetInputs(ifm_indexes);
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/concat_quan_1.lst b/tools/nnapi_quickcheck/tests/concat_quan_1.lst
new file mode 100644
index 000000000..db70d4c8b
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/concat_quan_1.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(CONCAT_COUNT, 3)
+
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
diff --git a/tools/nnapi_quickcheck/tests/conv_1.cpp b/tools/nnapi_quickcheck/tests/conv_1.cpp
new file mode 100644
index 000000000..10046d1ce
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/conv_1.cpp
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+#include <vector>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_conv_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "conv_1.lst"
+#undef INT_VALUE
+
+ const int32_t STRIDE_H = STRIDE_H_Value();
+ const int32_t STRIDE_W = STRIDE_W_Value();
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t KER_N = KER_N_Value();
+ const int32_t KER_C = IFM_C_Value();
+ const int32_t KER_H = KER_H_Value();
+ const int32_t KER_W = KER_W_Value();
+
+ const int32_t OFM_C = KER_N;
+ const int32_t OFM_H = (IFM_H - KER_H) / STRIDE_H + 1;
+ const int32_t OFM_W = (IFM_W - KER_W) / STRIDE_W + 1;
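+ // (VALID-padding output size; e.g. the default 3x4 IFM, 3x4 kernel, and stride 1 yield a 1x1 OFM)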
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(STRIDE_H);
+ PRINT_VALUE(STRIDE_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(KER_N);
+ PRINT_VALUE(KER_C);
+ PRINT_VALUE(KER_H);
+ PRINT_VALUE(KER_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ // Configure Kernel Data
+ const uint32_t kernel_size = KER_N * KER_C * KER_H * KER_W;
+ std::vector<float> kernel_data(kernel_size, 0.0f);
+
+ // Fill kernel data with random data
+ {
+ std::normal_distribution<float> kernel_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < kernel_size; ++off)
+ {
+ kernel_data[off] = kernel_dist(random);
+ }
+ }
+
+ // Configure Bias Data
+ const auto bias_size = KER_N;
+ std::vector<float> bias_data(bias_size, 0.0f);
+
+ // Fill bias data with random data
+ {
+ std::normal_distribution<float> bias_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < bias_size; ++off)
+ {
+ bias_data[off] = bias_dist(random);
+ }
+ }
+
+ // Assumption on this example
+ assert(IFM_C == KER_C);
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // An AddTensors(N) call makes the T/F Lite interpreter create N tensors with indices in [0, N)
+ interp.AddTensors(5);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure IFM
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // NOTE kernel_data & bias_data should live longer than interpreter!
+ interp.SetTensorParametersReadOnly(
+ 2, kTfLiteFloat32 /* type */, "filter" /* name */, {KER_N, KER_H, KER_W, KER_C} /* dims */,
+ quantization, reinterpret_cast<const char *>(kernel_data.data()), kernel_size * sizeof(float));
+
+ interp.SetTensorParametersReadOnly(
+ 3, kTfLiteFloat32 /* type */, "bias" /* name */, {bias_size} /* dims */, quantization,
+ reinterpret_cast<const char *>(bias_data.data()), bias_size * sizeof(float));
+
+ // Add Convolution Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free(),
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteConvParams>();
+
+ param->padding = kTfLitePaddingValid;
+ param->stride_width = STRIDE_W;
+ param->stride_height = STRIDE_H;
+ param->activation = kTfLiteActRelu;
+
+ // Run Convolution and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ // - Read Filter from Tensor #2,
+ // - Read Bias from Tensor #3
+ interp.AddNodeWithParameters({1, 2, 3}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_CONV_2D, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/conv_1.lst b/tools/nnapi_quickcheck/tests/conv_1.lst
new file mode 100644
index 000000000..c01fc90ee
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/conv_1.lst
@@ -0,0 +1,14 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
+
+INT_VALUE(KER_N, 1)
+INT_VALUE(KER_H, 3)
+INT_VALUE(KER_W, 4)
+
+INT_VALUE(STRIDE_H, 1)
+INT_VALUE(STRIDE_W, 1)
diff --git a/tools/nnapi_quickcheck/tests/conv_quan_1.cpp b/tools/nnapi_quickcheck/tests/conv_quan_1.cpp
new file mode 100644
index 000000000..aebf2333a
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/conv_quan_1.cpp
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+#include <vector>
+#include <limits>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_conv_quan_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "conv_quan_1.lst"
+#undef INT_VALUE
+
+ const int32_t STRIDE_H = STRIDE_H_Value();
+ const int32_t STRIDE_W = STRIDE_W_Value();
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t KER_N = KER_N_Value();
+ const int32_t KER_C = IFM_C_Value();
+ const int32_t KER_H = KER_H_Value();
+ const int32_t KER_W = KER_W_Value();
+
+ const int32_t OFM_C = KER_N;
+ const int32_t OFM_H = (IFM_H - KER_H) / STRIDE_H + 1;
+ const int32_t OFM_W = (IFM_W - KER_W) / STRIDE_W + 1;
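+ // (VALID-padding output size; e.g. the default 3x4 IFM, 3x4 kernel, and stride 1 yield a 1x1 OFM)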
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(STRIDE_H);
+ PRINT_VALUE(STRIDE_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(KER_N);
+ PRINT_VALUE(KER_C);
+ PRINT_VALUE(KER_H);
+ PRINT_VALUE(KER_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ // Configure Kernel Data
+ const uint32_t kernel_size = KER_N * KER_C * KER_H * KER_W;
+ std::vector<float> kernel_data(kernel_size, 0.0f);
+
+ // Fill kernel data with random data
+ {
+ std::normal_distribution<float> kernel_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < kernel_size; ++off)
+ {
+ kernel_data[off] = kernel_dist(random);
+ }
+ }
+
+ // Configure Bias Data
+ const auto bias_size = KER_N;
+ std::vector<int32_t> bias_data(bias_size, 0);
+
+ // Fill bias data with random data
+ {
+ std::normal_distribution<float> bias_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < bias_size; ++off)
+ {
+ bias_data[off] = static_cast<int32_t>(bias_dist(random));
+ }
+ }
+
+ // Assumption on this example
+ assert(IFM_C == KER_C);
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ TfLiteQuantizationParams quantization;
+ quantization.zero_point = 0;
+
+ // An AddTensors(N) call makes the T/F Lite interpreter create N tensors with indices in [0, N)
+ interp.AddTensors(5);
+
+ // Configure OFM
+ float max_scale = (KER_C * KER_H * KER_W) *
+ std::numeric_limits<uint8_t>::max(); // * IFM_scale(1.0f) * kernel_scale(1.0f)
+ quantization.scale = max_scale;
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure IFM
+ quantization.scale = 1.0f;
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // NOTE kernel_data & bias_data should live longer than interpreter!
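+ // NOTE the float kernel bytes below are reinterpreted as raw uint8 weights; arbitrary byte patterns are acceptable for this diff run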
+ interp.SetTensorParametersReadOnly(
+ 2, kTfLiteUInt8 /* type */, "filter" /* name */, {KER_N, KER_H, KER_W, KER_C} /* dims */,
+ quantization, reinterpret_cast<const char *>(kernel_data.data()), kernel_size * sizeof(uint8_t));
+
+ quantization.scale *= quantization.scale;
+ interp.SetTensorParametersReadOnly(
+ 3, kTfLiteInt32 /* type */, "bias" /* name */, {bias_size} /* dims */, quantization,
+ reinterpret_cast<const char *>(bias_data.data()), bias_size * sizeof(int32_t));
+
+ // Add Convolution Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free(),
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteConvParams>();
+
+ param->padding = kTfLitePaddingValid;
+ param->stride_width = STRIDE_W;
+ param->stride_height = STRIDE_H;
+ param->activation = kTfLiteActRelu;
+
+ // Run Convolution and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ // - Read Filter from Tensor #2,
+ // - Read Bias from Tensor #3
+ interp.AddNodeWithParameters({1, 2, 3}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_CONV_2D, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/conv_quan_1.lst b/tools/nnapi_quickcheck/tests/conv_quan_1.lst
new file mode 100644
index 000000000..c01fc90ee
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/conv_quan_1.lst
@@ -0,0 +1,14 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
+
+INT_VALUE(KER_N, 1)
+INT_VALUE(KER_H, 3)
+INT_VALUE(KER_W, 4)
+
+INT_VALUE(STRIDE_H, 1)
+INT_VALUE(STRIDE_W, 1)
diff --git a/tools/nnapi_quickcheck/tests/dconv_1.cpp b/tools/nnapi_quickcheck/tests/dconv_1.cpp
new file mode 100644
index 000000000..bd0cacfd0
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/dconv_1.cpp
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+#include <vector>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_dconv_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "dconv_1.lst"
+#undef INT_VALUE
+
+ const int32_t STRIDE_H = STRIDE_H_Value();
+ const int32_t STRIDE_W = STRIDE_W_Value();
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t KER_C = KER_C_Value();
+ const int32_t KER_H = KER_H_Value();
+ const int32_t KER_W = KER_W_Value();
+
+ const int32_t OFM_C = KER_C;
+ const int32_t OFM_H = (IFM_H - KER_H) / STRIDE_H + 1;
+ const int32_t OFM_W = (IFM_W - KER_W) / STRIDE_W + 1;
+
+ const int32_t MULTIPLIER = MULTIPLIER_Value();
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(KER_C);
+ PRINT_VALUE(KER_H);
+ PRINT_VALUE(KER_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(STRIDE_H);
+ PRINT_VALUE(STRIDE_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(MULTIPLIER);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ assert(MULTIPLIER * IFM_C == KER_C);
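+ // (in a depthwise convolution each input channel produces MULTIPLIER output channels)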
+
+ // Configure Kernel Data
+ const uint32_t kernel_size = KER_C * KER_H * KER_W;
+ std::vector<float> kernel_data(kernel_size, 0.0f);
+
+ // Fill kernel data with random data
+ {
+ std::normal_distribution<float> kernel_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < kernel_size; ++off)
+ {
+ kernel_data[off] = kernel_dist(random);
+ }
+ }
+
+ // Configure Bias Data
+ const auto bias_size = KER_C;
+ std::vector<float> bias_data(bias_size, 0.0f);
+
+ // Fill bias data with random data
+ {
+ std::normal_distribution<float> bias_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < bias_size; ++off)
+ {
+ bias_data[off] = bias_dist(random);
+ }
+ }
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // An AddTensors(N) call makes the T/F Lite interpreter create N tensors with indices in [0, N)
+ interp.AddTensors(4);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure IFM
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // NOTE kernel_data & bias_data should live longer than interpreter!
+ interp.SetTensorParametersReadOnly(
+ 2, kTfLiteFloat32 /* type */, "filter" /* name */, {1, KER_H, KER_W, KER_C} /* dims */,
+ quantization, reinterpret_cast<const char *>(kernel_data.data()), kernel_size * sizeof(float));
+
+ interp.SetTensorParametersReadOnly(
+ 3, kTfLiteFloat32 /* type */, "bias" /* name */, {bias_size} /* dims */, quantization,
+ reinterpret_cast<const char *>(bias_data.data()), bias_size * sizeof(float));
+
+ // Add Depthwise Convolution Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free(),
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteDepthwiseConvParams>();
+
+ param->padding = kTfLitePaddingValid;
+ param->stride_width = STRIDE_W;
+ param->stride_height = STRIDE_H;
+ param->depth_multiplier = MULTIPLIER;
+ param->activation = kTfLiteActRelu;
+
+ // Run Depthwise Convolution and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ // - Read Filter from Tensor #2,
+ // - Read Bias from Tensor #3
+ interp.AddNodeWithParameters({1, 2, 3}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_DEPTHWISE_CONV_2D, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/dconv_1.lst b/tools/nnapi_quickcheck/tests/dconv_1.lst
new file mode 100644
index 000000000..da851ae2d
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/dconv_1.lst
@@ -0,0 +1,16 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
+
+INT_VALUE(KER_C, 2)
+INT_VALUE(KER_H, 3)
+INT_VALUE(KER_W, 4)
+
+INT_VALUE(MULTIPLIER, 1)
+
+INT_VALUE(STRIDE_H, 1)
+INT_VALUE(STRIDE_W, 1)
diff --git a/tools/nnapi_quickcheck/tests/dconv_quan_1.cpp b/tools/nnapi_quickcheck/tests/dconv_quan_1.cpp
new file mode 100644
index 000000000..43f305f06
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/dconv_quan_1.cpp
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+#include <vector>
+#include <limits>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_dconv_quan_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "dconv_quan_1.lst"
+#undef INT_VALUE
+
+ const int32_t STRIDE_H = STRIDE_H_Value();
+ const int32_t STRIDE_W = STRIDE_W_Value();
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t KER_C = KER_C_Value();
+ const int32_t KER_H = KER_H_Value();
+ const int32_t KER_W = KER_W_Value();
+
+ const int32_t OFM_C = KER_C;
+ const int32_t OFM_H = (IFM_H - KER_H) / STRIDE_H + 1;
+ const int32_t OFM_W = (IFM_W - KER_W) / STRIDE_W + 1;
+
+ const int32_t MULTIPLIER = MULTIPLIER_Value();
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(KER_C);
+ PRINT_VALUE(KER_H);
+ PRINT_VALUE(KER_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(STRIDE_H);
+ PRINT_VALUE(STRIDE_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(MULTIPLIER);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ assert(MULTIPLIER * IFM_C == KER_C);
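+ // (in a depthwise convolution each input channel produces MULTIPLIER output channels)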
+
+ // Configure Kernel Data
+ const uint32_t kernel_size = KER_C * KER_H * KER_W;
+ std::vector<float> kernel_data(kernel_size, 0.0f);
+
+ // Fill kernel data with random data
+ {
+ std::normal_distribution<float> kernel_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < kernel_size; ++off)
+ {
+ kernel_data[off] = kernel_dist(random);
+ }
+ }
+
+ // Configure Bias Data
+ const auto bias_size = KER_C;
+ std::vector<int32_t> bias_data(bias_size, 0);
+
+ // Fill bias data with random data
+ {
+ std::normal_distribution<float> bias_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < bias_size; ++off)
+ {
+ bias_data[off] = static_cast<int32_t>(bias_dist(random));
+ }
+ }
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ TfLiteQuantizationParams quantization;
+ quantization.zero_point = 0;
+
+ // An AddTensors(N) call makes the T/F Lite interpreter create N tensors with indices in [0, N)
+ interp.AddTensors(4);
+
+ // Configure OFM
+ float max_scale = (1 * KER_C * KER_H * KER_W) *
+ std::numeric_limits<uint8_t>::max(); // * IFM_scale(1.0f) * kernel_scale(1.0f)
+ quantization.scale = max_scale;
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure IFM
+ quantization.scale = 1.0f;
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // NOTE kernel_data & bias_data should live longer than interpreter!
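+ // NOTE the float kernel bytes below are reinterpreted as raw uint8 weights; arbitrary byte patterns are acceptable for this diff run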
+ interp.SetTensorParametersReadOnly(
+ 2, kTfLiteUInt8 /* type */, "filter" /* name */, {1, KER_H, KER_W, KER_C} /* dims */,
+ quantization, reinterpret_cast<const char *>(kernel_data.data()), kernel_size * sizeof(uint8_t));
+
+ quantization.scale *= quantization.scale;
+ interp.SetTensorParametersReadOnly(
+ 3, kTfLiteInt32 /* type */, "bias" /* name */, {bias_size} /* dims */, quantization,
+ reinterpret_cast<const char *>(bias_data.data()), bias_size * sizeof(int32_t));
+
+ // Add Depthwise Convolution Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free(),
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteDepthwiseConvParams>();
+
+ param->padding = kTfLitePaddingValid;
+ param->stride_width = STRIDE_W;
+ param->stride_height = STRIDE_H;
+ param->depth_multiplier = MULTIPLIER;
+ param->activation = kTfLiteActRelu;
+
+ // Run Depthwise Convolution and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ // - Read Filter from Tensor #2,
+ // - Read Bias from Tensor #3
+ interp.AddNodeWithParameters({1, 2, 3}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_DEPTHWISE_CONV_2D, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/dconv_quan_1.lst b/tools/nnapi_quickcheck/tests/dconv_quan_1.lst
new file mode 100644
index 000000000..da851ae2d
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/dconv_quan_1.lst
@@ -0,0 +1,16 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
+
+INT_VALUE(KER_C, 2)
+INT_VALUE(KER_H, 3)
+INT_VALUE(KER_W, 4)
+
+INT_VALUE(MULTIPLIER, 1)
+
+INT_VALUE(STRIDE_H, 1)
+INT_VALUE(STRIDE_W, 1)
diff --git a/tools/nnapi_quickcheck/tests/dequantize_1.cpp b/tools/nnapi_quickcheck/tests/dequantize_1.cpp
new file mode 100644
index 000000000..fe310a11d
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/dequantize_1.cpp
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_dequantize_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "dequantize_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_N = IFM_N_Value();
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t OFM_N = IFM_N;
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_N);
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
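+ // With scale = 1 and zero_point = 0 (set just below), DEQUANTIZE maps each uint8 value to the numerically equal float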
+ TfLiteQuantizationParams quantization;
+
+ quantization.scale = 1;
+ quantization.zero_point = 0;
+
+ // An AddTensors(N) call makes the T/F Lite interpreter create N tensors with indices in [0, N)
+ interp.AddTensors(2);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
+ {IFM_N, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Add DEQUANTIZE Node
+ // Run DEQUANTIZE and store the result into Tensor #0
+ // - Read input from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_DEQUANTIZE, 1));
+
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/dequantize_1.lst b/tools/nnapi_quickcheck/tests/dequantize_1.lst
new file mode 100644
index 000000000..a0077cb95
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/dequantize_1.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_N, 1)
+INT_VALUE(IFM_C, 3)
+INT_VALUE(IFM_H, 320)
+INT_VALUE(IFM_W, 320)
diff --git a/tools/nnapi_quickcheck/tests/div_1.cpp b/tools/nnapi_quickcheck/tests/div_1.cpp
new file mode 100644
index 000000000..ffa0d6cac
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/div_1.cpp
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_div_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "div_1.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT_N = RIGHT_N_Value();
+ const int32_t RIGHT_C = RIGHT_C_Value();
+ const int32_t RIGHT_H = RIGHT_H_Value();
+ const int32_t RIGHT_W = RIGHT_W_Value();
+
+ const int32_t OFM_N = std::max(LEFT_N, RIGHT_N);
+ const int32_t OFM_C = std::max(LEFT_C, RIGHT_C);
+ const int32_t OFM_H = std::max(LEFT_H, RIGHT_H);
+ const int32_t OFM_W = std::max(LEFT_W, RIGHT_W);
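+ // (each OFM extent is the larger of the corresponding operand extents, anticipating broadcast)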
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_C);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_N);
+ PRINT_VALUE(RIGHT_C);
+ PRINT_VALUE(RIGHT_H);
+ PRINT_VALUE(RIGHT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // An AddTensors(N) call makes the T/F Lite interpreter create N tensors with indices in [0, N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_N, LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT_N, RIGHT_H, RIGHT_W, RIGHT_C} /* dims */,
+ quantization);
+
+ // Add Division Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free(),
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteAddParams>();
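+ // NOTE TfLiteAddParams stands in for TfLiteDivParams here; in this T/F Lite revision both structs carry only the fused activation field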
+
+ param->activation = kTfLiteActNone;
+
+ // Run Div and store the result into Tensor #0
+ // - Read Left from Tensor #1
+ // - Read Right from Tensor #2,
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_DIV, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/div_1.lst b/tools/nnapi_quickcheck/tests/div_1.lst
new file mode 100644
index 000000000..fa17caebb
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/div_1.lst
@@ -0,0 +1,13 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_C, 3)
+INT_VALUE(LEFT_H, 16)
+INT_VALUE(LEFT_W, 16)
+
+INT_VALUE(RIGHT_N, 1)
+INT_VALUE(RIGHT_C, 3)
+INT_VALUE(RIGHT_H, 16)
+INT_VALUE(RIGHT_W, 16)
diff --git a/tools/nnapi_quickcheck/tests/div_2.cpp b/tools/nnapi_quickcheck/tests/div_2.cpp
new file mode 100644
index 000000000..c836c259a
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/div_2.cpp
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_div_2, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "div_2.lst"
+#undef INT_VALUE
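+
+ // NOTE The .lst file is an X-macro list: each INT_VALUE(NAME, VALUE) entry expands to an
+ // IntVar named NAME_Value, which presumably takes VALUE as the default and lets an
+ // environment variable of the same name override it at run time (see env.h).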
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT = RIGHT_Value();
+
+ const int32_t OFM_N = LEFT_N;
+ const int32_t OFM_C = LEFT_C;
+ const int32_t OFM_H = LEFT_H;
+ const int32_t OFM_W = LEFT_W;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_C);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_N, LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT} /* dims */, quantization);
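+
+ // NOTE The right operand is rank-1 with a single element, so DIV exercises its broadcast
+ // path: the one value is divided into every element of the left tensor.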
+
+ // Add DIV Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free(),
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteDivParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run DIV and store the result into Tensor #0
+ // - Read the left operand from Tensor #1
+ // - Read the right operand from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_DIV, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/div_2.lst b/tools/nnapi_quickcheck/tests/div_2.lst
new file mode 100644
index 000000000..cd36ac199
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/div_2.lst
@@ -0,0 +1,10 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_C, 3)
+INT_VALUE(LEFT_H, 16)
+INT_VALUE(LEFT_W, 16)
+
+INT_VALUE(RIGHT, 1)
diff --git a/tools/nnapi_quickcheck/tests/fully_connected_1.cpp b/tools/nnapi_quickcheck/tests/fully_connected_1.cpp
new file mode 100644
index 000000000..57af253f5
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/fully_connected_1.cpp
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+template <typename T> T *make_malloc(void) { return reinterpret_cast<T *>(malloc(sizeof(T))); }
+
+TEST(NNAPI_Quickcheck_fully_connected_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "conv_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t KER_H = KER_H_Value();
+ const int32_t KER_W = IFM_C_Value() * IFM_H_Value() * IFM_W_Value();
+
+ const int32_t OUT_LEN = KER_H;
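+
+ // FULLY_CONNECTED treats the {1, IFM_H, IFM_W, IFM_C} input as a single flattened row of
+ // IFM_C * IFM_H * IFM_W elements, so the weight matrix is {KER_H, KER_W} and the output is
+ // {1, KER_H}; with the default .lst values that is a 24-element row producing one output.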
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(KER_H);
+ PRINT_VALUE(KER_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OUT_LEN);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ // Configure Kernel Data
+ const uint32_t kernel_size = KER_H * KER_W;
+ float kernel_data[kernel_size] = {
+ 0.0f,
+ };
+
+ // Fill kernel data with random data
+ {
+ std::normal_distribution<float> kernel_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < kernel_size; ++off)
+ {
+ kernel_data[off] = kernel_dist(random);
+ }
+ }
+
+ // Configure Bias Data
+ const auto bias_size = KER_H;
+ float bias_data[bias_size] = {
+ 0.0f,
+ };
+
+ // Fill bias data with random data
+ {
+ std::normal_distribution<float> bias_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < bias_size; ++off)
+ {
+ bias_data[off] = bias_dist(random);
+ }
+ }
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(4);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {1 /*N*/, KER_H} /* dims */, quantization);
+
+ // Configure IFM
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // NOTE kernel_data & bias_data must outlive the interpreter, since SetTensorParametersReadOnly
+ // presumably keeps a pointer to the buffer rather than copying it
+ interp.SetTensorParametersReadOnly(
+ 2, kTfLiteFloat32 /* type */, "filter" /* name */, {KER_H, KER_W} /* dims */, quantization,
+ reinterpret_cast<const char *>(kernel_data), kernel_size * sizeof(float));
+
+ interp.SetTensorParametersReadOnly(
+ 3, kTfLiteFloat32 /* type */, "bias" /* name */, {bias_size} /* dims */, quantization,
+ reinterpret_cast<const char *>(bias_data), bias_size * sizeof(float));
+
+ // Add Fully Connected Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free(),
+ // so param should be allocated with malloc
+ auto param = make_malloc<TfLiteFullyConnectedParams>();
+
+ param->activation = kTfLiteActRelu;
+
+ // Run FULLY_CONNECTED and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ // - Read Filter from Tensor #2
+ // - Read Bias from Tensor #3
+ interp.AddNodeWithParameters({1, 2, 3}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_FULLY_CONNECTED, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/fully_connected_1.lst b/tools/nnapi_quickcheck/tests/fully_connected_1.lst
new file mode 100644
index 000000000..22acb9f7f
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/fully_connected_1.lst
@@ -0,0 +1,9 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
+
+INT_VALUE(KER_H, 1)
diff --git a/tools/nnapi_quickcheck/tests/fully_connected_quan_1.cpp b/tools/nnapi_quickcheck/tests/fully_connected_quan_1.cpp
new file mode 100644
index 000000000..1cb75fea0
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/fully_connected_quan_1.cpp
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+template <typename T> T *make_malloc(void) { return reinterpret_cast<T *>(malloc(sizeof(T))); }
+
+TEST(NNAPI_Quickcheck_fully_connected_quan_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "fully_connected_quan_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t KER_H = KER_H_Value();
+ const int32_t KER_W = IFM_C_Value() * IFM_H_Value() * IFM_W_Value();
+
+ const int32_t OUT_LEN = KER_H;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(KER_H);
+ PRINT_VALUE(KER_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OUT_LEN);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ // Configure Kernel Data
+ //
+ // NOTE The filter tensor below is kTfLiteUInt8, so its backing buffer must hold uint8_t values
+ const uint32_t kernel_size = KER_H * KER_W;
+ uint8_t kernel_data[kernel_size] = {
+ 0,
+ };
+
+ // Fill kernel data with random data covering the uint8 range
+ {
+ std::uniform_int_distribution<int> kernel_dist(0, 255);
+
+ for (uint32_t off = 0; off < kernel_size; ++off)
+ {
+ kernel_data[off] = static_cast<uint8_t>(kernel_dist(random));
+ }
+ }
+
+ // Configure Bias Data
+ const auto bias_size = KER_H;
+ int32_t bias_data[bias_size] = {
+ 0,
+ };
+
+ // Fill bias data with random data
+ {
+ std::normal_distribution<float> bias_dist(-1.0f, +1.0f);
+
+ for (uint32_t off = 0; off < bias_size; ++off)
+ {
+ bias_data[off] = static_cast<int32_t>(bias_dist(random));
+ }
+ }
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+ quantization.scale = FLOAT_NEAREST_TO_1;
+ quantization.zero_point = 0;
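+
+ // NOTE For quantized FULLY_CONNECTED, NNAPI expects bias_scale == input_scale * filter_scale;
+ // reusing a scale as close to 1.0f as possible for every tensor keeps that product
+ // (approximately) equal to the shared scale used here.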
+
+ // On AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(4);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {1 /*N*/, KER_H} /* dims */, quantization);
+
+ // Configure IFM
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // NOTE kernel_data & bias_data must outlive the interpreter, since SetTensorParametersReadOnly
+ // presumably keeps a pointer to the buffer rather than copying it
+ interp.SetTensorParametersReadOnly(
+ 2, kTfLiteUInt8 /* type */, "filter" /* name */, {KER_H, KER_W} /* dims */, quantization,
+ reinterpret_cast<const char *>(kernel_data), kernel_size * sizeof(uint8_t));
+
+ interp.SetTensorParametersReadOnly(
+ 3, kTfLiteInt32 /* type */, "bias" /* name */, {bias_size} /* dims */, quantization,
+ reinterpret_cast<const char *>(bias_data), bias_size * sizeof(int32_t));
+
+ // Add Fully Connected Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free(),
+ // so param should be allocated with malloc
+ auto param = make_malloc<TfLiteFullyConnectedParams>();
+
+ param->activation = kTfLiteActRelu;
+
+ // Run FULLY_CONNECTED and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ // - Read Filter from Tensor #2
+ // - Read Bias from Tensor #3
+ interp.AddNodeWithParameters({1, 2, 3}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_FULLY_CONNECTED, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/fully_connected_quan_1.lst b/tools/nnapi_quickcheck/tests/fully_connected_quan_1.lst
new file mode 100644
index 000000000..22acb9f7f
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/fully_connected_quan_1.lst
@@ -0,0 +1,9 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
+
+INT_VALUE(KER_H, 1)
diff --git a/tools/nnapi_quickcheck/tests/gather_1.cpp b/tools/nnapi_quickcheck/tests/gather_1.cpp
new file mode 100644
index 000000000..0d5b30eb6
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/gather_1.cpp
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <iostream>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_gather_1, simple_test)
+{
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Set random test parameters
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "gather_1.lst"
+#undef INT_VALUE
+
+ const int32_t INPUT_DATA = INPUT_DATA_Value();
+ const int32_t INDEX_DATA = INDEX_DATA_Value();
+
+ const int32_t OUTPUT_DATA = INDEX_DATA;
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(INPUT_DATA);
+ PRINT_VALUE(INDEX_DATA);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OUTPUT_DATA);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+
+ quantization.scale = 1;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(3);
+
+ // Configure INPUT_DATA
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "input" /* name */,
+ {INPUT_DATA} /* dims */, quantization);
+
+ // Configure INDEX_DATA
+ interp.SetTensorParametersReadWrite(1, kTfLiteInt32 /* type */, "index" /* name */,
+ {INDEX_DATA} /* dims */, quantization);
+
+ // Configure OUTPUT_VALUES
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "output_data" /* name */,
+ {OUTPUT_DATA} /* dims */, quantization);
+
+ auto param = make_alloc<TfLiteGatherParams>();
+
+ param->axis = 0;
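+
+ // With axis = 0, GATHER picks elements of 'input' by position: output[i] = input[index[i]].
+ // For example, input = {10, 20, 30} with index = {2, 0} would yield output = {30, 10}.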
+
+ // Add GATHER Node
+ // Run GATHER and store its result into Tensor #2
+ // - Read input data and index_data from Tensor #0 and #1, respectively
+ interp.AddNodeWithParameters({0, 1}, {2}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_GATHER, 1));
+
+ // Set Tensor #0 and #1 as Input, and Tensor #2 as Output
+ interp.SetInputs({0, 1});
+ interp.SetOutputs({2});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/gather_1.lst b/tools/nnapi_quickcheck/tests/gather_1.lst
new file mode 100644
index 000000000..923a05677
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/gather_1.lst
@@ -0,0 +1,6 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(INPUT_DATA, 8192)
+INT_VALUE(INDEX_DATA, 300)
diff --git a/tools/nnapi_quickcheck/tests/gather_2.cpp b/tools/nnapi_quickcheck/tests/gather_2.cpp
new file mode 100644
index 000000000..b3cb3c6ef
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/gather_2.cpp
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <iostream>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_gather_2, simple_test)
+{
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Set random test parameters
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "gather_2.lst"
+#undef INT_VALUE
+
+ const int32_t INPUT_DATA_H = INPUT_DATA_H_Value();
+ const int32_t INPUT_DATA_W = INPUT_DATA_W_Value();
+ const int32_t INDEX_DATA = INDEX_DATA_Value();
+
+ const int32_t OUTPUT_DATA_H = INPUT_DATA_H;
+ const int32_t OUTPUT_DATA_W = INDEX_DATA;
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(INPUT_DATA_H);
+ PRINT_VALUE(INPUT_DATA_W);
+ PRINT_VALUE(INDEX_DATA);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OUTPUT_DATA_H);
+ PRINT_VALUE(OUTPUT_DATA_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+
+ quantization.scale = 1;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(3);
+
+ // Configure INPUT_DATA
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "input" /* name */,
+ {INPUT_DATA_H, INPUT_DATA_W} /* dims */, quantization);
+
+ // Configure INDEX_DATA
+ interp.SetTensorParametersReadWrite(1, kTfLiteInt32 /* type */, "index" /* name */,
+ {INDEX_DATA} /* dims */, quantization);
+
+ // Configure OUTPUT_VALUES
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "output_data" /* name */,
+ {OUTPUT_DATA_H, OUTPUT_DATA_W} /* dims */, quantization);
+
+ auto param = make_alloc<TfLiteGatherParams>();
+
+ param->axis = 0;
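+
+ // With axis = 0 on a rank-2 input, GATHER selects whole rows: output[i, :] = input[index[i], :],
+ // i.e. a {INDEX_DATA, INPUT_DATA_W} result; the kernel's Prepare step presumably resizes the
+ // output tensor to that shape, so the dims declared above are placeholders.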
+
+ // Add GATHER Node
+ // Run GATHER and store its result into Tensor #2
+ // - Read input data and index_data from Tensor #0 and #1, respectively
+ interp.AddNodeWithParameters({0, 1}, {2}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_GATHER, 1));
+
+ // Set Tensor #0 and #1 as Input, and Tensor #2 as Output
+ interp.SetInputs({0, 1});
+ interp.SetOutputs({2});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/gather_2.lst b/tools/nnapi_quickcheck/tests/gather_2.lst
new file mode 100644
index 000000000..5bf6bd33a
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/gather_2.lst
@@ -0,0 +1,7 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(INPUT_DATA_H, 128192)
+INT_VALUE(INPUT_DATA_W, 4)
+INT_VALUE(INDEX_DATA, 300)
diff --git a/tools/nnapi_quickcheck/tests/logistic_quan_1.cpp b/tools/nnapi_quickcheck/tests/logistic_quan_1.cpp
new file mode 100644
index 000000000..dc6902d66
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/logistic_quan_1.cpp
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_logistic_quan_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "logistic_quan_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_N = IFM_N_Value();
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t OFM_N = IFM_N;
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_N);
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ TfLiteQuantizationParams in_quantization;
+ in_quantization.scale = 0.5f;
+ in_quantization.zero_point = 0;
+
+ TfLiteQuantizationParams out_quantization;
+ out_quantization.scale = 1.f / 256;
+ out_quantization.zero_point = 0;
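+
+ // NOTE LOGISTIC output lies in (0, 1), and NNAPI requires the quantized output of LOGISTIC
+ // to use scale = 1.f / 256 with zero_point = 0, which the parameters above match.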
+
+ // On AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(2);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, out_quantization);
+
+ // Configure input
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
+ {IFM_N, IFM_H, IFM_W, IFM_C} /* dims */, in_quantization);
+
+ // Add Logistic Node
+ // Run Logistic and store the result into Tensor #0
+ // - Read input from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_LOGISTIC, 1));
+
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/logistic_quan_1.lst b/tools/nnapi_quickcheck/tests/logistic_quan_1.lst
new file mode 100644
index 000000000..9b3d8ebcf
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/logistic_quan_1.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_N, 1)
+INT_VALUE(IFM_C, 1)
+INT_VALUE(IFM_H, 2)
+INT_VALUE(IFM_W, 2)
diff --git a/tools/nnapi_quickcheck/tests/max_pool_1.cpp b/tools/nnapi_quickcheck/tests/max_pool_1.cpp
new file mode 100644
index 000000000..bb538141d
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/max_pool_1.cpp
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <iostream>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_max_pool_1, simple_test)
+{
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Set random test parameters
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "max_pool_1.lst"
+#undef INT_VALUE
+
+ const TfLitePadding PADDING_TYPE = static_cast<TfLitePadding>(PADDING_TYPE_Value());
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t KER_H = KER_H_Value();
+ const int32_t KER_W = KER_W_Value();
+
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = OFM_H_Value();
+ const int32_t OFM_W = OFM_W_Value();
+
+ assert((OFM_H >= (IFM_H - KER_H)));
+ assert((OFM_W >= (IFM_W - KER_W)));
+ assert((kTfLitePaddingSame == PADDING_TYPE) || (kTfLitePaddingValid == PADDING_TYPE));
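+
+ // With stride 1 and kTfLitePaddingValid, OFM_H = IFM_H - KER_H + 1 (and likewise for width),
+ // so the defaults (IFM 3x4, kernel 3x4) give a 1x1 output; the asserts above only sanity-check
+ // a lower bound on the configured output size.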
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(PADDING_TYPE);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(KER_H);
+ PRINT_VALUE(KER_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(2);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure IFM
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Add Max Pooling Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free(),
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLitePoolParams>();
+
+ param->padding = PADDING_TYPE;
+ param->stride_width = 1;
+ param->stride_height = 1;
+ param->filter_width = KER_W;
+ param->filter_height = KER_H;
+ param->activation = kTfLiteActNone;
+
+ // Run MAX_POOL_2D and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_MAX_POOL_2D, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/max_pool_1.lst b/tools/nnapi_quickcheck/tests/max_pool_1.lst
new file mode 100644
index 000000000..4b5c1304e
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/max_pool_1.lst
@@ -0,0 +1,17 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
+
+INT_VALUE(KER_N, 1)
+INT_VALUE(KER_H, 3)
+INT_VALUE(KER_W, 4)
+
+INT_VALUE(OFM_H, 1)
+INT_VALUE(OFM_W, 1)
+
+// Default is kTfLitePaddingValid (= 2)
+INT_VALUE(PADDING_TYPE, 2)
diff --git a/tools/nnapi_quickcheck/tests/max_pool_quan_1.cpp b/tools/nnapi_quickcheck/tests/max_pool_quan_1.cpp
new file mode 100644
index 000000000..5768ddde8
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/max_pool_quan_1.cpp
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <iostream>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_max_pool_quan_1, simple_test)
+{
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Set random test parameters
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "max_pool_quan_1.lst"
+#undef INT_VALUE
+
+ const TfLitePadding PADDING_TYPE = static_cast<TfLitePadding>(PADDING_TYPE_Value());
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t KER_H = KER_H_Value();
+ const int32_t KER_W = KER_W_Value();
+
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = OFM_H_Value();
+ const int32_t OFM_W = OFM_W_Value();
+
+ assert((OFM_H >= (IFM_H - KER_H)));
+ assert((OFM_W >= (IFM_W - KER_W)));
+ assert((kTfLitePaddingSame == PADDING_TYPE) || (kTfLitePaddingValid == PADDING_TYPE));
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(PADDING_TYPE);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(KER_H);
+ PRINT_VALUE(KER_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+ quantization.scale = 1.0f;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(2);
+
+ // Configure OFM
+ //
+ // NOTE This is the quantized variant of the test, so both tensors are kTfLiteUInt8
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure IFM
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Add Max Pooling Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free(),
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLitePoolParams>();
+
+ param->padding = PADDING_TYPE;
+ param->stride_width = 1;
+ param->stride_height = 1;
+ param->filter_width = KER_W;
+ param->filter_height = KER_H;
+ param->activation = kTfLiteActNone;
+
+ // Run MAX_POOL_2D and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_MAX_POOL_2D, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/max_pool_quan_1.lst b/tools/nnapi_quickcheck/tests/max_pool_quan_1.lst
new file mode 100644
index 000000000..4b5c1304e
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/max_pool_quan_1.lst
@@ -0,0 +1,17 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
+
+INT_VALUE(KER_N, 1)
+INT_VALUE(KER_H, 3)
+INT_VALUE(KER_W, 4)
+
+INT_VALUE(OFM_H, 1)
+INT_VALUE(OFM_W, 1)
+
+// Default is kTfLitePaddingValid (= 2)
+INT_VALUE(PADDING_TYPE, 2)
diff --git a/tools/nnapi_quickcheck/tests/mul_1.cpp b/tools/nnapi_quickcheck/tests/mul_1.cpp
new file mode 100644
index 000000000..3a4ae5c8e
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/mul_1.cpp
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_mul_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "mul_1.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_1D = LEFT_1D_Value();
+ const int32_t LEFT_2D = LEFT_2D_Value();
+ const int32_t LEFT_3D = LEFT_3D_Value();
+
+ const int32_t RIGHT_W = RIGHT_W_Value();
+
+ const int32_t OFM_1D = LEFT_1D_Value();
+ const int32_t OFM_2D = LEFT_2D_Value();
+ const int32_t OFM_3D = LEFT_3D_Value();
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_1D);
+ PRINT_VALUE(LEFT_2D);
+ PRINT_VALUE(LEFT_3D);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_1D);
+ PRINT_VALUE(OFM_2D);
+ PRINT_VALUE(OFM_3D);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+
+ quantization.scale = 1;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_1D, OFM_2D, OFM_3D} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_1D, LEFT_2D, LEFT_3D} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT_W} /* dims */, quantization);
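+
+ // NOTE The rank-1 {RIGHT_W} right operand broadcasts against the last dimension of the
+ // {LEFT_1D, LEFT_2D, LEFT_3D} left operand, exercising MUL's broadcast path.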
+
+ // Add MUL Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free(),
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteMulParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run MUL and store the result into Tensor #0
+ // - Read the left operand from Tensor #1
+ // - Read the right operand from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_MUL, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+ param.tensor_logging = 1;
+ param.log_path = "report/tensor_mul_1.log";
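+ // tensor_logging presumably makes the runner dump the compared tensor values to 'log_path'
+ // so a mismatch can be inspected offline.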
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/mul_1.lst b/tools/nnapi_quickcheck/tests/mul_1.lst
new file mode 100644
index 000000000..1d42159de
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/mul_1.lst
@@ -0,0 +1,10 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+// (3, 1, 4)
+INT_VALUE(LEFT_1D, 3)
+INT_VALUE(LEFT_2D, 1)
+INT_VALUE(LEFT_3D, 4)
+
+INT_VALUE(RIGHT_W, 4)
diff --git a/tools/nnapi_quickcheck/tests/mul_2.cpp b/tools/nnapi_quickcheck/tests/mul_2.cpp
new file mode 100644
index 000000000..b117cd602
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/mul_2.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_mul_2, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "mul_2.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_D1 = LEFT_D1_Value();
+ const int32_t LEFT_D2 = LEFT_D2_Value();
+ const int32_t LEFT_D3 = LEFT_D3_Value();
+
+ const int32_t RIGHT_D1 = RIGHT_D1_Value();
+
+ const int32_t OFM_D1 = LEFT_D1;
+ const int32_t OFM_D2 = LEFT_D2;
+ const int32_t OFM_D3 = LEFT_D3;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_D1);
+ PRINT_VALUE(LEFT_D2);
+ PRINT_VALUE(LEFT_D3);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_D1);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_D1);
+ PRINT_VALUE(OFM_D2);
+ PRINT_VALUE(OFM_D3);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+
+ quantization.scale = 1;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_D1, OFM_D2, OFM_D3} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_D1, LEFT_D2, LEFT_D3} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT_D1} /* dims */, quantization);
+
+ // Add MUL Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free(),
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteMulParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run MUL and store the result into Tensor #0
+ // - Read the left operand from Tensor #1
+ // - Read the right operand from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_MUL, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/mul_2.lst b/tools/nnapi_quickcheck/tests/mul_2.lst
new file mode 100644
index 000000000..da53e7eee
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/mul_2.lst
@@ -0,0 +1,9 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_D1, 5)
+INT_VALUE(LEFT_D2, 3)
+INT_VALUE(LEFT_D3, 12)
+
+INT_VALUE(RIGHT_D1, 12)
diff --git a/tools/nnapi_quickcheck/tests/mul_quan_1.cpp b/tools/nnapi_quickcheck/tests/mul_quan_1.cpp
new file mode 100644
index 000000000..7207a90fb
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/mul_quan_1.cpp
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_mul_quan_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "mul_1.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_1D = LEFT_1D_Value();
+ const int32_t LEFT_2D = LEFT_2D_Value();
+ const int32_t LEFT_3D = LEFT_3D_Value();
+
+ const int32_t RIGHT_W = RIGHT_W_Value();
+
+ const int32_t OFM_1D = LEFT_1D_Value();
+ const int32_t OFM_2D = LEFT_2D_Value();
+ const int32_t OFM_3D = LEFT_3D_Value();
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_1D);
+ PRINT_VALUE(LEFT_2D);
+ PRINT_VALUE(LEFT_3D);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_1D);
+ PRINT_VALUE(OFM_2D);
+ PRINT_VALUE(OFM_3D);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+ quantization.zero_point = 0;
+
+ // On AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(3);
+
+ // Configure output
+ float max_scale =
+ std::numeric_limits<uint8_t>::max(); // * input1_scale(1.0f) * input2_scale(1.0f)
+ quantization.scale = max_scale;
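+
+ // NOTE TFLite rescales quantized MUL by input1_scale * input2_scale / output_scale, which
+ // presumably must stay below 1; with unit input scales, the 255.0f output scale is a safe bound.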
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {OFM_1D, OFM_2D, OFM_3D} /* dims */, quantization);
+
+ // Configure input(s)
+ quantization.scale = 1.0f;
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "left" /* name */,
+ {LEFT_1D, LEFT_2D, LEFT_3D} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteUInt8 /* type */, "right" /* name */,
+ {RIGHT_W} /* dims */, quantization);
+
+ // Add MUL Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free(),
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteMulParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run MUL and store the result into Tensor #0
+ // - Read the left operand from Tensor #1
+ // - Read the right operand from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_MUL, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/mul_quan_1.lst b/tools/nnapi_quickcheck/tests/mul_quan_1.lst
new file mode 100644
index 000000000..d850f375a
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/mul_quan_1.lst
@@ -0,0 +1,10 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+// (300, 1, 4)
+INT_VALUE(LEFT_1D, 300)
+INT_VALUE(LEFT_2D, 1)
+INT_VALUE(LEFT_3D, 4)
+
+INT_VALUE(RIGHT_W, 4)
diff --git a/tools/nnapi_quickcheck/tests/relu1_1.cpp b/tools/nnapi_quickcheck/tests/relu1_1.cpp
new file mode 100644
index 000000000..aeefe2f06
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu1_1.cpp
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+int main(int argc, char **argv)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "relu1_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On an AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices lie in [0, N)
+ interp.AddTensors(2);
+
+ // Configure Output Tensor
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_H, OFM_W} /* dims */, quantization);
+
+ // Configure Input Tensor
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {IFM_H, IFM_W} /* dims */, quantization);
+
+ // Add ReLU Node
+ // Run ReLU and store its result into Tensor #0
+ // - Read IFM from Tensor #1
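+ // RELU_N1_TO_1 computes f(x) = min(max(x, -1), 1) element-wise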
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_RELU_N1_TO_1, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ return RandomTestRunner{SEED, param}.run(builder);
+}
diff --git a/tools/nnapi_quickcheck/tests/relu1_1.lst b/tools/nnapi_quickcheck/tests/relu1_1.lst
new file mode 100644
index 000000000..4f61845a7
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu1_1.lst
@@ -0,0 +1,6 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_H, 16)
+INT_VALUE(IFM_W, 16)
diff --git a/tools/nnapi_quickcheck/tests/relu6_1.cpp b/tools/nnapi_quickcheck/tests/relu6_1.cpp
new file mode 100644
index 000000000..76bbe954c
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu6_1.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_relu6_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "relu6_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On an AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices lie in [0, N)
+ interp.AddTensors(2);
+
+ // Configure Output Tensor
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_H, OFM_W} /* dims */, quantization);
+
+ // Configure Input Tensor
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {IFM_H, IFM_W} /* dims */, quantization);
+
+ // Add ReLU Node
+ // Run ReLU and store its result into Tensor #0
+ // - Read IFM from Tensor #1
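+ // RELU6 computes f(x) = min(max(x, 0), 6) element-wise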
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_RELU6, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/relu6_1.lst b/tools/nnapi_quickcheck/tests/relu6_1.lst
new file mode 100644
index 000000000..4f61845a7
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu6_1.lst
@@ -0,0 +1,6 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_H, 16)
+INT_VALUE(IFM_W, 16)
diff --git a/tools/nnapi_quickcheck/tests/relu6_quan_1.cpp b/tools/nnapi_quickcheck/tests/relu6_quan_1.cpp
new file mode 100644
index 000000000..fe849f955
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu6_quan_1.cpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+int main(int argc, char **argv)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "relu6_quan_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+ quantization.scale = 1.0f;
+ quantization.zero_point = 0;
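+ // With scale 1.0f and zero_point 0, dequantization is the identity
+ // (real_value = 1.0f * (q - 0) = q), so RELU6 saturates at quantized value 6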
+
+ // On an AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices lie in [0, N)
+ interp.AddTensors(2);
+
+ // Configure Output Tensor
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {OFM_H, OFM_W} /* dims */, quantization);
+
+ // Configure Input Tensor
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
+ {IFM_H, IFM_W} /* dims */, quantization);
+
+ // Add ReLU Node
+ // Run ReLU and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_RELU6, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ return RandomTestRunner{SEED, param}.run(builder);
+}
diff --git a/tools/nnapi_quickcheck/tests/relu6_quan_1.lst b/tools/nnapi_quickcheck/tests/relu6_quan_1.lst
new file mode 100644
index 000000000..4f61845a7
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu6_quan_1.lst
@@ -0,0 +1,6 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_H, 16)
+INT_VALUE(IFM_W, 16)
diff --git a/tools/nnapi_quickcheck/tests/relu_1.cpp b/tools/nnapi_quickcheck/tests/relu_1.cpp
new file mode 100644
index 000000000..f754c3d8e
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu_1.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_relu_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "relu_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On an AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices lie in [0, N)
+ interp.AddTensors(2);
+
+ // Configure Output Tensor
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_H, OFM_W} /* dims */, quantization);
+
+ // Configure Input Tensor
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {IFM_H, IFM_W} /* dims */, quantization);
+
+ // Add ReLU Node
+ // Run ReLU and store its result into Tensor #0
+ // - Read IFM from Tensor #1
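+ // RELU computes f(x) = max(x, 0) element-wise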
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_RELU, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/relu_1.lst b/tools/nnapi_quickcheck/tests/relu_1.lst
new file mode 100644
index 000000000..4f61845a7
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu_1.lst
@@ -0,0 +1,6 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_H, 16)
+INT_VALUE(IFM_W, 16)
diff --git a/tools/nnapi_quickcheck/tests/relu_2.cpp b/tools/nnapi_quickcheck/tests/relu_2.cpp
new file mode 100644
index 000000000..c08764520
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu_2.cpp
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_relu_2, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "relu_2.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On an AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices lie in [0, N)
+ interp.AddTensors(2);
+
+ // Configure Output Tensor
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure Input Tensor
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Add ReLU Node
+ // Run ReLU and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_RELU, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/relu_2.lst b/tools/nnapi_quickcheck/tests/relu_2.lst
new file mode 100644
index 000000000..343bff819
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu_2.lst
@@ -0,0 +1,7 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_H, 16)
+INT_VALUE(IFM_W, 16)
+INT_VALUE(IFM_C, 3)
diff --git a/tools/nnapi_quickcheck/tests/relu_3.cpp b/tools/nnapi_quickcheck/tests/relu_3.cpp
new file mode 100644
index 000000000..6c41bc12a
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu_3.cpp
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_relu_3, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "relu_3.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_N = IFM_N_Value();
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_N);
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t OFM_N = IFM_N;
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On an AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices lie in [0, N)
+ interp.AddTensors(2);
+
+ // Configure Output Tensor
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure Input Tensor
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {IFM_N, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Add ReLU Node
+ // Run ReLU and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_RELU, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/relu_3.lst b/tools/nnapi_quickcheck/tests/relu_3.lst
new file mode 100644
index 000000000..a3a405c10
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu_3.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_H, 16)
+INT_VALUE(IFM_W, 16)
+INT_VALUE(IFM_C, 3)
+INT_VALUE(IFM_N, 1)
diff --git a/tools/nnapi_quickcheck/tests/relu_quan_1.cpp b/tools/nnapi_quickcheck/tests/relu_quan_1.cpp
new file mode 100644
index 000000000..59fe5d254
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu_quan_1.cpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+int main(int argc, char **argv)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "relu_quan_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+ quantization.scale = 1.0f;
+ quantization.zero_point = 0;
+
+ // On an AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices lie in [0, N)
+ interp.AddTensors(2);
+
+ // Configure Output Tensor
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {OFM_H, OFM_W} /* dims */, quantization);
+
+ // Configure Input Tensor
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
+ {IFM_H, IFM_W} /* dims */, quantization);
+
+ // Add ReLU Node
+ // Run ReLU and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_RELU, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ return RandomTestRunner{SEED, param}.run(builder);
+}
diff --git a/tools/nnapi_quickcheck/tests/relu_quan_1.lst b/tools/nnapi_quickcheck/tests/relu_quan_1.lst
new file mode 100644
index 000000000..4f61845a7
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/relu_quan_1.lst
@@ -0,0 +1,6 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_H, 16)
+INT_VALUE(IFM_W, 16)
diff --git a/tools/nnapi_quickcheck/tests/reshape_1.cpp b/tools/nnapi_quickcheck/tests/reshape_1.cpp
new file mode 100644
index 000000000..21d35a5ca
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/reshape_1.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <iostream>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_reshape_1, simple_test)
+{
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Set random test parameters
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "reshape_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t OUT_L = IFM_C * IFM_H * IFM_W;
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OUT_L);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t dims[2] = {1, OUT_L};
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ // A: This may be necessary, because the quantization values (scale, zero_point) of
+ // TENSOR_INT32 and TENSOR_QUANT8_ASYMM are passed on to the runtime.
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On an AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices lie in [0, N)
+ interp.AddTensors(3);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {1 /*N*/, OUT_L} /* dims */, quantization);
+
+ // Configure IFM
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Configure Shape
+ interp.SetTensorParametersReadOnly(2, kTfLiteInt32 /* type */, "shape" /* name */,
+ {2} /* dims */, quantization,
+ reinterpret_cast<const char *>(dims), 2 * sizeof(int32_t));
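+ // NOTE The new shape is supplied twice, as constant Tensor #2 and again via
+ // TfLiteReshapeParams below, since the RESHAPE kernel may take it from either source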
+
+ // Add Reshape Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free.
+ // So, param should be allocated with malloc.
+ auto param = make_alloc<TfLiteReshapeParams>();
+
+ param->num_dimensions = 2;
+ param->shape[0] = 1;
+ param->shape[1] = OUT_L;
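+ // e.g. with the default .lst values (IFM_C = 2, IFM_H = 4, IFM_W = 8), the
+ // 1x4x8x2 input is flattened into a 1x64 output (OUT_L = 2 * 4 * 8 = 64)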
+
+ // Run Reshape and store its result into Tensor #0
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_RESHAPE, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/reshape_1.lst b/tools/nnapi_quickcheck/tests/reshape_1.lst
new file mode 100644
index 000000000..fcaaff016
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/reshape_1.lst
@@ -0,0 +1,7 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 4)
+INT_VALUE(IFM_W, 8)
diff --git a/tools/nnapi_quickcheck/tests/reshape_quan_1.cpp b/tools/nnapi_quickcheck/tests/reshape_quan_1.cpp
new file mode 100644
index 000000000..7f852fd80
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/reshape_quan_1.cpp
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <iostream>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_reshape_quan_1, simple_test)
+{
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Set random test parameters
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "reshape_quan_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t OUT_L = IFM_C * IFM_H * IFM_W;
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OUT_L);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t dims[2] = {1, OUT_L};
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ // A: This may be necessary, because the quantization values (scale, zero_point) of
+ // TENSOR_INT32 and TENSOR_QUANT8_ASYMM are passed on to the runtime.
+ TfLiteQuantizationParams quantization;
+ quantization.scale = 1.0f;
+ quantization.zero_point = 0;
+
+ // On an AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices lie in [0, N)
+ interp.AddTensors(3);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {1 /*N*/, OUT_L} /* dims */, quantization);
+
+ // Configure IFM
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Configure Shape
+ interp.SetTensorParametersReadOnly(2, kTfLiteInt32 /* type */, "shape" /* name */,
+ {2} /* dims */, quantization,
+ reinterpret_cast<const char *>(dims), 2 * sizeof(int32_t));
+
+ // Add Reshape Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free.
+ // So, param should be allocated with malloc.
+ auto param = make_alloc<TfLiteReshapeParams>();
+
+ param->num_dimensions = 2;
+ param->shape[0] = 1;
+ param->shape[1] = OUT_L;
+
+ // Run Reshape and store its result into Tensor #0
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_RESHAPE, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/reshape_quan_1.lst b/tools/nnapi_quickcheck/tests/reshape_quan_1.lst
new file mode 100644
index 000000000..fcaaff016
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/reshape_quan_1.lst
@@ -0,0 +1,7 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 4)
+INT_VALUE(IFM_W, 8)
diff --git a/tools/nnapi_quickcheck/tests/resize_bilinear_1.cpp b/tools/nnapi_quickcheck/tests/resize_bilinear_1.cpp
new file mode 100644
index 000000000..37d8ab525
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/resize_bilinear_1.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <iostream>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_resize_bilinear_1, simple_test)
+{
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Set random test parameters
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "resize_bilinear_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = OFM_H_Value();
+ const int32_t OFM_W = OFM_W_Value();
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ int32_t size_data[2] = {OFM_H, OFM_W};
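+ // 'size_data' holds the target spatial size; with the default .lst values the
+ // 3x4 input is upsampled to 30x40 (a 10x scale in each spatial dimension)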
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ // A: This may be necessary, because the quantization values (scale, zero_point) of
+ // TENSOR_INT32 and TENSOR_QUANT8_ASYMM are passed on to the runtime.
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On an AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices lie in [0, N)
+ interp.AddTensors(3);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure IFM
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Configure Size
+ interp.SetTensorParametersReadOnly(
+ 2, kTfLiteInt32 /* type */, "size" /* name */, {2} /* dims */, quantization,
+ reinterpret_cast<const char *>(size_data), 2 * sizeof(int32_t));
+
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free.
+ // So, param should be allocated with malloc.
+ auto param = make_alloc<TfLiteResizeBilinearParams>();
+
+ // NOTE When 'align_corners' is true, the corner pixels of the input and output
+ // are aligned so that the values at the corners are preserved exactly; false
+ // selects T/F Lite's default bilinear scaling
+ param->align_corners = false;
+
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_RESIZE_BILINEAR, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/resize_bilinear_1.lst b/tools/nnapi_quickcheck/tests/resize_bilinear_1.lst
new file mode 100644
index 000000000..cc3dbd5cc
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/resize_bilinear_1.lst
@@ -0,0 +1,10 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_C, 2)
+INT_VALUE(IFM_H, 3)
+INT_VALUE(IFM_W, 4)
+
+INT_VALUE(OFM_H, 30)
+INT_VALUE(OFM_W, 40)
diff --git a/tools/nnapi_quickcheck/tests/softmax_1.cpp b/tools/nnapi_quickcheck/tests/softmax_1.cpp
new file mode 100644
index 000000000..5e15b6169
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/softmax_1.cpp
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_softmax_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "softmax_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_C = 1;
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ const nnfw::util::feature::Shape ifm_shape{IFM_C, IFM_H, IFM_W};
+
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On an AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices lie in [0, N)
+ interp.AddTensors(2);
+
+ // Configure Output Tensor
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {1, IFM_H * IFM_W} /* dims */, quantization);
+
+ // Configure Input Tensor
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {1, IFM_H * IFM_W} /* batch_size, input_size */,
+ quantization);
+
+ // Add Softmax Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free.
+ // So, param should be allocated with malloc.
+ auto param = make_alloc<TfLiteSoftmaxParams>();
+
+ param->beta = 1.0;
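+ // Softmax computes y_i = exp(beta * x_i) / sum_j exp(beta * x_j);
+ // beta = 1.0 is the standard softmax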
+
+ // Run Softmax and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SOFTMAX, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/softmax_1.lst b/tools/nnapi_quickcheck/tests/softmax_1.lst
new file mode 100644
index 000000000..1ef9da075
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/softmax_1.lst
@@ -0,0 +1,6 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_H, 2)
+INT_VALUE(IFM_W, 2)
diff --git a/tools/nnapi_quickcheck/tests/softmax_2.cpp b/tools/nnapi_quickcheck/tests/softmax_2.cpp
new file mode 100644
index 000000000..489016af5
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/softmax_2.cpp
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_softmax_2, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define FLOAT_VALUE(NAME, VALUE) FloatVar NAME##_Value(#NAME, VALUE);
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "softmax_2.lst"
+#undef INT_VALUE
+#undef FLOAT_VALUE
+
+ const int32_t IFM_C = 1;
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+ const float BETA = BETA_Value();
+
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_VALUE(BETA);
+ PRINT_NEWLINE();
+
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ const nnfw::util::feature::Shape ifm_shape{IFM_C, IFM_H, IFM_W};
+
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On an AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices lie in [0, N)
+ interp.AddTensors(2);
+
+ // Configure Output Tensor
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {1, IFM_H * IFM_W} /* dims */, quantization);
+
+ // Configure Input Tensor
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {1, IFM_H * IFM_W} /* batch_size, input_size */,
+ quantization);
+
+ // Add Softmax Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free.
+ // So, param should be allocated with malloc.
+ auto param = make_alloc<TfLiteSoftmaxParams>();
+
+ param->beta = BETA;
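+ // BETA scales the logits before the exponential: y_i = exp(BETA * x_i) / sum_j exp(BETA * x_j);
+ // the default BETA of 0.1 flattens the output distribution (BETA > 1 would sharpen it)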
+
+ // Run Softmax and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SOFTMAX, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/softmax_2.lst b/tools/nnapi_quickcheck/tests/softmax_2.lst
new file mode 100644
index 000000000..1c381bf49
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/softmax_2.lst
@@ -0,0 +1,11 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+#ifndef FLOAT_VALUE
+#error "FLOAT_VALUE should be defined"
+#endif // FLOAT_VALUE
+
+INT_VALUE(IFM_H, 2)
+INT_VALUE(IFM_W, 2)
+FLOAT_VALUE(BETA, 0.1)
diff --git a/tools/nnapi_quickcheck/tests/softmax_quan_1.cpp b/tools/nnapi_quickcheck/tests/softmax_quan_1.cpp
new file mode 100644
index 000000000..347262fa6
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/softmax_quan_1.cpp
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_softmax_quan_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "softmax_quan_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_C = 1;
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ const nnfw::util::feature::Shape ifm_shape{IFM_C, IFM_H, IFM_W};
+
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+ quantization.scale = 1.0f / 256;
+ quantization.zero_point = 0;
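+ // Softmax outputs lie in [0, 1), so a scale of 1/256 with zero_point 0 maps them
+ // onto the full uint8 range; NNAPI requires exactly these quantization parameters
+ // for a quantized SOFTMAX output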
+
+ // On an AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices lie in [0, N)
+ interp.AddTensors(2);
+
+ // Configure Output Tensor
+ interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
+ {1, IFM_H * IFM_W} /* dims */, quantization);
+
+ // Configure Input Tensor
+ interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
+ {1, IFM_H * IFM_W} /* batch_size, input_size */,
+ quantization);
+
+ // Add Softmax Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free.
+ // So, param should be allocated with malloc.
+ auto param = make_alloc<TfLiteSoftmaxParams>();
+
+ param->beta = 1.0;
+
+ // Run Softmax and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SOFTMAX, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/softmax_quan_1.lst b/tools/nnapi_quickcheck/tests/softmax_quan_1.lst
new file mode 100644
index 000000000..1ef9da075
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/softmax_quan_1.lst
@@ -0,0 +1,6 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_H, 2)
+INT_VALUE(IFM_W, 2)
diff --git a/tools/nnapi_quickcheck/tests/split_1.cpp b/tools/nnapi_quickcheck/tests/split_1.cpp
new file mode 100644
index 000000000..742c5dbed
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/split_1.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_split_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "split_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_N = IFM_N_Value();
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+ const int32_t NUM_SPLIT = NUM_SPLIT_Value();
+ const int32_t AXIS = AXIS_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_N);
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_VALUE(NUM_SPLIT);
+ PRINT_VALUE(AXIS);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t OFM_N = IFM_N;
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+ const int32_t axis[1] = {AXIS};
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(NUM_SPLIT + 2);
+
+ // Configure Input Tensor(s)
+ interp.SetTensorParametersReadOnly(0, kTfLiteInt32 /* type */, "axis" /* name */,
+ {1} /* dims */, quantization,
+ reinterpret_cast<const char *>(axis), 1 * sizeof(int32_t));
+
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {IFM_N, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Configure Output Tensor(s)
+ std::vector<int> ofm_indexes;
+
+ for (uint32_t n = 0; n < NUM_SPLIT; ++n)
+ {
+ const auto ofm_index = 2 + n;
+
+ interp.SetTensorParametersReadWrite(ofm_index, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ ofm_indexes.emplace_back(ofm_index);
+ }
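+
+ // NOTE The output dims set above are only initial values: SPLIT's Prepare()
+ // is expected to resize each output along the split axis (e.g. a {1, 5, 30, 1}
+ // input split 5 ways on axis 1 yields five {1, 1, 30, 1} outputs).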
+
+ auto *param = reinterpret_cast<TfLiteSplitParams *>(malloc(sizeof(TfLiteSplitParams)));
+
+ param->num_splits = NUM_SPLIT;
+
+ // Add SPLIT Node
+ // Run SPLIT and store its results into Tensors #2 ~ #(NUM_SPLIT + 1)
+ // - Read axis and IFM from Tensor #0 and #1, respectively
+ interp.AddNodeWithParameters({0, 1}, ofm_indexes, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SPLIT, 1));
+
+ // Set Tensor #1 as Input #0, and Tensors #2 ~ #(NUM_SPLIT + 1) as Outputs #0 ~ #(NUM_SPLIT - 1)
+ interp.SetInputs({1});
+ interp.SetOutputs(ofm_indexes);
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/split_1.lst b/tools/nnapi_quickcheck/tests/split_1.lst
new file mode 100644
index 000000000..823bf24fa
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/split_1.lst
@@ -0,0 +1,10 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_N, 1)
+INT_VALUE(IFM_C, 1)
+INT_VALUE(IFM_H, 5)
+INT_VALUE(IFM_W, 30)
+INT_VALUE(NUM_SPLIT, 5)
+INT_VALUE(AXIS, 1)
diff --git a/tools/nnapi_quickcheck/tests/split_2.cpp b/tools/nnapi_quickcheck/tests/split_2.cpp
new file mode 100644
index 000000000..d70e35ca7
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/split_2.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_split_2, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "split_2.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_N = IFM_N_Value();
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+ const int32_t NUM_SPLIT = NUM_SPLIT_Value();
+ const int32_t AXIS = AXIS_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_N);
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_VALUE(NUM_SPLIT);
+ PRINT_VALUE(AXIS);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t OFM_N = IFM_N;
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+ const int32_t axis[1] = {AXIS};
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(NUM_SPLIT + 2);
+
+ // Configure Input Tensor(s)
+ interp.SetTensorParametersReadOnly(0, kTfLiteInt32 /* type */, "axis" /* name */,
+ {1} /* dims */, quantization,
+ reinterpret_cast<const char *>(axis), 1 * sizeof(int32_t));
+
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {IFM_N, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Configure Output Tensor(s)
+ std::vector<int> ofm_indexes;
+
+ for (uint32_t n = 0; n < NUM_SPLIT; ++n)
+ {
+ const auto ofm_index = 2 + n;
+
+ interp.SetTensorParametersReadWrite(ofm_index, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ ofm_indexes.emplace_back(ofm_index);
+ }
+
+ auto *param = reinterpret_cast<TfLiteSplitParams *>(malloc(sizeof(TfLiteSplitParams)));
+
+ param->num_splits = NUM_SPLIT;
+
+ // Add SPLIT Node
+ // Run SPLIT and store its results into Tensors #2 ~ #(NUM_SPLIT + 1)
+ // - Read axis and IFM from Tensor #0 and #1, respectively
+ interp.AddNodeWithParameters({0, 1}, ofm_indexes, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SPLIT, 1));
+
+ // Set Tensor #1 as Input #0, and Tensors #2 ~ #(NUM_SPLIT + 1) as Outputs #0 ~ #(NUM_SPLIT - 1)
+ interp.SetInputs({1});
+ interp.SetOutputs(ofm_indexes);
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/split_2.lst b/tools/nnapi_quickcheck/tests/split_2.lst
new file mode 100644
index 000000000..ebfbab2d5
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/split_2.lst
@@ -0,0 +1,10 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_N, 1)
+INT_VALUE(IFM_C, 1)
+INT_VALUE(IFM_H, 5)
+INT_VALUE(IFM_W, 30)
+INT_VALUE(NUM_SPLIT, 3)
+INT_VALUE(AXIS, 2)
diff --git a/tools/nnapi_quickcheck/tests/split_3.cpp b/tools/nnapi_quickcheck/tests/split_3.cpp
new file mode 100644
index 000000000..47359642d
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/split_3.cpp
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_split_3, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "split_3.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+ const int32_t NUM_SPLIT = NUM_SPLIT_Value();
+ const int32_t AXIS = AXIS_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_VALUE(NUM_SPLIT);
+ PRINT_VALUE(AXIS);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+ const int32_t axis[1] = {AXIS};
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(NUM_SPLIT + 2);
+
+ // Configure Input Tensor(s)
+ interp.SetTensorParametersReadOnly(0, kTfLiteInt32 /* type */, "axis" /* name */,
+ {1} /* dims */, quantization,
+ reinterpret_cast<const char *>(axis), 1 * sizeof(int32_t));
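+
+ // The split axis is baked in as a read-only constant tensor, so it is not
+ // listed in SetInputs() below; only the data tensor (#1) is a model input.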
+
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {IFM_H, IFM_W} /* dims */, quantization);
+
+ // Configure Output Tensor(s)
+ std::vector<int> ofm_indexes;
+
+ for (uint32_t n = 0; n < NUM_SPLIT; ++n)
+ {
+ const auto ofm_index = 2 + n;
+
+ interp.SetTensorParametersReadWrite(ofm_index, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_H, OFM_W} /* dims */, quantization);
+
+ ofm_indexes.emplace_back(ofm_index);
+ }
+
+ auto *param = reinterpret_cast<TfLiteSplitParams *>(malloc(sizeof(TfLiteSplitParams)));
+
+ param->num_splits = NUM_SPLIT;
+
+ // Add SPLIT Node
+ // Run SPLIT and store its results into Tensors #2 ~ #(NUM_SPLIT + 1)
+ // - Read axis and IFM from Tensor #0 and #1, respectively
+ interp.AddNodeWithParameters({0, 1}, ofm_indexes, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SPLIT, 1));
+
+ // Set Tensor #1 as Input #0, and Tensors #2 ~ #(NUM_SPLIT + 1) as Outputs #0 ~ #(NUM_SPLIT - 1)
+ interp.SetInputs({1});
+ interp.SetOutputs(ofm_indexes);
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/split_3.lst b/tools/nnapi_quickcheck/tests/split_3.lst
new file mode 100644
index 000000000..300bb02b7
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/split_3.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_H, 5)
+INT_VALUE(IFM_W, 30)
+INT_VALUE(NUM_SPLIT, 3)
+INT_VALUE(AXIS, 1)
diff --git a/tools/nnapi_quickcheck/tests/split_4.cpp b/tools/nnapi_quickcheck/tests/split_4.cpp
new file mode 100644
index 000000000..d16e75d5c
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/split_4.cpp
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+#include "util/feature/Shape.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <random>
+#include <iostream>
+#include <cassert>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_split_4, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "split_4.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+ const int32_t NUM_SPLIT = NUM_SPLIT_Value();
+ const int32_t AXIS = AXIS_Value();
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_VALUE(NUM_SPLIT);
+ PRINT_VALUE(AXIS);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+ const int32_t axis[1] = {AXIS};
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(NUM_SPLIT + 2);
+
+ // Configure Input Tensor(s)
+ interp.SetTensorParametersReadOnly(0, kTfLiteInt32 /* type */, "axis" /* name */,
+ {1} /* dims */, quantization,
+ reinterpret_cast<const char *>(axis), 1 * sizeof(int32_t));
+
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {IFM_H, IFM_W} /* dims */, quantization);
+
+ // Configure Output Tensor(s)
+ std::vector<int> ofm_indexes;
+
+ for (uint32_t n = 0; n < NUM_SPLIT; ++n)
+ {
+ const auto ofm_index = 2 + n;
+
+ interp.SetTensorParametersReadWrite(ofm_index, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_H, OFM_W} /* dims */, quantization);
+
+ ofm_indexes.emplace_back(ofm_index);
+ }
+
+ auto *param = reinterpret_cast<TfLiteSplitParams *>(malloc(sizeof(TfLiteSplitParams)));
+
+ param->num_splits = NUM_SPLIT;
+
+ // Add SPLIT Node
+ // Run SPLIT and store its results into Tensors #2 ~ #(NUM_SPLIT + 1)
+ // - Read axis and IFM from Tensor #0 and #1, respectively
+ interp.AddNodeWithParameters({0, 1}, ofm_indexes, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SPLIT, 1));
+
+ // Set Tensor #1 as Input #0, and Tensors #2 ~ #(NUM_SPLIT + 1) as Outputs #0 ~ #(NUM_SPLIT - 1)
+ interp.SetInputs({1});
+ interp.SetOutputs(ofm_indexes);
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/split_4.lst b/tools/nnapi_quickcheck/tests/split_4.lst
new file mode 100644
index 000000000..5b2882828
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/split_4.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_H, 5)
+INT_VALUE(IFM_W, 30)
+INT_VALUE(NUM_SPLIT, 5)
+INT_VALUE(AXIS, 0)
diff --git a/tools/nnapi_quickcheck/tests/sub_1.cpp b/tools/nnapi_quickcheck/tests/sub_1.cpp
new file mode 100644
index 000000000..2734f525e
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/sub_1.cpp
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_sub_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "sub_1.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT_N = RIGHT_N_Value();
+ const int32_t RIGHT_C = RIGHT_C_Value();
+ const int32_t RIGHT_H = RIGHT_H_Value();
+ const int32_t RIGHT_W = RIGHT_W_Value();
+
+ const int32_t OFM_N = std::max(LEFT_N, RIGHT_N);
+ const int32_t OFM_C = std::max(LEFT_C, RIGHT_C);
+ const int32_t OFM_H = std::max(LEFT_H, RIGHT_H);
+ const int32_t OFM_W = std::max(LEFT_W, RIGHT_W);
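+
+ // SUB broadcasts its operands, so each output dimension is the larger of the
+ // corresponding input dimensions (they must match or one of them must be 1).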
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_C);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_N);
+ PRINT_VALUE(RIGHT_C);
+ PRINT_VALUE(RIGHT_H);
+ PRINT_VALUE(RIGHT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_N, LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT_N, RIGHT_H, RIGHT_W, RIGHT_C} /* dims */,
+ quantization);
+
+ // Add Subtraction Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteSubParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Sub and store the result into Tensor #0
+ // - Read Left from Tensor #1
+ // - Read Right from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SUB, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/sub_1.lst b/tools/nnapi_quickcheck/tests/sub_1.lst
new file mode 100644
index 000000000..fa17caebb
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/sub_1.lst
@@ -0,0 +1,13 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_C, 3)
+INT_VALUE(LEFT_H, 16)
+INT_VALUE(LEFT_W, 16)
+
+INT_VALUE(RIGHT_N, 1)
+INT_VALUE(RIGHT_C, 3)
+INT_VALUE(RIGHT_H, 16)
+INT_VALUE(RIGHT_W, 16)
diff --git a/tools/nnapi_quickcheck/tests/sub_2.cpp b/tools/nnapi_quickcheck/tests/sub_2.cpp
new file mode 100644
index 000000000..88e060847
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/sub_2.cpp
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_sub_2, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "sub_2.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT = RIGHT_Value();
+
+ const int32_t OFM_N = LEFT_N;
+ const int32_t OFM_C = LEFT_C;
+ const int32_t OFM_H = LEFT_H;
+ const int32_t OFM_W = LEFT_W;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_C);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_N, LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT} /* dims */, quantization);
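+
+ // The single-element "right" tensor is broadcast across every element of
+ // "left", which is why the output keeps the left operand's shape.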
+
+ // Add Subtraction Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteSubParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Sub and store the result into Tensor #0
+ // - Read Left from Tensor #1
+ // - Read Right from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SUB, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/sub_2.lst b/tools/nnapi_quickcheck/tests/sub_2.lst
new file mode 100644
index 000000000..cd36ac199
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/sub_2.lst
@@ -0,0 +1,10 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_C, 3)
+INT_VALUE(LEFT_H, 16)
+INT_VALUE(LEFT_W, 16)
+
+INT_VALUE(RIGHT, 1)
diff --git a/tools/nnapi_quickcheck/tests/sub_3.cpp b/tools/nnapi_quickcheck/tests/sub_3.cpp
new file mode 100644
index 000000000..fd2d4aaea
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/sub_3.cpp
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_sub_3, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "sub_3.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT = RIGHT_Value();
+
+ const int32_t OFM_H = LEFT_H;
+ const int32_t OFM_W = LEFT_W;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_H, OFM_W} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_H, LEFT_W} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT, LEFT_W} /* dims */, quantization);
+
+ // Add Subtraction Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteSubParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Sub and store the result into Tensor #0
+ // - Read Left from Tensor #1
+ // - Read Right from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SUB, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/sub_3.lst b/tools/nnapi_quickcheck/tests/sub_3.lst
new file mode 100644
index 000000000..c56875048
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/sub_3.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_H, 8)
+INT_VALUE(LEFT_W, 16)
+
+INT_VALUE(RIGHT, 1)
diff --git a/tools/nnapi_quickcheck/tests/sub_4.cpp b/tools/nnapi_quickcheck/tests/sub_4.cpp
new file mode 100644
index 000000000..993acddce
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/sub_4.cpp
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_sub_4, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "sub_1.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_C = LEFT_C_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+
+ const int32_t RIGHT_C = RIGHT_C_Value();
+ const int32_t RIGHT_H = RIGHT_H_Value();
+ const int32_t RIGHT_W = RIGHT_W_Value();
+
+ const int32_t OFM_C = std::max(LEFT_C, RIGHT_C);
+ const int32_t OFM_H = std::max(LEFT_H, RIGHT_H);
+ const int32_t OFM_W = std::max(LEFT_W, RIGHT_W);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_C);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_C);
+ PRINT_VALUE(RIGHT_H);
+ PRINT_VALUE(RIGHT_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization);
+
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT_H, RIGHT_W, RIGHT_C} /* dims */, quantization);
+
+ // Add Subtraction Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteSubParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Sub and store the result into Tensor #0
+ // - Read Left from Tensor #1
+ // - Read Right from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SUB, 1));
+
+ interp.SetInputs({1, 2});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/sub_4.lst b/tools/nnapi_quickcheck/tests/sub_4.lst
new file mode 100644
index 000000000..ce6128f83
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/sub_4.lst
@@ -0,0 +1,11 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_C, 3)
+INT_VALUE(LEFT_H, 8)
+INT_VALUE(LEFT_W, 16)
+
+INT_VALUE(RIGHT_C, 3)
+INT_VALUE(RIGHT_H, 1)
+INT_VALUE(RIGHT_W, 16)
diff --git a/tools/nnapi_quickcheck/tests/sub_5.cpp b/tools/nnapi_quickcheck/tests/sub_5.cpp
new file mode 100644
index 000000000..610be5754
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/sub_5.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_sub_5, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "sub_5.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+
+ const int32_t RIGHT_N = RIGHT_N_Value();
+ const int32_t RIGHT_H = RIGHT_H_Value();
+ const int32_t RIGHT_W = RIGHT_W_Value();
+ const int32_t RIGHT_C = RIGHT_C_Value();
+
+ const int32_t OFM_N = std::max(LEFT_N, RIGHT_N);
+ const int32_t OFM_H = std::max(LEFT_H, RIGHT_H);
+ const int32_t OFM_W = std::max(LEFT_W, RIGHT_W);
+ const int32_t OFM_C = std::max(LEFT_C, RIGHT_C);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_VALUE(LEFT_C);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_N);
+ PRINT_VALUE(RIGHT_H);
+ PRINT_VALUE(RIGHT_W);
+ PRINT_VALUE(RIGHT_C);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+ PRINT_VALUE(OFM_C);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ // Configure left data
+ const uint32_t left_size = LEFT_N * LEFT_C * LEFT_H * LEFT_W;
+ const uint32_t right_size = RIGHT_N * RIGHT_C * RIGHT_H * RIGHT_W;
+ float left_data[left_size] = {
+ 0.0f,
+ };
+ float right_data[right_size] = {
+ 0.0f,
+ };
+
+ // Fill left with a constant and right with increasing values
+ {
+ float value = 10.0f;
+ for (uint32_t off = 0; off < left_size; ++off)
+ {
+ left_data[off] = value;
+ }
+ value = 1.0f;
+ for (uint32_t off = 0; off < right_size; ++off)
+ {
+ right_data[off] = value++;
+ }
+ }
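+
+ // With these constants the result is fully determined: every output element
+ // equals 10.0f minus the corresponding (broadcast) right value.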
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadOnly(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_N, LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization,
+ reinterpret_cast<const char *>(left_data),
+ left_size * sizeof(float));
+
+ // Configure input(s)
+ interp.SetTensorParametersReadOnly(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT_W, RIGHT_C} /* dims: test with other shapes */,
+ quantization, reinterpret_cast<const char *>(right_data),
+ right_size * sizeof(float));
+
+ // Add Subtraction Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteSubParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Sub and store the result into Tensor #0
+ // - Read Left from Tensor #1
+ // - Read Right from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SUB, 1));
+
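+ // Both operands are read-only constants, so the model has no runtime inputs;
+ // the runner only needs to read back output #0.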
+ interp.SetInputs({});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/sub_5.lst b/tools/nnapi_quickcheck/tests/sub_5.lst
new file mode 100644
index 000000000..0327e6b73
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/sub_5.lst
@@ -0,0 +1,13 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_H, 2)
+INT_VALUE(LEFT_W, 3)
+INT_VALUE(LEFT_C, 4)
+
+INT_VALUE(RIGHT_N, 1)
+INT_VALUE(RIGHT_H, 1)
+INT_VALUE(RIGHT_W, 3)
+INT_VALUE(RIGHT_C, 4)
diff --git a/tools/nnapi_quickcheck/tests/sub_6.cpp b/tools/nnapi_quickcheck/tests/sub_6.cpp
new file mode 100644
index 000000000..b9e37c8d7
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/sub_6.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_sub_6, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "sub_6.lst"
+#undef INT_VALUE
+
+ const int32_t LEFT_N = LEFT_N_Value();
+ const int32_t LEFT_H = LEFT_H_Value();
+ const int32_t LEFT_W = LEFT_W_Value();
+ const int32_t LEFT_C = LEFT_C_Value();
+
+ const int32_t RIGHT_N = RIGHT_N_Value();
+ const int32_t RIGHT_H = RIGHT_H_Value();
+ const int32_t RIGHT_W = RIGHT_W_Value();
+ const int32_t RIGHT_C = RIGHT_C_Value();
+
+ const int32_t OFM_N = std::max(LEFT_N, RIGHT_N);
+ const int32_t OFM_H = std::max(LEFT_H, RIGHT_H);
+ const int32_t OFM_W = std::max(LEFT_W, RIGHT_W);
+ const int32_t OFM_C = std::max(LEFT_C, RIGHT_C);
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(LEFT_N);
+ PRINT_VALUE(LEFT_H);
+ PRINT_VALUE(LEFT_W);
+ PRINT_VALUE(LEFT_C);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(RIGHT_N);
+ PRINT_VALUE(RIGHT_H);
+ PRINT_VALUE(RIGHT_W);
+ PRINT_VALUE(RIGHT_C);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+ PRINT_VALUE(OFM_C);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ // Configure left data
+ const uint32_t left_size = LEFT_N * LEFT_C * LEFT_H * LEFT_W;
+ const uint32_t right_size = RIGHT_N * RIGHT_C * RIGHT_H * RIGHT_W;
+ float left_data[left_size] = {
+ 0.0f,
+ };
+ float right_data[right_size] = {
+ 0.0f,
+ };
+
+ // Fill left with a constant and right with increasing values
+ {
+ float value = 10.0f;
+ for (uint32_t off = 0; off < left_size; ++off)
+ {
+ left_data[off] = value;
+ }
+ value = 1.0f;
+ for (uint32_t off = 0; off < right_size; ++off)
+ {
+ right_data[off] = value++;
+ }
+ }
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(3);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input(s)
+ interp.SetTensorParametersReadOnly(1, kTfLiteFloat32 /* type */, "left" /* name */,
+ {LEFT_W, LEFT_C} /* dims: test with other shapes */,
+ quantization, reinterpret_cast<const char *>(left_data),
+ left_size * sizeof(float));
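+
+ // Here "left" is deliberately rank-2 while "right" below is rank-4, so this
+ // case exercises broadcasting between tensors of different rank.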
+
+ // Configure input(s)
+ interp.SetTensorParametersReadOnly(2, kTfLiteFloat32 /* type */, "right" /* name */,
+ {RIGHT_N, RIGHT_H, RIGHT_W, RIGHT_C} /* dims */,
+ quantization, reinterpret_cast<const char *>(right_data),
+ right_size * sizeof(float));
+
+ // Add Subtraction Node
+ //
+ // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free,
+ // so param should be allocated with malloc
+ auto param = make_alloc<TfLiteSubParams>();
+
+ param->activation = kTfLiteActNone;
+
+ // Run Sub and store the result into Tensor #0
+ // - Read Left from Tensor #1
+ // - Read Right from Tensor #2
+ interp.AddNodeWithParameters({1, 2}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_SUB, 1));
+
+ interp.SetInputs({});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/sub_6.lst b/tools/nnapi_quickcheck/tests/sub_6.lst
new file mode 100644
index 000000000..52a1f1acc
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/sub_6.lst
@@ -0,0 +1,13 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(LEFT_N, 1)
+INT_VALUE(LEFT_H, 1)
+INT_VALUE(LEFT_W, 3)
+INT_VALUE(LEFT_C, 4)
+
+INT_VALUE(RIGHT_N, 1)
+INT_VALUE(RIGHT_H, 2)
+INT_VALUE(RIGHT_W, 3)
+INT_VALUE(RIGHT_C, 4)
diff --git a/tools/nnapi_quickcheck/tests/tanh_1.cpp b/tools/nnapi_quickcheck/tests/tanh_1.cpp
new file mode 100644
index 000000000..67847eceb
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/tanh_1.cpp
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <iostream>
+#include <cassert>
+
+#include <chrono>
+#include <random>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_tanh_1, simple_test)
+{
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "tanh_1.lst"
+#undef INT_VALUE
+
+ const int32_t IFM_N = IFM_N_Value();
+ const int32_t IFM_C = IFM_C_Value();
+ const int32_t IFM_H = IFM_H_Value();
+ const int32_t IFM_W = IFM_W_Value();
+
+ const int32_t OFM_N = IFM_N;
+ const int32_t OFM_C = IFM_C;
+ const int32_t OFM_H = IFM_H;
+ const int32_t OFM_W = IFM_W;
+
+ // Initialize random number generator
+ std::minstd_rand random(SEED);
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(IFM_N);
+ PRINT_VALUE(IFM_C);
+ PRINT_VALUE(IFM_H);
+ PRINT_VALUE(IFM_W);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OFM_N);
+ PRINT_VALUE(OFM_C);
+ PRINT_VALUE(OFM_H);
+ PRINT_VALUE(OFM_W);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(2);
+
+ // Configure output
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure input
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {IFM_N, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Add Tanh Node
+ // Run Tanh and store the result into Tensor #0
+ // - Read input from Tensor #1
+ interp.AddNodeWithParameters({1}, {0}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_TANH, 1));
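+
+ // TANH is element-wise (out = tanh(in), with outputs in (-1, 1)) and takes no
+ // parameter struct, hence the nullptr builtin data above.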
+
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/tanh_1.lst b/tools/nnapi_quickcheck/tests/tanh_1.lst
new file mode 100644
index 000000000..a0077cb95
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/tanh_1.lst
@@ -0,0 +1,8 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(IFM_N, 1)
+INT_VALUE(IFM_C, 3)
+INT_VALUE(IFM_H, 320)
+INT_VALUE(IFM_W, 320)
diff --git a/tools/nnapi_quickcheck/tests/topk_v2_1.cpp b/tools/nnapi_quickcheck/tests/topk_v2_1.cpp
new file mode 100644
index 000000000..bb9d8535e
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/topk_v2_1.cpp
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include "env.h"
+#include "memory.h"
+#include "util/environment.h"
+
+#include "support/tflite/Diff.h"
+#include "support/tflite/Quantization.h"
+#include "support/tflite/interp/FunctionBuilder.h"
+
+#include <chrono>
+#include <iostream>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+TEST(NNAPI_Quickcheck_topk_v2_1, simple_test)
+{
+ // Set random seed
+ int SEED = std::chrono::system_clock::now().time_since_epoch().count();
+
+ nnfw::util::env::IntAccessor("SEED").access(SEED);
+
+ // Set random test parameters
+ int verbose = 0;
+ int tolerance = 1;
+
+ nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
+#include "topk_v2_1.lst"
+#undef INT_VALUE
+
+ const int32_t INPUT_DATA = INPUT_DATA_Value();
+ const int32_t K = K_Value();
+
+ const int32_t OUTPUT_VALUES = K;
+ const int32_t OUTPUT_INDICES = K;
+
+ std::cout << "Configurations:" << std::endl;
+#define PRINT_NEWLINE() \
+ { \
+ std::cout << std::endl; \
+ }
+#define PRINT_VALUE(value) \
+ { \
+ std::cout << " " << #value << ": " << (value) << std::endl; \
+ }
+ PRINT_VALUE(SEED);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(INPUT_DATA);
+ PRINT_VALUE(K);
+ PRINT_NEWLINE();
+
+ PRINT_VALUE(OUTPUT_VALUES);
+ PRINT_VALUE(OUTPUT_INDICES);
+#undef PRINT_VALUE
+#undef PRINT_NEWLINE
+
+ // Fill the K data
+ int32_t k_data[1] = {K};
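+ // k_data is handed to SetTensorParametersReadOnly below as a raw buffer,
+ // so it must stay alive while the interpreter runs (the setup lambda
+ // captures everything by reference)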
+
+ auto setup = [&](Interpreter &interp) {
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
+ //
+ // Q: Is this necessary?
+ // A: It may be, because the quantization values (scale, zero_point) of TENSOR_INT32 and
+ // TENSOR_QUANT8_ASYMM are passed on to the runtime.
+ TfLiteQuantizationParams quantization = make_default_quantization();
+
+ // On an AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(4);
+
+ // Configure INPUT_DATA
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "input" /* name */,
+ {INPUT_DATA} /* dims */, quantization);
+
+ // Configure K
+ interp.SetTensorParametersReadOnly(1, kTfLiteInt32 /* type */, "k" /* name */, {1} /* dims */,
+ quantization, reinterpret_cast<const char *>(k_data),
+ sizeof(k_data));
+
+ // Configure OUTPUT_VALUES
+ interp.SetTensorParametersReadWrite(2, kTfLiteFloat32 /* type */, "output_values" /* name */,
+ {OUTPUT_VALUES} /* dims */, quantization);
+
+ // Configure OUTPUT_INDICES
+ interp.SetTensorParametersReadWrite(3, kTfLiteInt32 /* type */, "output_indices" /* name */,
+ {OUTPUT_INDICES} /* dims */, quantization);
+
+ // Add TopK_V2 Node
+ // Run TopK_V2 and store its result into Tensor #2 and #3
+ // - Read input data and K from Tensor #0 and #1, respectively
+ interp.AddNodeWithParameters({0, 1}, {2, 3}, nullptr, 0, nullptr,
+ BuiltinOpResolver().FindOp(BuiltinOperator_TOPK_V2, 1));
+
+ // Set Tensor #0 as Input, and Tensor #2 and #3 as Output
+ interp.SetInputs({0});
+ interp.SetOutputs({2, 3});
+ };
+
+ const nnfw::support::tflite::interp::FunctionBuilder builder(setup);
+
+ RandomTestParam param;
+
+ param.verbose = verbose;
+ param.tolerance = tolerance;
+
+ int res = RandomTestRunner{SEED, param}.run(builder);
+
+ EXPECT_EQ(res, 0);
+}
diff --git a/tools/nnapi_quickcheck/tests/topk_v2_1.lst b/tools/nnapi_quickcheck/tests/topk_v2_1.lst
new file mode 100644
index 000000000..a40ee3c57
--- /dev/null
+++ b/tools/nnapi_quickcheck/tests/topk_v2_1.lst
@@ -0,0 +1,6 @@
+#ifndef INT_VALUE
+#error "INT_VALUE should be defined"
+#endif // INT_VALUE
+
+INT_VALUE(INPUT_DATA, 8192)
+INT_VALUE(K, 16)
diff --git a/tools/nnapi_test/src/nnapi_test.cc b/tools/nnapi_test/src/nnapi_test.cc
index 515311a3c..1f98bb07d 100644
--- a/tools/nnapi_test/src/nnapi_test.cc
+++ b/tools/nnapi_test/src/nnapi_test.cc
@@ -14,27 +14,20 @@
* limitations under the License.
*/
-#include "tensorflow/contrib/lite/kernels/register.h"
+#include "support/tflite/kernels/register.h"
#include "tensorflow/contrib/lite/model.h"
-#include "util/environment.h"
-
#include "support/tflite/interp/FlatBufferBuilder.h"
#include "support/tflite/Diff.h"
#include <iostream>
+#include <stdexcept>
using namespace tflite;
using namespace tflite::ops::builtin;
int main(const int argc, char **argv)
{
- int verbose = 0;
- int tolerance = 1;
-
- nnfw::util::env::IntAccessor("VERBOSE").access(verbose);
- nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
-
if (argc < 2)
{
std::cerr << "nnapi_test\n\n";
@@ -50,10 +43,13 @@ int main(const int argc, char **argv)
const nnfw::support::tflite::interp::FlatBufferBuilder builder(*model);
- RandomTestParam param;
-
- param.verbose = verbose;
- param.tolerance = tolerance;
-
- return RandomTestRunner{0, param}.run(builder);
+ try
+ {
+ return RandomTestRunner::make(0).run(builder);
+ }
+ catch (const std::exception &e)
+ {
+ std::cerr << e.what() << std::endl;
+ return 1;
+ }
}
diff --git a/tools/opencl_tool/CMakeLists.txt b/tools/opencl_tool/CMakeLists.txt
new file mode 100644
index 000000000..66b92854c
--- /dev/null
+++ b/tools/opencl_tool/CMakeLists.txt
@@ -0,0 +1,12 @@
+if(NOT ${TARGET_ARCH_BASE} STREQUAL "arm")
+ return()
+endif(NOT ${TARGET_ARCH_BASE} STREQUAL "arm")
+
+list(APPEND OPENCL_INFO_SOURCE "src/opencl_info.cc")
+
+add_executable(opencl_info ${OPENCL_INFO_SOURCE})
+target_include_directories(opencl_info PUBLIC ${CMAKE_SOURCE_DIR}/externals/acl)
+target_include_directories(opencl_info PUBLIC ${CMAKE_SOURCE_DIR}/externals/acl/include)
+target_link_libraries(opencl_info arm_compute)
+
+install(TARGETS opencl_info DESTINATION bin)
diff --git a/tools/opencl_tool/src/opencl_info.cc b/tools/opencl_tool/src/opencl_info.cc
new file mode 100644
index 000000000..49673d1de
--- /dev/null
+++ b/tools/opencl_tool/src/opencl_info.cc
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*******************************************************************************
+ * Copyright (c) 2008-2015 The Khronos Group Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ ******************************************************************************/
+
+#include "arm_compute/core/CL/OpenCL.h"
+
+#include <iostream>
+#include <vector>
+
+void printDeviceInfo(int n, cl::Device &device, cl::Device &default_device)
+{
+ bool is_default = (device() == default_device());
+ std::cout << "\t\t\t#" << n << " Device: (id: " << device() << ") "
+ << (is_default ? " -> default" : "") << "\n";
+
+ const auto name = device.getInfo<CL_DEVICE_NAME>();
+ std::cout << "\t\t\t\tName: " << name << "\n";
+
+ const auto compute_unit = device.getInfo<CL_DEVICE_MAX_COMPUTE_UNITS>();
+ std::cout << "\t\t\t\tMax Compute Unit: " << compute_unit << "\n";
+
+ const auto max_work_item_size = device.getInfo<CL_DEVICE_MAX_WORK_ITEM_SIZES>();
+ std::cout << "\t\t\t\tMax Work Item Size: [";
+ for (auto size : max_work_item_size)
+ std::cout << size << ",";
+ std::cout << "]\n";
+
+ const auto max_work_group_size = device.getInfo<CL_DEVICE_MAX_WORK_GROUP_SIZE>();
+ std::cout << "\t\t\t\tMax Work Grpup Size: " << max_work_group_size << "\n";
+
+ const auto max_clock_frequency = device.getInfo<CL_DEVICE_MAX_CLOCK_FREQUENCY>();
+ std::cout << "\t\t\t\tMax Clock Frequency: " << max_clock_frequency << "\n";
+}
+
+void printContext(int n, cl::Platform &plat, int device_type, cl::Context &default_context)
+{
+ if (device_type == CL_DEVICE_TYPE_DEFAULT)
+ std::cout << "\t #" << n << " context when CL_DEVICE_TYPE_DEFAULT";
+ else if (device_type == CL_DEVICE_TYPE_GPU)
+ std::cout << "\t #" << n << " context when CL_DEVICE_TYPE_GPU";
+ else if (device_type == CL_DEVICE_TYPE_CPU)
+ std::cout << "\t #" << n << " context when CL_DEVICE_TYPE_CPU";
+ else if (device_type == CL_DEVICE_TYPE_ACCELERATOR)
+ std::cout << "\t #" << n << " context when CL_DEVICE_TYPE_ACCELERATOR";
+ else if (device_type == CL_DEVICE_TYPE_CUSTOM)
+ std::cout << "\t #" << n << " context when CL_DEVICE_TYPE_CUSTOM";
+ else if (device_type == CL_DEVICE_TYPE_ALL)
+ std::cout << "\t #" << n << " context when CL_DEVICE_TYPE_ALL";
+
+ cl::Context context;
+
+ try
+ {
+ cl_context_properties properties[3] = {CL_CONTEXT_PLATFORM, (cl_context_properties)plat(), 0};
+
+ cl_int default_error;
+
+ context = cl::Context(device_type, properties, NULL, NULL, &default_error);
+ }
+ catch (cl::Error &err) // thrown when there is no Context for this platform
+ {
+ std::cout << "\t\t No Context Found\n";
+ return;
+ }
+
+ bool is_default = (context() == default_context());
+
+ std::cout << " (id: " << context() << ") " << (is_default ? " -> default" : "") << "\n";
+
+ const auto device_num = context.getInfo<CL_CONTEXT_NUM_DEVICES>();
+ std::cout << "\t\t\tDevice num: " << device_num << "\n";
+ if (device_num == 0)
+ return;
+
+ auto devices = context.getInfo<CL_CONTEXT_DEVICES>();
+ auto default_device = cl::Device::getDefault();
+
+ int d = 0;
+ for (auto device : devices)
+ printDeviceInfo(++d, device, default_device);
+}
+
+void printPlatform(int n, cl::Platform &plat, cl::Platform &default_platform)
+{
+ bool is_default = (plat() == default_platform());
+
+ std::cout << "#" << n << ". Platform: (id: " << plat() << ") "
+ << (is_default ? " -> default" : "") << "\n";
+
+ cl::Context default_context = cl::Context::getDefault();
+ std::cout << "\t"
+ << "default context: " << default_context() << "\n";
+
+ int x = 0;
+ printContext(++x, plat, CL_DEVICE_TYPE_DEFAULT, default_context);
+ printContext(++x, plat, CL_DEVICE_TYPE_GPU, default_context);
+ printContext(++x, plat, CL_DEVICE_TYPE_CPU, default_context);
+ printContext(++x, plat, CL_DEVICE_TYPE_ACCELERATOR, default_context);
+ printContext(++x, plat, CL_DEVICE_TYPE_CUSTOM, default_context);
+ printContext(++x, plat, CL_DEVICE_TYPE_ALL, default_context);
+}
+
+int main(const int argc, char **argv)
+{
+ std::cout << "\nOpenCL Platform, Context, Device Info are as follows:\n\n";
+
+ std::vector<cl::Platform> platforms;
+ cl::Platform::get(&platforms);
+
+ cl::Platform defaultPlatform = cl::Platform::getDefault();
+
+ int n = 0;
+ for (auto &p : platforms)
+ {
+ printPlatform(++n, p, defaultPlatform);
+ }
+
+ return 0;
+}
diff --git a/tools/pbfile_tool/convert_ckpt_to_pb.py b/tools/pbfile_tool/convert_ckpt_to_pb.py
new file mode 100644
index 000000000..cd43143ca
--- /dev/null
+++ b/tools/pbfile_tool/convert_ckpt_to_pb.py
@@ -0,0 +1,80 @@
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is added by NNFW to convert a checkpoint file
+# into a frozen pb file
+# and to generate a Tensorboard log for convenience
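+#
+# How to run (see tools/pbfile_tool/readme.md):
+#   $ PYTHONPATH=tools/tensorflow_model_freezer/ \
+#       python convert_ckpt_to_pb.py checkpoint_dir checkpoint_file_name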
+
+import os
+import argparse
+import tensorflow as tf
+import model_freezer_util as util
+
+
+def convert(checkpoint_dir, checkpoint_file_path):
+
+ meta_path = checkpoint_file_path + '.meta' # path to the .meta file
+ output_node_name = 'Model/concat'
+ output_node_names = [output_node_name] # Output nodes
+
+ with tf.Session() as sess:
+
+ # Restore the graph
+ saver = tf.train.import_meta_graph(meta_path)
+
+ # Load weights
+ saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir))
+
+ # save the graph into pb
+ saved_graph_def = tf.graph_util.convert_variables_to_constants(
+ sess, sess.graph_def, output_node_names)
+
+ pb_path = os.path.join(checkpoint_dir, 'graph.pb')
+ with open(pb_path, 'wb') as f:
+ f.write(saved_graph_def.SerializeToString())
+
+ # freeze
+ (frozen_pb_path, frozen_pbtxt_path) = util.freezeGraph(pb_path, checkpoint_file_path,
+ output_node_name)
+
+ print("Freeze() Finished. Created :")
+ print("\t-{}\n\t-{}\n".format(frozen_pb_path, frozen_pbtxt_path))
+
+ # tensor board
+ tensorboardLogDir = util.generateTensorboardLog([frozen_pb_path], [''],
+ os.path.join(
+ checkpoint_dir, ".tensorboard"))
+
+ print("")
+ print(
+ "\t# Tensorboard: You can view original graph and frozen graph with tensorboard.")
+ print("\t Run the following:")
+ print("\t $ tensorboard --logdir={} ".format(tensorboardLogDir))
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description='convert checkpoint file to pb file and freeze the pb file')
+ parser.add_argument(
+ "checkpoint_dir",
+ help=
+ "directory where checkpoint files are located. pb, pbtxt will also be generated into this folder."
+ )
+ parser.add_argument("checkpoint_file_name", help="name of checkpoint file")
+
+ args = parser.parse_args()
+ checkpoint_dir = args.checkpoint_dir
+ checkpoint_file_path = os.path.join(checkpoint_dir, args.checkpoint_file_name)
+
+ convert(checkpoint_dir, checkpoint_file_path)
diff --git a/tools/pbfile_tool/pb_info.py b/tools/pbfile_tool/pb_info.py
new file mode 100755
index 000000000..110b15e02
--- /dev/null
+++ b/tools/pbfile_tool/pb_info.py
@@ -0,0 +1,158 @@
+#!/usr/bin/python
+
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import tensorflow as tf
+from google.protobuf import text_format
+from tensorflow.python.platform import gfile
+from tensorflow.python.tools import freeze_graph
+from tensorflow.python.tools import optimize_for_inference_lib
+
+import argparse
+import os
+
+
+def splitDirFilenameExt(path):
+ # in case of '/tmp/.ssh/my.key.dat'
+ # this returns ('/tmp/.ssh', 'my.key', 'dat')
+ directory = os.path.split(path)[0]
+ ext = os.path.splitext(path)[1][1:] # remove '.', e.g., '.dat' -> 'dat'
+ filename = os.path.splitext(os.path.split(path)[1])[0]
+
+ return (directory, filename, ext)
+
+
+def importGraphIntoSession(sess, filename):
+ # this should be called inside
+ # with tf.Session() as sess:
+ assert sess
+ (_, _, ext) = splitDirFilenameExt(filename)
+ if (ext.lower() == 'pb'):
+ with gfile.FastGFile(filename, 'rb') as f:
+ graph_def = tf.GraphDef()
+ graph_def.ParseFromString(f.read())
+
+ elif (ext.lower() == 'pbtxt'):
+ with open(filename, 'r') as reader:
+ graph_def = tf.GraphDef()
+ text_format.Parse(reader.read(), graph_def)
+ else:
+ print("# Error: unknown extension - " + ext)
+ return
+
+ tf.import_graph_def(graph_def)
+
+
+def print_operation(op, op_count):
+ print("") # new line
+ print("OP #{}: {}, name = {}".format(op_count, op.type, op.name))
+
+ print("\tinputs:")
+ for input_tensor in op.inputs:
+ print("\t\t{} : name = {}".format(input_tensor.shape, input_tensor.name))
+
+ print("\toutputs:")
+ for output_tensor in op.outputs:
+ print("\t\t{}, name = {}".format(output_tensor.shape, output_tensor.name))
+
+ print("\tattributes:")
+ op_def = op.op_def
+ for attr_def in op_def.attr:
+ attr = op.get_attr(attr_def.name)
+ # skip Const value
+ if op.type == "Const" and attr_def.name == "value":
+ print("\t\t{}, name = {}".format("skipping value", attr_def.name))
+ else:
+ print("\t\t{}, name = {}".format(attr, attr_def.name))
+ print("") # new line
+
+
+def print_graph_info(pb_path, optype_substring, name_prefix):
+ with tf.Session() as sess:
+ importGraphIntoSession(sess, pb_path)
+
+ op_seq = 1
+ op_count = 0
+ graph = sess.graph
+ ops = graph.get_operations()
+ for op in ops:
+ if optype_substring == "*" and (name_prefix == None
+ or op.name.startswith(name_prefix)):
+ print_operation(op, op_seq)
+ op_count += 1
+ elif op.type.lower().find(optype_substring.lower()) != -1 and (
+ name_prefix == None or op.name.startswith(name_prefix)):
+ print_operation(op, op_seq)
+ op_count += 1
+ else:
+ print("skipping {}, name = {}".format(op.type, op.name))
+ op_seq += 1
+
+ print("")
+ print("Total number of operations : " + str(op_count))
+ print("")
+
+
+def print_summary(pb_path, optype_substring, name_prefix):
+ op_map = {}
+ op_count = 0
+ with tf.Session() as sess:
+ importGraphIntoSession(sess, pb_path)
+
+ graph = sess.graph
+ ops = graph.get_operations()
+ for op in ops:
+ process = False
+ if optype_substring == "*" and (name_prefix == None
+ or op.name.startswith(name_prefix)):
+ process = True
+ elif op.type.lower().find(optype_substring.lower()) != -1 and (
+ name_prefix == None or op.name.startswith(name_prefix)):
+ process = True
+
+ if process:
+ op_count += 1
+ if op_map.get(op.type) == None:
+ op_map[op.type] = 1
+ else:
+ op_map[op.type] += 1
+
+ # print op list
+ print("")
+ for op_type, count in op_map.items():
+ print("\t" + op_type + " : \t" + str(count))
+ print("")
+ print("Total number of operations : " + str(op_count))
+ print("Total number of operation types : " + str(len(op_map.keys())))
+ print("")
+
+
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser(description='Prints information inside pb file')
+
+ parser.add_argument("pb_file", help="pb file to read")
+ parser.add_argument(
+ "op_subst",
+ help=
+ "substring of operation types. Only info on matching operations will be printed.")
+ parser.add_argument(
+ "--summary", help="print summary of operations", action="store_true")
+ parser.add_argument("--name_prefix", help="filtered by speficied name prefix")
+
+ args = parser.parse_args()
+
+ if args.summary:
+ print_summary(args.pb_file, args.op_subst, args.name_prefix)
+ else:
+ print_graph_info(args.pb_file, args.op_subst, args.name_prefix)
diff --git a/tools/pbfile_tool/readme.md b/tools/pbfile_tool/readme.md
new file mode 100644
index 000000000..8eb5c2285
--- /dev/null
+++ b/tools/pbfile_tool/readme.md
@@ -0,0 +1,17 @@
+## pb_info.py
+- prints information inside `pb` file.
+- how to run (a minimal sketch of the underlying pattern follows these examples):
+ - `./tools/pbfile_tool/pb_info.py pbfile_path "conv"`
+ - first arg: pb file
+ - second arg: substring of the operation type. Only operations whose type contains "conv" will be printed (case-insensitive).
+ - `./tools/pbfile_tool/pb_info.py pbfile_path "*"`
+ - pass "*" as the second param to print all operations
+ - `./tools/pbfile_tool/pb_info.py pbfile_path "*" --summary`
+ - prints the list of operations and their counts
+ - `./tools/pbfile_tool/pb_info.py pbfile_path "*" --summary --name_prefix=Model/rnn`
+ - prints the summary of operations whose names start with `Model/rnn`
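+
+A minimal sketch of the pattern `pb_info.py` uses internally to load a `pb` file and walk its operations (TF 1.x API assumed; `some_model.pb` is a placeholder path):
+
+```python
+import tensorflow as tf
+from tensorflow.python.platform import gfile
+
+with tf.Session() as sess:
+    # load the binary GraphDef and import it into the session's graph
+    with gfile.FastGFile('some_model.pb', 'rb') as f:
+        graph_def = tf.GraphDef()
+        graph_def.ParseFromString(f.read())
+    tf.import_graph_def(graph_def)
+
+    # iterate over every operation, as print_graph_info()/print_summary() do
+    for op in sess.graph.get_operations():
+        print(op.type, op.name)
+```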
+
+## convert_ckpt_to_pb.py
+- converts a checkpoint file to a pb file and _freezes_ the `pb` file.
+- how to run:
+ - `$ PYTHONPATH=tools/tensorflow_model_freezer/ python convert_ckpt_to_pb.py checkpoint_dir checkpoint_file_name`
diff --git a/tools/tensorflow_model_freezer/__init__.py b/tools/tensorflow_model_freezer/__init__.py
new file mode 100644
index 000000000..89d760b4a
--- /dev/null
+++ b/tools/tensorflow_model_freezer/__init__.py
@@ -0,0 +1,15 @@
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# indicates that this folder is a python package
diff --git a/tools/tensorflow_model_freezer/base_freezer.py b/tools/tensorflow_model_freezer/base_freezer.py
new file mode 100644
index 000000000..ccfd811cf
--- /dev/null
+++ b/tools/tensorflow_model_freezer/base_freezer.py
@@ -0,0 +1,201 @@
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import traceback
+import tensorflow as tf
+import model_freezer_util as util
+
+
+class Tensor(object):
+ def __init__(self, shape, dtype=tf.float32, const_val=None):
+ '''
+ Keyword arguments:
+ shape -- shape in the form of a list, e.g., a 3x2x4 tensor's shape => [3,2,4]; the shape of a scalar is []
+ dtype -- type of values in this Tensor
+ const_val -- if this Tensor is const, provide this
+ '''
+ self._shape = shape
+ self._const_val = const_val
+ self._dtype = dtype
+
+ def getShape(self):
+ return self._shape
+
+ def getConstVal(self):
+ return self._const_val
+
+ def getDType(self):
+ return self._dtype
+
+
+class BaseFreezer(object):
+ def __init__(self, path):
+ # files generated by child class will be stored under this path.
+ self.root_output_path = path
+
+ def getTestCases(self):
+ ''' abstract method;
+ the overriding method should return a dict containing test cases '''
+ raise NotImplementedError("please implement this")
+
+ def getOutputDirectory(self):
+ ''' abstract method;
+ the overriding method should return the directory under self.root_output_path where all pb, pbtxt, checkpoint, and tensorboard logs are saved '''
+ raise NotImplementedError("please implement this")
+
+ def buildModel(self, sess, test_case_tensor, tc_name):
+ ''' abstract method;
+ override this method to define models when getTestCases() is defined.
+ this method returns (input_node_list, output_node_list) after building a model with all variables set '''
+ raise NotImplementedError("please implement this")
+
+ def createTFInput(self, tensor, input_list):
+ '''
+ create a TensorFlow tensor; if it is a placeholder, append it to input_list
+
+ keyword argument:
+ tensor -- base_freezer.Tensor
+ input_list -- list to which created placeholders are appended
+ '''
+ if (util.isScalar(tensor)): # check if scalar
+ if (tensor.getConstVal() == None):
+ tf_tensor = tf.placeholder(shape=[], dtype=tensor.getDType())
+ input_list.append(tf_tensor)
+ else:
+ tf_tensor = tf.constant(
+ value=tensor.getConstVal(), dtype=tensor.getDType())
+ else:
+ if (tensor.getConstVal() == None):
+ tf_tensor = tf.placeholder(
+ shape=tensor.getShape(), dtype=tensor.getDType())
+ input_list.append(tf_tensor)
+ else:
+ tf_tensor = tf.constant(
+ shape=tensor.getShape(),
+ value=tensor.getConstVal(),
+ dtype=tensor.getDType())
+
+ return tf_tensor
+
+ def saveRelatedFiles(self, sess, input_node_list, output_node_list, fn_prefix):
+ ''' saves pb, pbtxt, ckpt files and then freezes the graph under top_node_name into the directory '''
+ ''' produce pb, pbtxt, and ckpt files '''
+ (pb_path, pbtxt_path, checkpoint_path) = util.savePbAndCkpt(
+ sess, self.getOutputDirectory(), fn_prefix)
+
+ print("")
+ print("# 1. Created Tensorflow model files :\n\t-{}\n\t-{}\n\t-{}\n".format(
+ pb_path, pbtxt_path, checkpoint_path))
+ '''
+ produce frozen files
+ includes only nodes below the softmax node; nodes for gradient descent (reduce_mean, GradientDescentOptimizer, ...) will not be included
+ '''
+ sess.close()
+
+ output_node_name = fn_prefix
+ (frozen_pb_path, frozen_pbtxt_path) = util.freezeGraph(pb_path, checkpoint_path,
+ output_node_name)
+
+ print("")
+ print("# 2. Freeze() Finished. Created :")
+ print("\t-{}\n\t-{}\n".format(frozen_pb_path, frozen_pbtxt_path))
+
+ self.generateTensorboardLog(pb_path, frozen_pb_path, fn_prefix)
+ print("")
+ ''' generate tflite file. '''
+ # manually put back imported modules. refer to https://github.com/tensorflow/tensorflow/issues/15410#issuecomment-352189481
+ import tempfile
+ import subprocess
+ tf.contrib.lite.tempfile = tempfile
+ tf.contrib.lite.subprocess = subprocess
+
+ tflite_path = os.path.join(self.getOutputDirectory(), fn_prefix + ".tflite")
+
+ tf.reset_default_graph()
+ sess = tf.Session()
+
+ util.importGraphIntoSession(sess, frozen_pb_path, "")
+ try:
+ tflite_model = tf.contrib.lite.toco_convert(sess.graph_def, input_node_list,
+ output_node_list)
+ with open(tflite_path, "wb") as f:
+ f.write(tflite_model)
+ print("# 3. TOCO : Created TFLITE file :\n\t-{}\n".format(tflite_path))
+ except Exception:
+ print("# 3. TOCO failed\n")
+ print(traceback.format_exc())
+
+ return (pb_path, frozen_pb_path, tflite_path)
+
+ def generateTensorboardLog(self, pb_path, frozen_pb_path, fn_prefix):
+ ''' generating tensorboard logs to compare original pb and frozen pb '''
+ tensorboardLogDir = util.generateTensorboardLog(
+ [pb_path, frozen_pb_path], ['original', 'frozen'],
+ os.path.join(self.getOutputDirectory(), ".tensorboard", fn_prefix))
+
+ print("")
+ print(
+ "\t# Tensorboard: You can view original graph and frozen graph with tensorboard."
+ )
+ print("\t Run the following:")
+ print("\t $ tensorboard --logdir={} ".format(tensorboardLogDir))
+
+ def createSaveFreezeModel(self):
+ ''' the method that is actually called by the main() function. '''
+
+ test_cases = self.getTestCases()
+
+ # when there are defined test cases
+ if test_cases != None:
+ for tc_name in test_cases:
+
+ # without this, the graph used by a previous session is reused : https://stackoverflow.com/questions/42706761/closing-session-in-tensorflow-doesnt-reset-graph
+ tf.reset_default_graph()
+
+ # TODO-nnfw session life cycle here is too tangled
+ sess = tf.Session()
+
+ print("")
+ print("------------ Generating files for {} ------------".format(tc_name))
+ print("# files will be saved into " + self.getOutputDirectory())
+
+ # build model
+ (input_node_list, output_node_list) = self.buildModel(
+ sess, test_cases.get(tc_name), tc_name)
+ ''' Now, save to proto buffer format and checkpoint '''
+ (pb_path, frozen_pb_path, tflite_path) = self.saveRelatedFiles(
+ sess, input_node_list, output_node_list, tc_name)
+
+ sess.close()
+ # when there are no test cases, build the model itself
+ else:
+ # without this, the graph used by a previous session is reused : https://stackoverflow.com/questions/42706761/closing-session-in-tensorflow-doesnt-reset-graph
+ tf.reset_default_graph()
+
+ # TODO-nnfw session life cycle here is too tangled
+ sess = tf.Session()
+
+ # no test-case dict in this path: use a default name for the generated files
+ tc_name = "model"
+
+ print("")
+ print("------------ Generating files for {} ------------".format(tc_name))
+ print("# files will be saved into " + self.getOutputDirectory())
+
+ # build model (no per-test-case tensor metadata in this path)
+ (input_node_list, output_node_list) = self.buildModel(sess, None, tc_name)
+ ''' Now, save to proto buffer format and checkpoint '''
+ (pb_path, frozen_pb_path, tflite_path) = self.saveRelatedFiles(
+ sess, input_node_list, output_node_list, tc_name)
+
+ sess.close()
diff --git a/tools/tensorflow_model_freezer/model_freezer_util.py b/tools/tensorflow_model_freezer/model_freezer_util.py
new file mode 100644
index 000000000..3b847f043
--- /dev/null
+++ b/tools/tensorflow_model_freezer/model_freezer_util.py
@@ -0,0 +1,233 @@
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# utility for nncc
+
+import os
+import sys
+
+import tensorflow as tf
+from google.protobuf import text_format
+from tensorflow.python.platform import gfile
+from tensorflow.python.tools import freeze_graph
+from tensorflow.python.tools import optimize_for_inference_lib
+
+
+# --------
+def file_validity_check(fn, ext_must_be=''):
+ ''' check if the file exists and the file extension is correct '''
+ if os.path.exists(fn) == False:
+ print("# error: file does not exist " + fn)
+ return False
+
+ if ext_must_be != '':
+ ext = os.path.splitext(fn)[1]
+ if ext[1:].lower(
+ ) != ext_must_be: # ext contains '.', e.g., '.pb'; need to exclude '.'
+ print("# error: wrong extension {}. Should be {} ".format(ext, ext_must_be))
+ return False
+
+ return True
+
+
+# --------
+def importGraphIntoSession(sess, filename, graphNameAfterImporting):
+ # this should be called inside
+ # with tf.Session() as sess:
+ assert sess
+ (_, _, ext) = splitDirFilenameExt(filename)
+ if (ext.lower() == 'pb'):
+ with gfile.FastGFile(filename, 'rb') as f:
+ graph_def = tf.GraphDef()
+ graph_def.ParseFromString(f.read())
+
+ elif (ext.lower() == 'pbtxt'):
+ with open(filename, 'r') as reader:
+ graph_def = tf.GraphDef()
+ text_format.Parse(reader.read(), graph_def)
+ else:
+ print("# Error: unknown extension - " + ext)
+ return
+
+ tf.import_graph_def(graph_def, name=graphNameAfterImporting)
+
+
+# --------
+def splitDirFilenameExt(path):
+ # in case of '/tmp/.ssh/my.key.dat'
+ # this returns ('/tmp/.ssh', 'my.key', 'dat')
+ directory = os.path.split(path)[0]
+ ext = os.path.splitext(path)[1][1:] # remove '.', e.g., '.dat' -> 'dat'
+ filename = os.path.splitext(os.path.split(path)[1])[0]
+
+ return (directory, filename, ext)
+
+
+# --------
+def convertPbtxt2Pb(pbtxtPath):
+ ''' convert pbtxt file to pb file. e.g., /tmp/a.pbtxt --> /tmp/a.pb '''
+ with open(pbtxtPath) as f:
+ txt = f.read()
+
+ gdef = text_format.Parse(txt, tf.GraphDef())
+
+ (directory, filename, ext) = splitDirFilenameExt(pbtxtPath)
+
+ tf.train.write_graph(gdef, directory, filename + '.pb', as_text=False)
+
+ return os.path.join(directory, filename + '.pb')
+
+
+# --------
+def convertPb2Pbtxt(pbPath):
+ ''' convert pb file to pbtxt file. e.g., /tmp/a.pb --> /tmp/a.pbtxt '''
+
+ from tensorflow.python.platform import gfile
+
+ (directory, filename, ext) = splitDirFilenameExt(pbPath)
+
+ with gfile.FastGFile(pbPath, 'rb') as f:
+ content = f.read()
+
+ graph_def = tf.GraphDef()
+ graph_def.ParseFromString(content)
+ tf.import_graph_def(graph_def, name='')
+
+ tf.train.write_graph(graph_def, directory, filename + '.pbtxt', as_text=True)
+
+ return os.path.join(directory, filename + '.pbtxt')
+
+
+# --------
+def savePbAndCkpt(sess, directory, fn_prefix):
+ ''' save files related to session's graph into directory.
+ - fn_prefix.pb : binary protocol buffer file
+ - fn_prefix.pbtxt : text format of protocol buffer file
+ - fn_prefix.ckpt.* : checkpoint files containing the values of variables
+
+ returns (path of pb file, path of pbtxt file, path of ckpt files)
+ '''
+
+ tf.train.write_graph(sess.graph_def, directory, fn_prefix + '.pb', as_text=False)
+ tf.train.write_graph(sess.graph_def, directory, fn_prefix + '.pbtxt', as_text=True)
+
+ # save a checkpoint file, which will store the above assignment
+ saver = tf.train.Saver()
+ saver.save(sess, os.path.join(directory, 'checkpoint', fn_prefix + '.ckpt'))
+
+ return (os.path.join(directory, fn_prefix + '.pb'),
+ os.path.join(directory, fn_prefix + '.pbtxt'),
+ os.path.join(directory, 'checkpoint', fn_prefix + '.ckpt'))
+
+
+def optimizeGraph(input_graph_path, input_node_name, output_node_name):
+ ''' this function calls optimize_for_inference of tensorflow and generates '*_optimized.pb'.
+
+ - input_graph_path : must be a path to pb file
+ - input_node_name : name of input operation node
+ - output_node_name : name of head(top) operation node
+ '''
+
+ (directory, fn, ext) = splitDirFilenameExt(input_graph_path)
+ output_optimized_graph_path = os.path.join(directory, fn + '_optimized.pb')
+
+ # Optimize for inference
+ input_graph_def = tf.GraphDef()
+ with tf.gfile.Open(input_graph_path, "rb") as f:
+ data = f.read()
+ input_graph_def.ParseFromString(data)
+ output_graph_def = optimize_for_inference_lib.optimize_for_inference(
+ input_graph_def, input_node_name.split(","), output_node_name.split(","),
+ tf.float32.as_datatype_enum)
+
+ # Save the optimized graph
+ f = tf.gfile.FastGFile(output_optimized_graph_path, "wb") # write binary; SerializeToString() returns bytes
+ f.write(output_graph_def.SerializeToString())
+
+ return output_optimized_graph_path
+
+
+# --------
+def freezeGraph(input_graph_path, checkpoint_path, output_node_name):
+ ''' this function calls freeze_graph.py of tensorflow and generates '*_frozen.pb' and '*_frozen.pbtxt'.
+
+ - input_graph_path : must be a path to pb file
+ - checkpoint_path : path of *.ckpt, e.g., '/tmp/inception_v3/graph.ckpt'
+ - output_node_name : name of head(top) operation node
+ '''
+
+ input_saver_def_path = ""
+ input_binary = True
+
+ restore_op_name = "save/restore_all"
+ filename_tensor_name = "save/Const:0"
+ clear_devices = True
+
+ (directory, fn, ext) = splitDirFilenameExt(input_graph_path)
+ output_frozen_graph_path = os.path.join(directory, fn + '_frozen.pb')
+
+ if file_validity_check(input_graph_path, 'pb') == False:
+ print("Error: {} not found or does not have a pb extension".format(input_graph_path))
+ sys.exit(1) # exit with a nonzero code on error
+
+ freeze_graph.freeze_graph(input_graph_path, input_saver_def_path, input_binary,
+ checkpoint_path, output_node_name, restore_op_name,
+ filename_tensor_name, output_frozen_graph_path,
+ clear_devices, "")
+
+ pbtxtPath = convertPb2Pbtxt(output_frozen_graph_path)
+
+ return (output_frozen_graph_path, pbtxtPath)
+
+
+# --------
+def generateTensorboardLog(pbFiles, graphNames, directory):
+ ''' Generate logs for Tensorboard; after calling this, the graph(s) can be viewed inside Tensorboard.
+ This function creates a new Session(), so call this outside of 'with Session():'
+
+ parameters:
+ - pbFiles: if multiple graphs needs to be shown, pass the list of pb (or pbtxt) files
+ - directory: parent directory of '/.tensorboard' directory where log files are saved
+
+ how to run tensorboard:
+ $ tensorboard --logdir=directory_in_parameter
+ '''
+ assert len(pbFiles) == len(graphNames)
+
+ # without this, the graph used by a previous session is reused : https://stackoverflow.com/questions/42706761/closing-session-in-tensorflow-doesnt-reset-graph
+ tf.reset_default_graph()
+ with tf.Session() as sess:
+
+ for pbFile, graphName in zip(pbFiles, graphNames):
+ importGraphIntoSession(sess, pbFile, graphName)
+
+ tbLogPath = directory
+ train_writer = tf.summary.FileWriter(tbLogPath)
+ train_writer.add_graph(sess.graph)
+ train_writer.flush()
+ train_writer.close()
+
+ return tbLogPath
+
+
+# --------
+def isScalar(x):
+ '''
+ keyword argument:
+ x - base_freezer.Tensor
+ '''
+
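+ # e.g., base.Tensor([], const_val=1.2) is a scalar: its shape is the empty list []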
+ return x.getShape() == []
diff --git a/tools/tensorflow_model_freezer/readme.md b/tools/tensorflow_model_freezer/readme.md
new file mode 100644
index 000000000..f627f11a5
--- /dev/null
+++ b/tools/tensorflow_model_freezer/readme.md
@@ -0,0 +1,20 @@
+## What this tool is about
+
+This tool generates the following files:
+1. __Tensorflow model__ files in *.pb, *.pbtxt
+1. Tensorflow model after __freezing__ in *.pb, *.pbtxt
+1. __Tensorboard__ log files to visually inspect the above 1 and 2.
+1. __TFLITE__ file after running TOCO
+
+By defining `Test Cases`, you can easily and quickly generate files for various ranks of operands.
+
+## How to use
+
+- Copy `MUL_gen.py` or `TOPK_gen.py` and modify to your taste (a minimal sketch is shown below).
+ - Note that `TOPK_gen.py` fails while generating the TFLITE file since TOCO does not support the `TOPK` operation.
+
+- Run `~/nnfw$ PYTHONPATH=$PYTHONPATH:./tools/tensorflow_model_freezer/ python tools/tensorflow_model_freezer/sample/MUL_gen.py ~/temp`
+ - Files will be generated under `~/temp`
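+
+A minimal sketch of a new generator, modeled on `sample/MUL_gen.py` (the `relu` op and the names here are illustrative, not part of the samples):
+
+```python
+import os
+import tensorflow as tf
+
+import base_freezer as base
+
+
+class Gen(base.BaseFreezer):
+    def __init__(self, path):
+        super(Gen, self).__init__(path)
+
+    def getOutputDirectory(self):
+        # root path of the generated files
+        return os.path.join(self.root_output_path, 'relu')
+
+    def getTestCases(self):
+        # test case name -> list of input tensor metadata
+        return {"relu_1d": [base.Tensor([5])]}
+
+    def buildModel(self, sess, test_case_tensor, tc_name):
+        input_list = []
+        x_tensor = self.createTFInput(test_case_tensor[0], input_list)
+        output_node = tf.nn.relu(x_tensor, name=tc_name)  # do not modify name
+
+        # dummy variable so that checkpoint saving does not fail (see the samples)
+        garbage = tf.get_variable(
+            "garbage", [1], dtype=tf.float32, initializer=tf.zeros_initializer())
+        sess.run(tf.global_variables_initializer())
+
+        return (input_list, [output_node])
+```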
+
+## Note
+- This tool is tested with Python 2.7 and 3
diff --git a/tools/tensorflow_model_freezer/sample/DIV_gen.py b/tools/tensorflow_model_freezer/sample/DIV_gen.py
new file mode 100755
index 000000000..c4e9cde07
--- /dev/null
+++ b/tools/tensorflow_model_freezer/sample/DIV_gen.py
@@ -0,0 +1,148 @@
+#!/usr/bin/python
+
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import platform
+import tensorflow as tf
+import argparse
+
+import base_freezer as base
+import model_freezer_util as util
+
+
+class Gen(base.BaseFreezer):
+ '''
+ class to generate tflite files for div
+ '''
+
+ def __init__(self, path):
+ super(self.__class__, self).__init__(path)
+
+ def getOutputDirectory(self):
+ return os.path.join(self.root_output_path,
+ 'div') # the root path of generated files
+
+ def getTestCases(self):
+ '''
+ this returns a dict containing test cases.
+ the key of the dict is the test case name and
+ the value is a list of input tensor metadata.
+ the test name (key) is used as
+ - the prefix of the file names to be generated (don't use white space or special characters)
+ - the output node name of the graph
+ '''
+ # yapf: disable
+ return {
+ "div_scalarConst_scalarConst":
+ [base.Tensor([], const_val=1.2),
+ base.Tensor([], const_val=-2.3)],
+ "div_1d_1d": [base.Tensor([5]), base.Tensor([5])],
+ "div_2d_2d": [base.Tensor([5, 3]), base.Tensor([5, 3])],
+ "div_3d_3d": [base.Tensor([5, 4, 3]),
+ base.Tensor([5, 4, 3])],
+ "div_4d_4d": [base.Tensor([2, 5, 4, 3]),
+ base.Tensor([2, 5, 4, 3])],
+ # broadcasting by scalar
+ "div_1d_scalarConst": [base.Tensor([5]),
+ base.Tensor([], const_val=1.1)],
+ "div_2d_scalarConst": [base.Tensor([5, 3]),
+ base.Tensor([], const_val=1.1)],
+ "div_3d_scalarConst": [base.Tensor([5, 4, 3]),
+ base.Tensor([], const_val=1.1)],
+ "div_4d_scalarConst": [base.Tensor([2, 5, 4, 3]),
+ base.Tensor([], const_val=1.1)],
+ # broadcasting by 1d
+ "div_2d_1d": [base.Tensor([5, 3]),
+ base.Tensor( [3])],
+ "div_3d_1d": [base.Tensor([5, 4, 3]),
+ base.Tensor( [3])],
+ "div_4d_1d": [base.Tensor([2, 5, 4, 3]),
+ base.Tensor( [3])],
+ # broadcasting by 2d
+ "div_3d_2d": [base.Tensor([5, 4, 3]),
+ base.Tensor( [4, 3])],
+ "div_4d_2d": [base.Tensor([2, 5, 4, 3]),
+ base.Tensor( [4, 3])],
+ # broadcasting by 3d
+ "div_4d_3d": [base.Tensor([2, 5, 4, 3]),
+ base.Tensor( [5, 4, 3])]
+ }
+ # yapf: enable
+
+ def buildModel(self, sess, test_case_tensor, tc_name):
+ '''
+ This method is called per test case (defined by getTestCases()).
+
+ keyword argument:
+ test_case_tensor -- test case tensor metadata
+ For example, if a test case is { "div_1d_1d": [base.Tensor([5]), base.Tensor([5])] }
+ test_case_tensor is [base.Tensor([5]), base.Tensor([5])]
+ '''
+
+ input_list = []
+
+ # ------ modify below for your model FROM here -------#
+
+ x_tensor = self.createTFInput(test_case_tensor[0], input_list)
+ y_tensor = self.createTFInput(test_case_tensor[1], input_list)
+
+ # defining output node = x_input / y_input
+ # and input list
+ output_node = tf.div(x_tensor, y_tensor, name=tc_name) # do not modify name
+
+ # ------ modify UNTIL here for your model -------#
+
+ # Note: if the graph doesn't have any CONST value, creating a checkpoint file fails.
+ # The next lines insert such a CONST to prevent that error.
+ # So Graph.pb/pbtxt contains this garbage info,
+ # but it will be removed in Graph_frozen.pb/pbtxt
+ garbage = tf.get_variable(
+ "garbage", [1], dtype=tf.float32, initializer=tf.zeros_initializer())
+ init_op = tf.global_variables_initializer()
+ garbage_value = [0]
+ sess.run(tf.assign(garbage, garbage_value))
+
+ sess.run(init_op)
+
+ # ------ modify appropriate return value -------#
+
+ # returning (input_node_list, output_node_list)
+ return (input_list, [output_node])
+
+
+'''
+How to run
+$ chmod +x tools/tensorflow_model_freezer/sample/name_of_this_file.py
+$ PYTHONPATH=$PYTHONPATH:./tools/tensorflow_model_freezer/ \
+ tools/tensorflow_model_freezer/sample/name_of_this_file.py \
+ ~/temp # directory where model files are saved
+'''
+# --------
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser(
+ description='Convert a TensorFlow model defined in Python to a frozen model.')
+ parser.add_argument(
+ "out_dir",
+ help=
+ "directory where generated pb, pbtxt, checkpoint and Tensorboard log files are stored."
+ )
+
+ args = parser.parse_args()
+ root_output_path = args.out_dir
+
+ Gen(root_output_path).createSaveFreezeModel()
diff --git a/tools/tensorflow_model_freezer/sample/MUL_gen.py b/tools/tensorflow_model_freezer/sample/MUL_gen.py
new file mode 100755
index 000000000..f2a92547b
--- /dev/null
+++ b/tools/tensorflow_model_freezer/sample/MUL_gen.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import platform
+import tensorflow as tf
+import argparse
+
+import base_freezer as base
+import model_freezer_util as util
+
+
+class Gen(base.BaseFreezer):
+ '''
+ class to generate tflite files for MUL
+ '''
+
+ def __init__(self, path):
+ super(self.__class__, self).__init__(path)
+
+ def getOutputDirectory(self):
+ return os.path.join(self.root_output_path,
+ 'mul') # the root path of generated files
+
+ def getTestCases(self):
+ '''
+ this returns a dict containing test cases.
+ the key of the dict is the test case name and
+ the value is a list of input tensor metadata.
+ the test name (key) is used as
+ - the prefix of the file names to be generated (don't use white space or special characters)
+ - the output node name of the graph
+ '''
+ return {
+ "mul_scalarConst_scalarConst":
+ [base.Tensor([], const_val=1.2),
+ base.Tensor([], const_val=-2.3)],
+ "mul_1d_1d": [base.Tensor([5]), base.Tensor([5])],
+ "mul_2d_2d": [base.Tensor([5, 3]), base.Tensor([5, 3])],
+ "mul_3d_3d": [base.Tensor([5, 4, 3]),
+ base.Tensor([5, 4, 3])],
+ "mul_2d_1d": [base.Tensor([5, 3]), base.Tensor([3])], # broadcasting
+ "mul_3d_1d": [base.Tensor([5, 4, 3]),
+ base.Tensor([3])],
+ "mul_1d_scalarConst": [base.Tensor([5]),
+ base.Tensor([], const_val=1.1)], # mul by scalar
+ "mul_2d_scalarConst": [base.Tensor([5, 3]),
+ base.Tensor([], const_val=1.1)],
+ "mul_1d_scalar": [base.Tensor([5, 3]), base.Tensor([])]
+ }
+
+ def buildModel(self, sess, test_case_tensor, tc_name):
+ '''
+ This method is called per test case (defined by getTestCases()).
+
+ keyword argument:
+ test_case_tensor -- test case tensor metadata
+ For example, if a test case is { "mul_1d_1d": [base.Tensor([5]), base.Tensor([5])] }
+ test_case_tensor is [base.Tensor([5]), base.Tensor([5])]
+ '''
+
+ input_list = []
+
+ # ------ modify below for your model FROM here -------#
+
+ x_tensor = self.createTFInput(test_case_tensor[0], input_list)
+ y_tensor = self.createTFInput(test_case_tensor[1], input_list)
+
+ # defining output node = x_input * y_input
+ # and input list
+ output_node = tf.multiply(x_tensor, y_tensor, name=tc_name) # do not modify name
+
+ # ------ modify UNTIL here for your model -------#
+
+ # Note: if the graph doesn't have any CONST value, creating a checkpoint file fails.
+ # The next lines insert such a CONST to prevent that error.
+ # So Graph.pb/pbtxt contains this garbage info,
+ # but it will be removed in Graph_frozen.pb/pbtxt
+ garbage = tf.get_variable(
+ "garbage", [1], dtype=tf.float32, initializer=tf.zeros_initializer())
+ init_op = tf.global_variables_initializer()
+ garbage_value = [0]
+ sess.run(tf.assign(garbage, garbage_value))
+
+ sess.run(init_op)
+
+ # ------ modify appropriate return value -------#
+
+ # returning (input_node_list, output_node_list)
+ return (input_list, [output_node])
+
+
+'''
+How to run
+$ chmod +x tools/tensorflow_model_freezer/sample/name_of_this_file.py
+$ PYTHONPATH=$PYTHONPATH:./tools/tensorflow_model_freezer/ \
+ tools/tensorflow_model_freezer/sample/name_of_this_file.py \
+ ~/temp # directory where model files are saved
+'''
+# --------
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser(
+ description='Convert a TensorFlow model defined in Python to a frozen model.')
+ parser.add_argument(
+ "out_dir",
+ help=
+ "directory where generated pb, pbtxt, checkpoint and Tensorboard log files are stored."
+ )
+
+ args = parser.parse_args()
+ root_output_path = args.out_dir
+
+ Gen(root_output_path).createSaveFreezeModel()
diff --git a/tools/tensorflow_model_freezer/sample/Operation_gen.py b/tools/tensorflow_model_freezer/sample/Operation_gen.py
new file mode 100644
index 000000000..be4d74b2c
--- /dev/null
+++ b/tools/tensorflow_model_freezer/sample/Operation_gen.py
@@ -0,0 +1,214 @@
+#!/usr/bin/python
+
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import platform
+import tensorflow as tf
+import argparse
+
+import base_freezer as base
+import model_freezer_util as util
+
+
+# see MUL_gen.py for detailed usage and a sample
+class GenFloor(base.BaseFreezer):
+ def __init__(self, path):
+ super(self.__class__, self).__init__(path)
+
+ def getOutputDirectory(self):
+ return os.path.join(self.root_output_path,
+ 'floor') # the root path of generated files
+
+ def getTestCases(self):
+ return {"floor_4d_4d": [base.Tensor([1, 2, 2, 1]), base.Tensor([1, 2, 2, 1])]}
+
+ def buildModel(self, sess, test_case_tensor, tc_name):
+
+ input_list = []
+
+ x_tensor = self.createTFInput(test_case_tensor[0], input_list)
+
+ output_node = tf.floor(x_tensor, name=tc_name)
+
+ # ------ modify UNTIL here for your model -------#
+
+ # Note: if the graph doesn't have any CONST value, creating a checkpoint file fails.
+ # The next lines insert such a CONST to prevent that error.
+ # So Graph.pb/pbtxt contains this garbage info,
+ # but it will be removed in Graph_frozen.pb/pbtxt
+ garbage = tf.get_variable(
+ "garbage", [1], dtype=tf.float32, initializer=tf.zeros_initializer())
+ init_op = tf.global_variables_initializer()
+ garbage_value = [0]
+ sess.run(tf.assign(garbage, garbage_value))
+
+ sess.run(init_op)
+
+ # ------ modify appropriate return value -------#
+
+ # returning (input_node_list, output_node_list)
+ return (input_list, [output_node])
+
+
+class GenPad(base.BaseFreezer):
+ def __init__(self, path):
+ super(self.__class__, self).__init__(path)
+
+ def getOutputDirectory(self):
+ return os.path.join(self.root_output_path,
+ 'pad') # the root path of generated files
+
+ def getTestCases(self):
+ return {
+ "pad_4d_2d": [
+ base.Tensor([1, 2, 2, 1]),
+ base.Tensor([4, 2], dtype=tf.int32, const_val=[0, 0, 1, 1, 1, 1, 0, 0])
+ ]
+ }
+
+ def buildModel(self, sess, test_case_tensor, tc_name):
+
+ input_list = []
+
+ input_tensor = self.createTFInput(test_case_tensor[0], input_list)
+ pad_tensor = self.createTFInput(test_case_tensor[1], input_list)
+
+ output_node = tf.pad(input_tensor, pad_tensor, name=tc_name)
+
+ # ------ modify UNTIL here for your model -------#
+
+ # Note: if the graph doesn't have any CONST value, creating a checkpoint file fails.
+ # The next lines insert such a CONST to prevent that error.
+ # So Graph.pb/pbtxt contains this garbage info,
+ # but it will be removed in Graph_frozen.pb/pbtxt
+ garbage = tf.get_variable(
+ "garbage", [1], dtype=tf.float32, initializer=tf.zeros_initializer())
+ init_op = tf.global_variables_initializer()
+ garbage_value = [0]
+ sess.run(tf.assign(garbage, garbage_value))
+
+ sess.run(init_op)
+
+ # ------ modify appropriate return value -------#
+
+ # returning (input_node_list, output_node_list)
+ return (input_list, [output_node])
+
+
+class GenSqueeze(base.BaseFreezer):
+ def __init__(self, path):
+ super(self.__class__, self).__init__(path)
+
+ def getOutputDirectory(self):
+ return os.path.join(self.root_output_path,
+ 'squeeze') # the root path of generated files
+
+ def getTestCases(self):
+ return {"squeeze_3d": [base.Tensor([1, 5, 1])]}
+
+ def buildModel(self, sess, test_case_tensor, tc_name):
+
+ input_list = []
+
+ input_tensor = self.createTFInput(test_case_tensor[0], input_list)
+
+ output_node = tf.squeeze(input_tensor, [2], name=tc_name)
+
+ # ------ modify UNTIL here for your model -------#
+
+ # Note: if the graph doesn't have any CONST value, creating a checkpoint file fails.
+ # The next lines insert such a CONST to prevent that error.
+ # So Graph.pb/pbtxt contains this garbage info,
+ # but it will be removed in Graph_frozen.pb/pbtxt
+ garbage = tf.get_variable(
+ "garbage", [1], dtype=tf.float32, initializer=tf.zeros_initializer())
+ init_op = tf.global_variables_initializer()
+ garbage_value = [0]
+ sess.run(tf.assign(garbage, garbage_value))
+
+ sess.run(init_op)
+
+ # ------ modify appropriate return value -------#
+
+ # returning (input_node_list, output_node_list)
+ return (input_list, [output_node])
+
+
+class GenTranspose(base.BaseFreezer):
+ def __init__(self, path):
+ super(self.__class__, self).__init__(path)
+
+ def getOutputDirectory(self):
+ return os.path.join(self.root_output_path,
+ 'transpose') # the root path of generated files
+
+ def getTestCases(self):
+ return {"transpose_4d": [base.Tensor([1, 2, 2, 1])]}
+
+ def buildModel(self, sess, test_case_tensor, tc_name):
+
+ input_list = []
+
+ input_tensor = self.createTFInput(test_case_tensor[0], input_list)
+
+ output_node = tf.transpose(input_tensor, [0, 2, 1, 3], name=tc_name)
+
+ # ------ modify UNTIL here for your model -------#
+
+ # Note: if the graph doesn't have any CONST value, creating a checkpoint file fails.
+ # The next lines insert such a CONST to prevent that error.
+ # So Graph.pb/pbtxt contains this garbage info,
+ # but it will be removed in Graph_frozen.pb/pbtxt
+ garbage = tf.get_variable(
+ "garbage", [1], dtype=tf.float32, initializer=tf.zeros_initializer())
+ init_op = tf.global_variables_initializer()
+ garbage_value = [0]
+ sess.run(tf.assign(garbage, garbage_value))
+
+ sess.run(init_op)
+
+ # ------ modify appropriate return value -------#
+
+ # returning (input_node_list, output_node_list)
+ return (input_list, [output_node])
+
+
+'''
+How to run
+$ chmod +x tools/tensorflow_model_freezer/sample/name_of_this_file.py
+$ PYTHONPATH=$PYTHONPATH:./tools/tensorflow_model_freezer/ \
+ tools/tensorflow_model_freezer/sample/name_of_this_file.py \
+ ~/temp # directory where model files are saved
+'''
+# --------
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser(
+ description='Convert a TensorFlow model defined in Python to a frozen model.')
+ parser.add_argument(
+ "out_dir",
+ help=
+ "directory where generated pb, pbtxt, checkpoint and Tensorboard log files are stored."
+ )
+
+ args = parser.parse_args()
+ root_output_path = args.out_dir
+
+ GenFloor(root_output_path).createSaveFreezeModel()
+ GenPad(root_output_path).createSaveFreezeModel()
+ GenSqueeze(root_output_path).createSaveFreezeModel()
+ GenTranspose(root_output_path).createSaveFreezeModel()
diff --git a/tools/tensorflow_model_freezer/sample/SQUEEZE_gen.py b/tools/tensorflow_model_freezer/sample/SQUEEZE_gen.py
new file mode 100755
index 000000000..88b3dfcb2
--- /dev/null
+++ b/tools/tensorflow_model_freezer/sample/SQUEEZE_gen.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import platform
+import tensorflow as tf
+import argparse
+
+import base_freezer as base
+import model_freezer_util as util
+
+
+class Gen(base.BaseFreezer):
+ '''
+ class to generate tflite files for Squeeze
+ '''
+
+ def __init__(self, path):
+ super(self.__class__, self).__init__(path)
+
+ def getOutputDirectory(self):
+ return os.path.join(self.root_output_path,
+ 'squeeze') # the root path of generated files
+
+ def getTestCases(self):
+ '''
+ this returns a dict containing test cases.
+ the key of the dict is the test case name and
+ the value is a list of input tensor metadata.
+ the test name (key) is used as
+ - the prefix of the file names to be generated (don't use white space or special characters)
+ - the output node name of the graph
+ '''
+ return {
+ "squeeze_2d": [base.Tensor([1, 3])],
+ "squeeze_4d_1": [base.Tensor([1, 3, 2, 1])],
+ # squeeze with axis
+ "squeeze_4d_2": [base.Tensor([1, 3, 2, 1]),
+ [0]], # squeeze [1, 3, 2, 1] to [3, 2, 1]
+ "squeeze_4d_3": [base.Tensor([1, 3, 2, 1]),
+ [3]], # squeeze [1, 3, 2, 1] to [1, 3, 2]
+ "squeeze_4d_4": [base.Tensor([1, 3, 2, 1]),
+ [0, 3]] # squeeze [1, 3, 2, 1] to [3, 2]
+ }
+
+ def buildModel(self, sess, test_case_tensor, tc_name):
+ '''
+ This method is called per test case (defined by getTestCases()).
+
+ keyword argument:
+ test_case_tensor -- test case tensor metadata
+ For example, if a test case is { "mul_1d_1d": [base.Tensor([5]), base.Tensor([5])] }
+ test_case_tensor is [base.Tensor([5]), base.Tensor([5])]
+ '''
+
+ input_list = []
+
+ # ------ modify below for your model FROM here -------#
+
+ x_tensor = self.createTFInput(test_case_tensor[0], input_list)
+ if len(test_case_tensor) == 2:
+ axis_tensor = test_case_tensor[1]
+
+        # defining the output node (a squeeze of x_tensor)
+        # and the input list
+ if len(test_case_tensor) == 1:
+ output_node = tf.squeeze(input=x_tensor, name=tc_name) # do not modify name
+ else:
+ output_node = tf.squeeze(
+ input=x_tensor, axis=axis_tensor, name=tc_name) # do not modify name
+
+ # ------ modify UNTIL here for your model -------#
+
+        # Note: if the graph has no CONST value, creating a checkpoint file fails.
+        # The next lines insert a dummy CONST to prevent that error.
+        # Graph.pb/pbtxt therefore contains this garbage info,
+        # but it is removed in Graph_frozen.pb/pbtxt.
+ garbage = tf.get_variable(
+ "garbage", [1], dtype=tf.float32, initializer=tf.zeros_initializer())
+ init_op = tf.global_variables_initializer()
+ garbage_value = [0]
+ sess.run(tf.assign(garbage, garbage_value))
+
+ sess.run(init_op)
+
+ # ------ modify appropriate return value -------#
+
+ # returning (input_node_list, output_node_list)
+ return (input_list, [output_node])
+
+
+'''
+How to run
+$ chmod +x tools/tensorflow_model_freezer/sample/name_of_this_file.py
+$ PYTHONPATH=$PYTHONPATH:./tools/tensorflow_model_freezer/ \
+ tools/tensorflow_model_freezer/sample/name_of_this_file.py \
+ ~/temp # directory where model files are saved
+'''
+# --------
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser(
+        description='Convert a TensorFlow model defined in Python to a frozen model.')
+ parser.add_argument(
+ "out_dir",
+ help=
+ "directory where generated pb, pbtxt, checkpoint and Tensorboard log files are stored."
+ )
+
+ args = parser.parse_args()
+ root_output_path = args.out_dir
+
+ Gen(root_output_path).createSaveFreezeModel()
diff --git a/tools/tensorflow_model_freezer/sample/TOPK_gen.py b/tools/tensorflow_model_freezer/sample/TOPK_gen.py
new file mode 100755
index 000000000..0c16d5b75
--- /dev/null
+++ b/tools/tensorflow_model_freezer/sample/TOPK_gen.py
@@ -0,0 +1,119 @@
+#!/usr/bin/python
+
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import platform
+import tensorflow as tf
+import argparse
+
+import base_freezer as base
+import model_freezer_util as util
+
+
+class Gen(base.BaseFreezer):
+ '''
+ class to generate tflite file for TOPK
+ '''
+
+ def __init__(self, path):
+ super(self.__class__, self).__init__(path)
+
+ def getOutputDirectory(self):
+ return os.path.join(self.root_output_path,
+ 'topk') # the root path of generated files
+
+ def getTestCases(self):
+ '''
+        This returns a hash of test cases (= sets of input types), for example:
+        [1.2, -2.3] : two inputs, both scalars; one is 1.2, the other is -2.3
+        [[5,3], [5,4,3]] : two inputs, both shapes; one is [5,3], the other is [5,4,3]
+
+        The test name (key of the hash) is used as
+        - the prefix of the generated file names
+        - the output node name of the graph
+ '''
+ return {
+ "topk_2d": [
+ base.Tensor(shape=[2, 3], dtype=tf.float32),
+ base.Tensor(shape=[], const_val=2, dtype=tf.int32)
+ ],
+ "topk_3d": [
+ base.Tensor(shape=[2, 3, 4], dtype=tf.float32),
+ base.Tensor(shape=[], const_val=2, dtype=tf.int32)
+ ],
+ }
+
+ def buildModel(self, sess, test_case_tensor, tc_name):
+ '''
+        Please refer to the comment in MUL_gen.py to see how to rewrite this method.
+ '''
+
+ input_list = []
+
+ # ------ modify below for your model FROM here -------#
+
+ x_tensor = self.createTFInput(test_case_tensor[0], input_list)
+ y_tensor = self.createTFInput(test_case_tensor[1], input_list)
+
+ # defining output node and input list
+ output_node = tf.nn.top_k(
+ x_tensor,
+ y_tensor, # add your input here
+ name=tc_name) # do not modify name
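+        # Note: tf.nn.top_k returns a (values, indices) pair; with k = 2 and a
+        # [2, 3] input, values and indices each have shape [2, 2].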
+
+ # ------ modify UNTIL here for your model -------#
+
+        # Note: if the graph has no CONST value, creating a checkpoint file fails.
+        # The next lines insert a dummy CONST to prevent that error.
+        # Graph.pb/pbtxt therefore contains this garbage info,
+        # but it is removed in Graph_frozen.pb/pbtxt.
+ garbage = tf.get_variable(
+ "garbage", [1], dtype=tf.float32, initializer=tf.zeros_initializer())
+ init_op = tf.global_variables_initializer()
+ garbage_value = [0]
+ sess.run(tf.assign(garbage, garbage_value))
+
+ sess.run(init_op)
+
+ # ------ modify appropriate return value -------#
+
+ # returning (input_node_list, output_node_list)
+ return (input_list, [output_node])
+
+
+'''
+How to run
+$ chmod +x tools/tensorflow_model_freezer/sample/name_of_this_file.py
+$ PYTHONPATH=$PYTHONPATH:./tools/tensorflow_model_freezer/ \
+ tools/tensorflow_model_freezer/sample/name_of_this_file.py \
+ ~/temp # directory where model files are saved
+'''
+# --------
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser(
+        description='Convert a TensorFlow model defined in Python to a frozen model.')
+ parser.add_argument(
+ "out_dir",
+ help=
+ "directory where generated pb, pbtxt, checkpoint and Tensorboard log files are stored."
+ )
+
+ args = parser.parse_args()
+ root_output_path = args.out_dir
+
+ Gen(root_output_path).createSaveFreezeModel()
diff --git a/tools/tensorflow_model_freezer/sample/__init__.py b/tools/tensorflow_model_freezer/sample/__init__.py
new file mode 100644
index 000000000..89d760b4a
--- /dev/null
+++ b/tools/tensorflow_model_freezer/sample/__init__.py
@@ -0,0 +1,15 @@
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# indicating that this folder is a Python package
diff --git a/tools/test_driver/README.md b/tools/test_driver/README.md
new file mode 100644
index 000000000..2981267a5
--- /dev/null
+++ b/tools/test_driver/README.md
@@ -0,0 +1,63 @@
+# How the test driver works
+
+## Unittest
+- There are two kinds of unittests:
+ - Kernel ACL
+ - Runtime
+- Related file : `run_unittest.sh`
+- Usage :
+```
+$ ./tools/test_driver/test_driver.sh \
+ --artifactpath=. \
+ --unittest
+```
+- The `run_unittest.sh` usage :
+
+```
+$ LD_LIBRARY_PATH=Product/out/lib \
+ ./tools/test_driver/run_unittest.sh \
+ --reportdir=report \
+ --unittestdir=Product/out/unittest
+```
+
+### Kernel ACL Unittest
+- Test whether the various operations are performed successfully and whether the output and the expected value are the same.
+- TC location : `libs/kernel/acl/src/`
+
+### Runtime Unittest
+- Test whether the expected value and the actual output value are the same when the model is configured, compiled and executed.
+- TC location : `runtimes/tests/neural_networks_test/`
+
+## Framework test
+- Execute the **tflite model** using the given **driver**.
+- There is a TC directory for each model, and a `config.sh` file exists in each TC directory (a sketch follows below).
+- When `run_test.sh` runs, it reads the **tflite model** information from `config.sh`, downloads the model file, and runs the **tflite model** with the given **driver**.
+- Related files : `run_test.sh` and `run_frameworktest.sh`
+- TC location :
+ - `tests/framework/tests/` : Config directory for TC
+ - `tests/framework/cache/` : TC (Downloaded tflite model files)
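+- For reference, `config.sh` is a small shell fragment describing the TC. A minimal sketch (the `MODELFILE_NAME` variable below is illustrative, not the actual schema; `STATUS` is read by `run_benchmark_op.sh`):
+```
+# tests/framework/tests/<TC name>/config.sh
+MODELFILE_NAME="model_to_test.tflite"  # assumed variable naming the model file to download
+STATUS="enabled"                       # optional; "disabled" makes run_benchmark_op.sh skip the TC
+```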
+
+### Run tflite_run with various tflite models
+- Driver : `tflite_run`
+- Driver source location : `tools/tflite_run/`
+- Usage :
+```
+$ ./tools/test_driver/test_driver.sh \
+ --artifactpath=. \
+ --frameworktest
+```
+- Related pages : [tflite_run](https://github.sec.samsung.net/STAR/nnfw/tree/master/tools/tflite_run)
+
+### Run nnapi_test with various tflite models
+- `nnapi_test` runs tflite in two ways and compares the result:
+ 1. tflite interpreter
+ 2. `libneuralnetworks.so`, which could be PureACL or NNAPI depending on `--ldlibrarypath`(`LD_LIBRARY_PATH`)
+- Driver : `nnapi_test`
+- Driver source location : `tools/nnapi_test/`
+- Usage :
+```
+$ ./tools/test_driver/test_driver.sh \
+ --artifactpath=. \
+    --verification
+```
+
diff --git a/tools/test_driver/benchmark_op_list.txt b/tools/test_driver/benchmark_op_list.txt
new file mode 100644
index 000000000..166ddb7d7
--- /dev/null
+++ b/tools/test_driver/benchmark_op_list.txt
@@ -0,0 +1,11 @@
+add/4D
+average_pool_2d/avgpool1
+average_pool_2d/avgpool1
+concat/concat1
+conv_2d/convolution1
+conv_2d/convolution2
+div/broadcast
+max_pool_2d/maxpool1
+max_pool_2d/maxpool2
+resize_bilinear
+softmax
diff --git a/tools/test_driver/common.sh b/tools/test_driver/common.sh
new file mode 100755
index 000000000..12a35fd35
--- /dev/null
+++ b/tools/test_driver/common.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+#
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+MY_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+function switch_nnfw_kernel_env()
+{
+ local switch=$1 # "ON" or "OFF"
+ local mode=$2 # "acl" or "neon" or ""
+
+ # TODO: Handle whether there is nnfw_kernel_env_list.txt or not
+ local NNFW_KERNEL_ENV_FILE=$MY_PATH/nnfw_kernel_env_list.txt
+
+ for ENV in $(cat $NNFW_KERNEL_ENV_FILE); do
+ if [[ "$switch" == "ON" ]]; then
+ export "$ENV=$mode"
+ else
+ unset "$ENV"
+ fi
+ done
+}
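+
+# Example usage (as in the benchmark scripts that source this file):
+#   switch_nnfw_kernel_env "ON" "acl"    # export every listed env var with value "acl"
+#   switch_nnfw_kernel_env "OFF"         # unset them all again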
diff --git a/tools/test_driver/neurun_frameworktest_list.txt b/tools/test_driver/neurun_frameworktest_list.txt
new file mode 100644
index 000000000..b97558b0c
--- /dev/null
+++ b/tools/test_driver/neurun_frameworktest_list.txt
@@ -0,0 +1,10 @@
+average_pool_2d/avgpool1
+average_pool_2d/avgpool1
+conv_2d/convolution1
+conv_2d/convolution2
+max_pool_2d/maxpool1
+max_pool_2d/maxpool2
+softmax
+reshape/reshape1
+MODELS/inception_nonslim
+MODELS/inception_slim
diff --git a/tools/test_driver/print_to_json.sh b/tools/test_driver/print_to_json.sh
index 857fa0423..a5ea5689b 100755
--- a/tools/test_driver/print_to_json.sh
+++ b/tools/test_driver/print_to_json.sh
@@ -14,9 +14,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+JSON_BENCHMARK_REPORT_DIR= # $ARTIFACT/report/benchmark
+JSON_PRINT_TO_DIR= # $ARTIFACT/report
+JSON_RESULT_JSON= # $ARTIFACT/report/benchmark_result.json or benchmark_op_result.json
+JSON_MODELS_FILE_DIR= # $ARTIFACT/report/benchmark
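+
+# print_to_json is called as:
+#   print_to_json <benchmark report dir> <report output dir> <result json filename>
+# (see make_json_for_benchmark_result in tools/test_driver/py/test_driver.py)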
+
function echo_to_file
{
- echo -e "$1" >> $RESULT_JSON
+ echo -e "$1" >> $JSON_RESULT_JSON
}
function print_comma() # ,
@@ -96,7 +101,7 @@ function print_test()
function print_tests()
{
local MODEL=$1
- local REPORT_MODEL_DIR=$ARTIFACT_PATH/report/benchmark/$MODEL
+ local REPORT_MODEL_DIR=$JSON_BENCHMARK_REPORT_DIR/$MODEL
local TEST_RESULTS=$(find $REPORT_MODEL_DIR -name "*.result" -exec basename {} \;)
local TEST_NUM=$(find $REPORT_MODEL_DIR -name "*.result" | wc -l)
@@ -118,19 +123,28 @@ function print_tests()
function print_groups()
{
- local MODEL_NUM=$(cat $MODELS_FILE | wc -l)
- local MODELS=$(cat $MODELS_FILE | awk '{print $1}' | uniq)
+ local TOTAL_MODEL_NUM=0
+ local TOTAL_MODELS=
+
+ for MODELS_FILE in $(find $JSON_MODELS_FILE_DIR -name "benchmark*_models.txt"); do
+        # In $MODELS_FILE, there are only unique (not duplicated) model names.
+ local MODEL_NUM=$(cat $MODELS_FILE | wc -l)
+ TOTAL_MODEL_NUM=$((TOTAL_MODEL_NUM+MODEL_NUM))
+ for MODELS in $(cat $MODELS_FILE); do
+ TOTAL_MODELS+="$MODELS "
+ done
+ done
print_bracket_start "groups"
local i=0
- for MODEL in $MODELS; do
+ for MODEL in $TOTAL_MODELS; do
print_brace_start
print_key_value "name" " $MODEL"
print_comma
print_tests $MODEL
print_brace_end
- if [[ $i -ne $MODEL_NUM-1 ]]; then
+ if [[ $i -ne $TOTAL_MODEL_NUM-1 ]]; then
print_comma
fi
i=$((i+1))
@@ -141,10 +155,13 @@ function print_groups()
function print_to_json()
{
- RESULT_JSON=$ARTIFACT_PATH/report/benchmark_result.json
- rm -f $RESULT_JSON
+ JSON_BENCHMARK_REPORT_DIR=$1
+ JSON_PRINT_TO_DIR=$2
+ JSON_PRINT_TO_FILENAME=$3
- MODELS_FILE=$ARTIFACT_PATH/report/benchmark/benchmark_models.txt
+ JSON_RESULT_JSON=$JSON_PRINT_TO_DIR/$JSON_PRINT_TO_FILENAME
+ rm -f $JSON_RESULT_JSON
+ JSON_MODELS_FILE_DIR=$JSON_BENCHMARK_REPORT_DIR
print_brace_start
print_groups
diff --git a/tools/test_driver/py/common.py b/tools/test_driver/py/common.py
new file mode 100755
index 000000000..37b40e1ce
--- /dev/null
+++ b/tools/test_driver/py/common.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import os.path
+
+mypath = os.path.abspath(os.path.dirname(__file__))
+
+
+def switch_nnfw_kernel_env(mode):
+ # mode : "acl" or "neon" or ""
+
+ # TODO: Handle whether there is nnfw_kernel_env_list.txt or not
+ # FIXME: Now nnfw_kernel_env_list.txt is parent dir of current dir
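+    # The file lists one environment-variable name per line; each name is
+    # exported with the given mode as its value.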
+ filename = "nnfw_kernel_env_list.txt"
+ envfilename = mypath + "/../{filename}".format(filename=filename)
+
+ with open(envfilename) as envfile:
+ for env in envfile:
+ env = env[:-1] # env has new line at the end
+ os.environ[env] = mode
+
+
+if __name__ == "__main__":
+ # for test
+ switch_nnfw_kernel_env("acl")
+ switch_nnfw_kernel_env("neon")
+ switch_nnfw_kernel_env("")
diff --git a/tools/test_driver/py/run_frameworktest.py b/tools/test_driver/py/run_frameworktest.py
new file mode 100755
index 000000000..a4fbd075a
--- /dev/null
+++ b/tools/test_driver/py/run_frameworktest.py
@@ -0,0 +1,199 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import argparse
+
+
+def get_parsed_options():
+ parser = argparse.ArgumentParser(
+ prog='run_frameworktest.py', usage='%(prog)s [options]')
+
+ parser.add_argument(
+ "--runtestsh",
+ action="store",
+ type=str,
+ dest="fwtest_runtestsh",
+ required=True,
+ help="(Usually : tests/framework/run_test.sh) run test shell for framework test")
+
+ parser.add_argument(
+ "--driverbin",
+ action="store",
+ type=str,
+ dest="fwtest_driverbin",
+ required=True,
+ help="(Usually in Product/out/bin/) driver bin for framework test")
+
+ parser.add_argument(
+ "--tapname",
+ action="store",
+ type=str,
+ dest="fwtest_tapname",
+ help="tap name for framework test")
+
+ parser.add_argument(
+ "--logname",
+ action="store",
+ type=str,
+ dest="fwtest_logname",
+ help="log name for framework test")
+
+ parser.add_argument(
+ "--testname",
+ action="store",
+ type=str,
+ dest="fwtest_testname",
+ help="test name of framework test")
+
+ parser.add_argument(
+ "--frameworktest_list_file",
+ action="store",
+ type=str,
+ dest="frameworktest_list_file",
+ help="list of files to run framework test")
+
+ parser.add_argument(
+ "--reportdir",
+ action="store",
+ type=str,
+ dest="fwtest_reportdir",
+ default="report",
+ help="(default=report) directory that each test result will be stored")
+
+ parser.add_argument(
+ "--ldlibrarypath",
+ action="store",
+ type=str,
+ dest="ldlibrarypath",
+ help=
+ "(usually : ARTIFACT_PATH/Product/out/lib) path that you want to include libraries"
+ )
+
+ options = parser.parse_args()
+ return options
+
+
+# Check each parameter for validity
+def check_params(fwtest_runtestsh, fwtest_driverbin, fwtest_reportdir, fwtest_tapname,
+ fwtest_logname, fwtest_testname, frameworktest_list_file,
+ ldlibrary_path):
+ if fwtest_runtestsh == "" or fwtest_runtestsh == None:
+ print("Fail : runtestsh is not given")
+ print("(Usually runtestsh for framework test is tests/framework/run_test.sh)")
+ sys.exit(1)
+
+ if os.path.isfile(fwtest_runtestsh) == False:
+ print("Fail : runtestsh is not valid")
+ sys.exit(1)
+
+ if fwtest_driverbin == "" or fwtest_driverbin == None:
+ print("Fail : driverbin is not given")
+ print("(Usually driverbin for framework test is in Product/out/bin/)")
+ sys.exit(1)
+
+ if os.path.isfile(fwtest_driverbin) == False:
+ print("Fail : driverbin is not valid")
+ sys.exit(1)
+
+ if fwtest_testname == "" or fwtest_testname == None:
+ print("Fail : testname is not given")
+ sys.exit(1)
+
+ if fwtest_tapname == "" or fwtest_tapname == None:
+ print("Fail : tapname is not given")
+ sys.exit(1)
+
+ if fwtest_logname == "" or fwtest_logname == None:
+ print("Fail : logname is not given")
+ sys.exit(1)
+
+ if fwtest_reportdir == "" or fwtest_reportdir == None:
+ print("Fail : report directory is not given")
+ sys.exit(1)
+
+ if type(ldlibrary_path) is str and ldlibrary_path != "":
+ os.environ["LD_LIBRARY_PATH"] = ldlibrary_path
+
+
+# Just call this function when running framework test in test_driver.py
+def run_frameworktest(fwtest_runtestsh, fwtest_driverbin, fwtest_reportdir,
+ fwtest_tapname, fwtest_logname, fwtest_testname,
+ frameworktest_list_file, ldlibrary_path):
+
+ # Handling exceptions for parameters
+ check_params(fwtest_runtestsh, fwtest_driverbin, fwtest_reportdir, fwtest_tapname,
+ fwtest_logname, fwtest_testname, frameworktest_list_file, ldlibrary_path)
+
+ os.makedirs(fwtest_reportdir, exist_ok=True)
+
+ print("")
+ print("============================================")
+ print("{fwtest_testname} with {fwtest_driverbin_name} ...".format(
+ fwtest_testname=fwtest_testname,
+ fwtest_driverbin_name=fwtest_driverbin[fwtest_driverbin.rfind('/') + 1:]))
+
+ # Run framework test using models in model_list
+ model_list = ""
+ if frameworktest_list_file != None and frameworktest_list_file != "":
+ fwtest_list_file = open(frameworktest_list_file, "r")
+ for line in fwtest_list_file:
+ model_list += (line[:-1] + " ")
+ fwtest_list_file.close()
+
+ # If model_list is empty, all possible models will be found automatically by fwtest_runtestsh
+ cmd = "{fwtest_runtestsh} --driverbin={fwtest_driverbin} \
+ --reportdir={fwtest_reportdir} \
+ --tapname={fwtest_tapname} \
+ {model_list} \
+ > {fwtest_reportdir}/{fwtest_logname} 2>&1".format(
+ fwtest_runtestsh=fwtest_runtestsh,
+ fwtest_driverbin=fwtest_driverbin,
+ fwtest_reportdir=fwtest_reportdir,
+ fwtest_tapname=fwtest_tapname,
+ model_list=model_list,
+ fwtest_logname=fwtest_logname)
+ fwtest_result = os.system(cmd)
+
+ print("")
+ tap_file_path = "{fwtest_reportdir}/{fwtest_tapname}".format(
+ fwtest_reportdir=fwtest_reportdir, fwtest_tapname=fwtest_tapname)
+ tap_file = open(tap_file_path, "r")
+ tap_data = tap_file.read()
+ print(tap_data)
+ tap_file.close()
+
+ if fwtest_result != 0:
+ print("")
+ print("{fwtest_testname} failed... exit code: {fwtest_result}".format(
+ fwtest_testname=fwtest_testname, fwtest_result=fwtest_result))
+ print("============================================")
+ print("")
+ sys.exit(1)
+
+ print("============================================")
+ print("")
+ sys.exit(0)
+
+
+if __name__ == "__main__":
+ options = get_parsed_options()
+ sys.exit(
+ run_frameworktest(options.fwtest_runtestsh, options.fwtest_driverbin,
+ options.fwtest_reportdir, options.fwtest_tapname,
+ options.fwtest_logname, options.fwtest_testname,
+ options.frameworktest_list_file, options.ldlibrarypath))
diff --git a/tools/test_driver/py/run_unittest.py b/tools/test_driver/py/run_unittest.py
new file mode 100755
index 000000000..2e2ad3434
--- /dev/null
+++ b/tools/test_driver/py/run_unittest.py
@@ -0,0 +1,187 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import argparse
+import subprocess
+
+
+def get_parsed_options():
+ parser = argparse.ArgumentParser(prog='run_unittest.py', usage='%(prog)s [options]')
+
+ parser.add_argument(
+ "--reportdir",
+ action="store",
+ type=str,
+ dest="reportdir",
+ default="report",
+ help="(default=report) directory that each test result will be stored")
+
+ parser.add_argument(
+ "--unittestdir",
+ action="store",
+ type=str,
+ dest="unittestdir",
+ required=True,
+ help="directory that unittests are included")
+
+ parser.add_argument(
+ "--ldlibrarypath",
+ action="store",
+ type=str,
+ dest="ldlibrarypath",
+ help=
+ "(usually : ARTIFACT_PATH/Product/out/lib) path that you want to include libraries"
+ )
+
+ parser.add_argument(
+ "--runall",
+ action="store_true",
+ dest="runall",
+ default=False,
+ help="run all unittest and ignore skiplist")
+
+ options = parser.parse_args()
+ return options
+
+
+def get_gtest_option(report_dir, test_bin, unittest_dir=None):
+ # Set path to save test result
+ output_option = "--gtest_output=xml:{report_dir}/{test_bin}.xml".format(
+ report_dir=report_dir, test_bin=test_bin)
+
+    # Set a filter to run only one unit test (used by --runall)
+ if '.' in test_bin:
+ return output_option + " " + "--gtest_filter={test_list_item}".format(
+ test_list_item=test_bin)
+
+ # Set filter not to run *.skip unit tests
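+    # e.g. a skip file listing "FooTest.*" and "BarTest.Baz" yields
+    #   --gtest_filter=-FooTest.*:BarTest.Baz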
+ filter_option = ""
+ skiplist_path = "{unittest_dir}/{test_bin}.skip".format(
+ unittest_dir=unittest_dir, test_bin=test_bin)
+ if os.path.exists(skiplist_path):
+ filter_option = "--gtest_filter=-"
+ skiplist_file = open(skiplist_path, "r")
+ filter_option = filter_option + ':'.join(line[:-1] for line in skiplist_file
+ if line[0] != '#')
+ skiplist_file.close()
+
+ return output_option + " " + filter_option
+
+
+def get_test_list_items(unittest_dir, test_bin):
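+    # Flattens "--gtest_list_tests" output such as
+    #   FooTest.
+    #     Case1
+    #     Case2
+    # into ["FooTest.Case1", "FooTest.Case2"].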
+ cmd_output = subprocess.check_output(
+ "{unittestdir}/{testbin} --gtest_list_tests".format(
+ unittestdir=unittest_dir, testbin=test_bin),
+ shell=True)
+ all_test_list = str(cmd_output).replace('\\n', ' ').split()
+ all_test_list[0] = all_test_list[0][2:]
+
+ category = ""
+ item = ""
+ test_list_items = []
+ for verbose_line in all_test_list:
+ if verbose_line[-1] == '.':
+ category = verbose_line
+ else:
+ item = "{category}{verbose_line}".format(
+ category=category, verbose_line=verbose_line)
+ test_list_items.append(item)
+
+ return test_list_items
+
+
+# Just call this function when running unit test in test_driver.py
+def run_unittest(unittest_dir, report_dir, ldlibrary_path, runall):
+ if unittest_dir == "" or unittest_dir == None:
+ print("Fail : unittestdir is not given")
+ print("(Usually unit test directory is Product/out/unittest)")
+ sys.exit(1)
+
+ if report_dir == "" or report_dir == None:
+ print("Info : 'report' folder of current path will be used as report directory")
+ report_dir = "report"
+
+ if type(ldlibrary_path) is str and ldlibrary_path != "":
+ os.environ["LD_LIBRARY_PATH"] = ldlibrary_path
+
+ print("")
+ print("============================================")
+ print("Unittest start")
+ print("============================================")
+
+ # Run all unit tests in unittest_dir
+ unittest_result = 0
+ all_test_bin = (t for t in os.listdir(unittest_dir)
+ if len(t) < 5 or t[-5:] != ".skip")
+
+ for idx, test_bin in enumerate(all_test_bin):
+ num_unittest = idx + 1
+ print("============================================")
+ print("Starting set {num_unittest}: {test_bin}...".format(
+ num_unittest=num_unittest, test_bin=test_bin))
+ print("============================================")
+
+ ret = 0
+
+ # Run all unit tests ignoring skip list
+ if runall:
+ test_list_items = get_test_list_items(unittest_dir, test_bin)
+ for test_item in test_list_items:
+ cmd = "{unittest_dir}/{test_bin} {gtest_option}".format(
+ unittest_dir=unittest_dir,
+ test_bin=test_bin,
+ gtest_option=get_gtest_option(report_dir, test_item))
+ os.system(cmd)
+ # Run all unit tests except skip list
+ else:
+ cmd = "{unittest_dir}/{test_bin} {gtest_option}".format(
+ unittest_dir=unittest_dir,
+ test_bin=test_bin,
+ gtest_option=get_gtest_option(report_dir, test_bin, unittest_dir))
+ ret = os.system(cmd)
+
+ if ret != 0:
+ unittest_result = ret
+ print("{test_bin} failed... return code: {unittest_result}".format(
+ test_bin=test_bin, unittest_result=unittest_result))
+
+ print("============================================")
+ print("Finishing set {num_unittest}: {test_bin}...".format(
+ num_unittest=num_unittest, test_bin=test_bin))
+ print("============================================")
+
+ if unittest_result != 0:
+ print("============================================")
+ print("Failed unit test... exit code: {unittest_result}".format(
+ unittest_result=unittest_result))
+ print("============================================")
+ sys.exit(1)
+
+ print("============================================")
+ print("Completed total {num_unittest} set of unittest".format(
+ num_unittest=num_unittest))
+ print("Unittest end")
+ print("============================================")
+ sys.exit(0)
+
+
+if __name__ == "__main__":
+ options = get_parsed_options()
+ sys.exit(
+ run_unittest(options.unittestdir, options.reportdir, options.ldlibrarypath,
+ options.runall))
diff --git a/tools/test_driver/py/test_driver.py b/tools/test_driver/py/test_driver.py
new file mode 100755
index 000000000..9ed97d202
--- /dev/null
+++ b/tools/test_driver/py/test_driver.py
@@ -0,0 +1,398 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import shutil
+import argparse
+import common
+import subprocess
+import sys
+
+mypath = os.path.abspath(os.path.dirname(__file__))
+
+
+def get_parsed_options():
+ parser = argparse.ArgumentParser(prog='test_driver.py', usage='%(prog)s [options]')
+
+ # artifactpath
+ parser.add_argument(
+ "--artifactpath",
+ action="store",
+ type=str,
+ dest="artifactpath",
+ default=".",
+ help="(should be passed) path that has tests/ and Product/")
+
+ # test
+ parser.add_argument(
+ "--unittest",
+ action="store_true",
+ dest="unittest_on",
+ default=False,
+ help="(default=on) run unit test")
+ parser.add_argument(
+ "--unittestall",
+ action="store_true",
+ dest="unittestall_on",
+ default=False,
+ help="((default=off) run all unit test without skip, overrite --unittest option")
+ parser.add_argument(
+ "--verification",
+ action="store_true",
+ dest="verification_on",
+ default=False,
+ help="(default=on) run verification")
+ parser.add_argument(
+ "--frameworktest",
+ action="store_true",
+ dest="frameworktest_on",
+ default=False,
+ help="(default=off)run framework test")
+
+ # benchmark
+ parser.add_argument(
+ "--benchmark",
+ action="store_true",
+ dest="benchmark_on",
+ default=False,
+ help="(default=off) run benchmark")
+ parser.add_argument(
+ "--benchmark_acl",
+ action="store_true",
+ dest="benchmarkacl_on",
+ default=False,
+ help="(default=off) run benchmark-acl")
+ parser.add_argument(
+ "--benchmark_op",
+ action="store_true",
+ dest="benchmarkop_on",
+ default=False,
+ help="(default=off) run benchmark per operation")
+
+ # profile
+ parser.add_argument(
+ "--profile",
+ action="store_true",
+ dest="profile_on",
+ default=False,
+ help="(default=off) run profiling")
+
+ # driverbin
+ parser.add_argument(
+ "--framework_driverbin",
+ action="store",
+ type=str,
+ dest="framework_driverbin",
+ help=
+ "(default=../../Product/out/bin/tflite_run) runner for runnning framework tests")
+ parser.add_argument(
+ "--verification_driverbin",
+ action="store",
+ type=str,
+ dest="verification_driverbin",
+ help=
+ "(default=../../Product/out/bin/nnapi_test) runner for runnning verification tests"
+ )
+ parser.add_argument(
+ "--benchmark_driverbin",
+ action="store",
+ type=str,
+ dest="benchmark_driverbin",
+ help=
+ "(default=../../Product/out/bin/tflite_benchmark) runner for runnning benchmark")
+
+ # etc.
+ parser.add_argument(
+ "--runtestsh",
+ action="store",
+ type=str,
+ dest="runtestsh",
+ help=
+ "(default=ARTIFACT_PATH/tests/framework/run_test.sh) run_test.sh with path where it is for framework test and verification"
+ )
+ parser.add_argument(
+ "--unittestdir",
+ action="store",
+ type=str,
+ dest="unittestdir",
+ help=
+ "(default=ARTIFACT_PATH/Product/out/unittest) directory that has unittest binaries for unit test"
+ )
+ parser.add_argument(
+ "--ldlibrarypath",
+ action="store",
+ type=str,
+ dest="ldlibrarypath",
+ help=
+ "(default=ARTIFACT_PATH/Product/out/lib) path that you want to include libraries")
+ parser.add_argument(
+ "--frameworktest_list_file",
+ action="store",
+ type=str,
+ dest="frameworktest_list_file",
+ help=
+ "(default=ARTIFACT_PATH/tools/test_driver/pureacl_frameworktest_list.txt) filepath of model list for test"
+ )
+ parser.add_argument(
+ "--reportdir",
+ action="store",
+ type=str,
+ dest="reportdir",
+ help="(default=ARTIFACT_PATH/report) directory to save report")
+
+ # env
+ parser.add_argument(
+ "--usennapi",
+ action="store_true",
+ dest="usennapi_on",
+ default=True,
+ help="(default=on) declare USE_NNAPI=1")
+ parser.add_argument(
+ "--nousennapi",
+ action="store_false",
+ dest="usennapi_on",
+ help="(default=off) declare nothing about USE_NNAPI")
+ parser.add_argument(
+ "--acl_envon",
+ action="store_true",
+ dest="aclenv_on",
+ default=False,
+ help="(default=off) declare envs for ACL")
+
+ options = parser.parse_args()
+ return options
+
+
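+# Typical invocations (mirroring the shell driver usage shown in the README):
+#   $ python tools/test_driver/py/test_driver.py --artifactpath=. --unittest
+#   $ python tools/test_driver/py/test_driver.py --artifactpath=. --benchmark
+
+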
+def run_unittest(options):
+ cmd = "{artifactpath}/tools/test_driver/run_unittest.sh \
+ --reportdir={reportdir} \
+ --unittestdir={unittestdir}".format(
+ artifactpath=options.artifactpath,
+ reportdir=options.reportdir,
+ unittestdir=options.unittestdir)
+ if options.unittestall_on:
+ cmd += " --runall"
+ os.system(cmd)
+
+
+def run_frameworktest(options):
+ if type(options.framework_driverbin) is not str:
+ options.framework_driverbin = options.artifactpath + "/Product/out/bin/tflite_run"
+ if (os.path.exists(options.framework_driverbin) == False):
+ print("Cannot find {driverbin}".format(driverbin=options.framework_driverbin))
+ sys.exit(1)
+
+ cmd = "{artifactpath}/tools/test_driver/run_frameworktest.sh \
+ --runtestsh={runtestsh} \
+ --driverbin={driverbin} \
+ --reportdir={reportdir} \
+ --tapname=framework_test.tap \
+ --logname=framework_test.log \
+ --testname='Frameworktest'".format(
+ runtestsh=options.runtestsh,
+ driverbin=options.framework_driverbin,
+ reportdir=options.reportdir,
+ artifactpath=options.artifactpath)
+ os.system(cmd)
+
+
+def run_verification(options):
+ if type(options.verification_driverbin) is not str:
+ options.verification_driverbin = options.artifactpath + "/Product/out/bin/nnapi_test"
+ if (os.path.exists(options.verification_driverbin) == False):
+ print("Cannot find {driverbin}".format(
+ driverbin=options.verification_driverbin))
+ sys.exit(1)
+
+ cmd = "{artifactpath}/tools/test_driver/run_frameworktest.sh \
+ --runtestsh={runtestsh} \
+ --driverbin={driverbin} \
+ --reportdir={reportdir} \
+ --tapname=verification_test.tap \
+ --logname=verification_test.log \
+ --testname='Verification'".format(
+ runtestsh=options.runtestsh,
+ driverbin=options.verification_driverbin,
+ reportdir=options.reportdir,
+ artifactpath=options.artifactpath)
+ os.system(cmd)
+
+
+def run_benchmark(options):
+ if type(options.benchmark_driverbin) is not str:
+ options.benchmark_driverbin = options.artifactpath + "/Product/out/bin/tflite_benchmark"
+ if (os.path.exists(options.benchmark_driverbin) == False):
+ print("Cannot find {driverbin}".format(driverbin=options.benchmark_driverbin))
+ sys.exit(1)
+
+ cmd = "{artifactpath}/tools/test_driver/run_benchmark.sh \
+ --runtestsh={runtestsh} \
+ --driverbin={driverbin} \
+ --reportdir={reportdir}/benchmark".format(
+ runtestsh=options.runtestsh,
+ driverbin=options.benchmark_driverbin,
+ reportdir=options.reportdir,
+ artifactpath=options.artifactpath)
+ os.system(cmd)
+
+
+def run_benchmarkop(options):
+ if type(options.benchmark_driverbin) is not str:
+ options.benchmark_driverbin = options.artifactpath + "/Product/out/bin/tflite_benchmark"
+ if (os.path.exists(options.benchmark_driverbin) == False):
+ print("Cannot find {driverbin}".format(driverbin=options.benchmark_driverbin))
+ sys.exit(1)
+
+ cmd = "{artifactpath}/tools/test_driver/run_benchmark_op.sh \
+ --runtestsh={runtestsh} \
+ --driverbin={driverbin} \
+ --reportdir={reportdir}/benchmark_op \
+ --modelfilepath={artifactpath}/tests/framework \
+ --frameworktest_list_file={frameworktest_list_file}".format(
+ runtestsh=options.runtestsh,
+ driverbin=options.benchmark_driverbin,
+ artifactpath=options.artifactpath,
+ reportdir=options.reportdir,
+ frameworktest_list_file=options.frameworktest_list_file)
+ os.system(cmd)
+
+
+def run_benchmarkacl(options):
+ cmd = "{artifactpath}/tools/test_driver/run_benchmark_acl.sh \
+ --reportdir={reportdir}/benchmark \
+ --bindir={artifactpath}/Product/out/bin".format(
+ reportdir=options.reportdir, artifactpath=options.artifactpath)
+ os.system(cmd)
+
+
+def make_json_for_benchmark_result(options):
+ cmd = "source {artifactpath}/tools/test_driver/print_to_json.sh && ".format(
+ artifactpath=options.artifactpath)
+ if options.benchmarkop_on:
+ cmd += "print_to_json {artifactpath}/report/benchmark_op \
+ {reportdir} \"benchmark_op_result.json\"".format(
+ reportdir=options.reportdir, artifactpath=options.artifactpath)
+ else:
+ cmd += "print_to_json {artifactpath}/report/benchmark \
+ {reportdir} \"benchmark_result.json\"".format(
+ reportdir=options.reportdir, artifactpath=options.artifactpath)
+ sp = subprocess.Popen(["/bin/bash", "-i", "-c", cmd])
+ sp.communicate()
+
+
+def run_profile(options):
+    # FIXME: This driver and tflite test are set temporarily. Fix these to support flexibility
+ driver_bin = options.artifactpath + "/Product/out/bin/tflite_run"
+ tflite_test = options.artifactpath + "/tests/framework/cache/inceptionv3/inception_module/inception_test.tflite"
+
+    # TODO: Enable operf to set the directory where sample data is placed
+ shutil.rmtree("oprofile_data", ignore_errors=True)
+
+ print("")
+ print("============================================")
+ cmd = "operf -g {driver_bin} {tflite_test}".format(
+ driver_bin=driver_bin, tflite_test=tflite_test)
+ os.system(cmd)
+ print("============================================")
+ print("")
+
+
+def main():
+ options = get_parsed_options()
+
+ alltest_on = True
+ if True in [
+ options.unittest_on, options.frameworktest_on, options.verification_on,
+ options.benchmark_on, options.benchmarkacl_on, options.benchmarkop_on,
+ options.profile_on
+ ]:
+ alltest_on = False
+
+ # artifactpath
+ if os.path.isdir(options.artifactpath) and os.path.isdir(
+ options.artifactpath + "/tests") and os.path.isdir(options.artifactpath +
+ "/Product"):
+ options.artifactpath = os.path.abspath(options.artifactpath)
+ else:
+ print("Pass on with proper arifactpath")
+ sys.exit(1)
+
+ # run_test.sh
+ if type(options.runtestsh) is not str or options.runtestsh == "":
+ options.runtestsh = options.artifactpath + "/tests/framework/run_test.sh"
+
+ if (os.path.exists(options.runtestsh) == False):
+ print("Cannot find {runtestsh}".format(runtestsh=options.runtestsh))
+ sys.exit(1)
+
+ # unittest dir
+ if type(options.unittestdir) is not str or options.unittestdir == "":
+ options.unittestdir = options.artifactpath + "/Product/out/unittest"
+
+ # LD_LIBRARY_PATH
+ if type(options.ldlibrarypath) is not str or options.ldlibrarypath == "":
+ options.ldlibrarypath = options.artifactpath + "/Product/out/lib"
+
+ # report dir
+ if type(options.reportdir) is not str or options.reportdir == "":
+ options.reportdir = options.artifactpath + "/report"
+
+ # set LD_LIBRARY_PATH
+ os.environ["LD_LIBRARY_PATH"] = options.ldlibrarypath
+
+ # set USE_NNAPI
+ if options.usennapi_on == True:
+ os.environ["USE_NNAPI"] = "1"
+
+ # set acl
+ if options.aclenv_on:
+ common.switch_nnfw_kernel_env("acl")
+
+ # unittest
+ if alltest_on or options.unittest_on:
+ run_unittest(options)
+
+ # frameworktest
+ if options.frameworktest_on:
+ run_frameworktest(options)
+
+ # verification
+ if alltest_on or options.verification_on:
+ run_verification(options)
+
+ # benchmark
+ if options.benchmark_on:
+ run_benchmark(options)
+
+ # benchmark_acl
+ if options.benchmarkacl_on:
+ run_benchmarkacl(options)
+
+ # benchmark_op
+ if options.benchmarkop_on:
+ run_benchmarkop(options)
+
+ # make json file for benchmark result on ci
+ if options.benchmark_on or options.benchmarkacl_on or options.benchmarkop_on:
+ make_json_for_benchmark_result(options)
+
+ # run profile
+ if options.profile_on:
+ run_profile(options)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/test_driver/run_benchmark.sh b/tools/test_driver/run_benchmark.sh
new file mode 100755
index 000000000..a1a0c2fa2
--- /dev/null
+++ b/tools/test_driver/run_benchmark.sh
@@ -0,0 +1,146 @@
+#!/bin/bash
+#
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+MY_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+source $MY_PATH/common.sh
+
+BENCHMARK_RUN_TEST_SH=
+BENCHMARK_DRIVER_BIN=
+BENCHMARK_REPORT_DIR=
+BENCHMARK_MODELS_FILE=
+BENCHMARK_MODEL_LIST="inceptionv3/inception_nonslim inceptionv3/inception_slim mobilenet"
+
+function Usage()
+{
+ # TODO: Fill this
+ echo "Usage: LD_LIBRARY_PATH=Product/out/lib ./$0 --reportdir=report"
+}
+
+for i in "$@"
+do
+ case $i in
+ -h|--help|help)
+ Usage
+ exit 1
+ ;;
+ --runtestsh=*)
+ BENCHMARK_RUN_TEST_SH=${i#*=}
+ ;;
+ --driverbin=*)
+ BENCHMARK_DRIVER_BIN=${i#*=}
+ ;;
+ --reportdir=*)
+ BENCHMARK_REPORT_DIR=${i#*=}
+ BENCHMARK_MODELS_FILE=$BENCHMARK_REPORT_DIR/benchmark_models.txt
+ ;;
+ esac
+ shift
+done
+
+function get_result_of_benchmark_test()
+{
+ local RUN_TEST_SH=$1
+ local DRIVER_BIN=$2
+ local MODEL=$3
+ local LOG_FILE=$4
+
+ local RET=0
+ $RUN_TEST_SH --driverbin=$DRIVER_BIN $MODEL > $LOG_FILE 2>&1
+ RET=$?
+ if [[ $RET -ne 0 ]]; then
+ echo "Testing $MODEL aborted... exit code: $RET"
+ exit $RET
+ fi
+
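+    # The driver log is expected to contain a line like "Mean: 12.345 ms";
+    # the pipeline below extracts the numeric value ("12.345").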
+ local RESULT=`grep -E '^Mean:' $LOG_FILE | sed -e 's/ms//g' | awk '{print $2}'`
+ echo "$RESULT"
+}
+
+function print_result_of_benchmark_test()
+{
+ local NAME=$1
+ local RESULT=$2
+ local RESULT_FILE=$3
+
+ echo "$NAME $RESULT" > $RESULT_FILE
+}
+
+function run_benchmark_test()
+{
+ local DRIVER_BIN=$BENCHMARK_DRIVER_BIN
+ local LOG_FILE=
+ local RESULT_FILE=
+ local RESULT=
+ local REPORT_MODEL_DIR=
+
+ export COUNT=5
+ echo "============================================"
+ local i=0
+ for MODEL in $BENCHMARK_MODEL_LIST; do
+ echo "Benchmark test with `basename $DRIVER_BIN` & `echo $MODEL`"
+ echo $MODEL >> $BENCHMARK_MODELS_FILE
+
+ REPORT_MODEL_DIR=$BENCHMARK_REPORT_DIR/$MODEL
+ mkdir -p $REPORT_MODEL_DIR
+
+ # TFLite+CPU
+ LOG_FILE=$REPORT_MODEL_DIR/tflite_cpu.txt
+ RESULT_FILE=$REPORT_MODEL_DIR/tflite_cpu.result
+ echo -n "TFLite + CPU................... "
+ unset USE_NNAPI
+ RESULT=$(get_result_of_benchmark_test $BENCHMARK_RUN_TEST_SH $DRIVER_BIN $MODEL $LOG_FILE)
+ echo "$RESULT ms"
+ print_result_of_benchmark_test "TFLite_CPU" $RESULT $RESULT_FILE
+
+ # TFLite+NNAPI(CPU fallback)
+ LOG_FILE=$REPORT_MODEL_DIR/tflite_nnapi_cpu.txt
+ RESULT_FILE=$REPORT_MODEL_DIR/tflite_nnapi_cpu.result
+ echo -n "TFLite + NNAPI(CPU)............ "
+ export USE_NNAPI=1
+ RESULT=$(get_result_of_benchmark_test $BENCHMARK_RUN_TEST_SH $DRIVER_BIN $MODEL $LOG_FILE)
+ echo "$RESULT ms"
+ print_result_of_benchmark_test "TFLite_NNAPI_CPU" $RESULT $RESULT_FILE
+
+ # TFLite+NNAPI(ACL)
+ LOG_FILE=$REPORT_MODEL_DIR/tflite_nnapi_acl.txt
+ RESULT_FILE=$REPORT_MODEL_DIR/tflite_nnapi_acl.result
+ echo -n "TFLite + NNAPI(ACL)............ "
+ switch_nnfw_kernel_env "ON" "acl"
+ RESULT=$(get_result_of_benchmark_test $BENCHMARK_RUN_TEST_SH $DRIVER_BIN $MODEL $LOG_FILE)
+ echo "$RESULT ms"
+ print_result_of_benchmark_test "TFLite_NNAPI_ACL" $RESULT $RESULT_FILE
+ unset USE_NNAPI
+ switch_nnfw_kernel_env "OFF"
+
+ if [[ $i -ne $(echo $BENCHMARK_MODEL_LIST | wc -w)-1 ]]; then
+ echo ""
+ fi
+ i=$((i+1))
+ done
+ echo "============================================"
+ unset COUNT
+}
+
+if [ ! -e "$BENCHMARK_REPORT_DIR" ]; then
+ mkdir -p $BENCHMARK_REPORT_DIR
+fi
+
+rm -rf $BENCHMARK_MODELS_FILE
+
+echo ""
+run_benchmark_test
+echo ""
diff --git a/tools/test_driver/run_benchmark_acl.sh b/tools/test_driver/run_benchmark_acl.sh
new file mode 100755
index 000000000..c6a643baa
--- /dev/null
+++ b/tools/test_driver/run_benchmark_acl.sh
@@ -0,0 +1,113 @@
+#!/bin/bash
+#
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+MY_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+source $MY_PATH/common.sh
+
+BENCHMARKACL_BIN_DIR=
+BENCHMARKACL_REPORT_DIR=
+BENCHMARKACL_MODELS_FILE=
+BENCHMARKACL_MODEL_LIST="inceptionv3/inception_nonslim inceptionv3/inception_slim"
+
+function Usage()
+{
+ # TODO: Fill this
+ echo "Usage: LD_LIBRARY_PATH=Product/out/lib ./$0 --reportdir=report"
+}
+
+for i in "$@"
+do
+ case $i in
+ -h|--help|help)
+ Usage
+ exit 1
+ ;;
+ --reportdir=*)
+ BENCHMARKACL_REPORT_DIR=${i#*=}
+ BENCHMARKACL_MODELS_FILE=$BENCHMARKACL_REPORT_DIR/benchmarkacl_models.txt
+ ;;
+ --bindir=*)
+ BENCHMARKACL_BIN_DIR=${i#*=}
+ ;;
+ esac
+ shift
+done
+
+function run_benchmark_acl()
+{
+ local REPORT_DIR=$BENCHMARKACL_REPORT_DIR
+ local DRIVER_DIR=$BENCHMARKACL_BIN_DIR
+ local LOG_FILE=""
+ local RESULT_FILE=""
+ local RESULT=""
+ local RET=0
+
+ export COUNT=5
+ echo "============================================"
+ local i=0
+ for BENCHMARK_ACL_BIN in $(ls $DRIVER_DIR/benchmark_*); do
+ local BENCHMARK_ACL_BIN_BASENAME=$(basename $BENCHMARK_ACL_BIN)
+ mkdir -p $REPORT_DIR/$BENCHMARK_ACL_BIN_BASENAME
+ echo "Benchmark/acl test by $BENCHMARK_ACL_BIN_BASENAME"
+ echo $BENCHMARK_ACL_BIN_BASENAME >> $BENCHMARKACL_MODELS_FILE
+
+ # ACL(NEON)
+ LOG_FILE=$REPORT_DIR/$BENCHMARK_ACL_BIN_BASENAME/acl_neon.txt
+ RESULT_FILE=$REPORT_DIR/$BENCHMARK_ACL_BIN_BASENAME/acl_neon.result
+ echo -n "ACL(NEON)...... "
+ $BENCHMARK_ACL_BIN 0 > $LOG_FILE 2>&1
+ RET=$?
+ if [[ $RET -ne 0 ]]; then
+ echo "aborted... exit code: $RET"
+ exit $RET
+ fi
+ RESULT=`grep -E '^Mean:' $LOG_FILE | sed -e 's/ms//g' | awk '{print $2}'`
+ echo "$RESULT ms"
+ echo "ACL(NEON)" $RESULT > $RESULT_FILE
+
+ # ACL(OpenCL)
+ LOG_FILE=$REPORT_DIR/$BENCHMARK_ACL_BIN_BASENAME/acl_opencl.txt
+ RESULT_FILE=$REPORT_DIR/$BENCHMARK_ACL_BIN_BASENAME/acl_opencl.result
+ echo -n "ACL(OpenCL).... "
+ $BENCHMARK_ACL_BIN 1 > $LOG_FILE 2>&1
+ RET=$?
+ if [[ $RET -ne 0 ]]; then
+ echo "aborted... exit code: $RET"
+ exit $RET
+ fi
+ RESULT=`grep -E '^Mean:' $LOG_FILE | sed -e 's/ms//g' | awk '{print $2}'`
+ echo "$RESULT ms"
+ echo "ACL(OpenCL)" $RESULT > $RESULT_FILE
+
+ if [[ $i -ne $(ls $DRIVER_DIR/benchmark_* | wc -w)-1 ]]; then
+ echo ""
+ fi
+ i=$((i+1))
+ done
+ echo "============================================"
+ unset COUNT
+}
+
+if [ ! -e "$BENCHMARKACL_REPORT_DIR" ]; then
+ mkdir -p $BENCHMARKACL_REPORT_DIR
+fi
+
+rm -rf $BENCHMARKACL_MODELS_FILE
+
+echo ""
+run_benchmark_acl
+echo ""
diff --git a/tools/test_driver/run_benchmark_op.sh b/tools/test_driver/run_benchmark_op.sh
new file mode 100755
index 000000000..8ff80a45b
--- /dev/null
+++ b/tools/test_driver/run_benchmark_op.sh
@@ -0,0 +1,209 @@
+#!/bin/bash
+#
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+MY_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+NNFW_HOME="$(dirname $(dirname ${MY_PATH}))"
+source $MY_PATH/common.sh
+
+BENCHMARK_RUN_TEST_SH=
+BENCHMARK_DRIVER_BIN=
+BENCHMARK_REPORT_DIR=
+BENCHMARK_MODELS_FILE=
+BENCHMARK_MODEL_LIST=
+MODEL_CACHE_ROOT_PATH=
+MODEL_TEST_ROOT_PATH=
+PURE_ACL_RT_LIB_PATH=
+PURE_LD_LIBRARY_PATH=
+ORIGIN_LD_LIBRARY_PATH=
+PURE_ACL_RT_ENV_FILE=$MY_PATH/benchmark_op_list.txt
+
+function Usage()
+{
+ # TODO: Fill this
+ echo "Usage: LD_LIBRARY_PATH=Product/out/lib ./$0 --reportdir=report"
+}
+
+for i in "$@"
+do
+ case $i in
+ -h|--help|help)
+ Usage
+ exit 1
+ ;;
+ --runtestsh=*)
+ BENCHMARK_RUN_TEST_SH=${i#*=}
+ ;;
+ --driverbin=*)
+ BENCHMARK_DRIVER_BIN=${i#*=}
+ ;;
+ --reportdir=*)
+ BENCHMARK_REPORT_DIR=${i#*=}
+ BENCHMARK_MODELS_FILE=$BENCHMARK_REPORT_DIR/benchmark_op_models.txt
+ ;;
+ --modelfilepath=*)
+ TEST_LIST_PATH=${i#*=}
+ MODEL_CACHE_ROOT_PATH=$TEST_LIST_PATH/cache
+ MODEL_TEST_ROOT_PATH=$TEST_LIST_PATH/tests
+ ;;
+ --frameworktest_list_file=*)
+ FRAMEWORKTEST_LIST_FILE=${i#*=}
+ ;;
+ esac
+ shift
+done
+
+function get_result_of_benchmark_test()
+{
+ local RUN_TEST_SH=$1
+ local DRIVER_BIN=$2
+ local MODEL=$3
+ local LOG_FILE=$4
+ local PUREACL_LD_LIBRARY_PATH=$5
+
+ local RET=0
+ $RUN_TEST_SH --driverbin=$DRIVER_BIN --ldlibrarypath=$PUREACL_LD_LIBRARY_PATH $MODEL > $LOG_FILE 2>&1
+ RET=$?
+ if [[ $RET -ne 0 ]]; then
+ echo "Testing $MODEL aborted... exit code: $RET"
+ exit $RET
+ fi
+
+ local RESULT=`grep -E '^Mean:' $LOG_FILE | sed -e 's/ms//g' | awk '{print $2}'`
+ echo "$RESULT"
+}
+
+function print_result_of_benchmark_test()
+{
+ local NAME=$1
+ local RESULT=$2
+ local RESULT_FILE=$3
+ echo "$NAME $RESULT" > $RESULT_FILE
+}
+
+function get_benchmark_op_list()
+{
+ if [ ! -z "$FRAMEWORKTEST_LIST_FILE" ]; then
+ BENCHMARK_MODEL_LIST=$(cat "${FRAMEWORKTEST_LIST_FILE}")
+ else
+ BENCHMARK_MODEL_LIST=$(cat "${PURE_ACL_RT_ENV_FILE}")
+ fi
+ echo "BENCHMARK_MODEL_LIST=> $BENCHMARK_MODEL_LIST"
+}
+
+function run_benchmark_test()
+{
+ local DRIVER_BIN=$BENCHMARK_DRIVER_BIN
+ local LOG_FILE=
+ local RESULT_FILE=
+ local RESULT=
+ local REPORT_MODEL_DIR=
+
+ export COUNT=5
+ echo "============================================"
+ local i=0
+ for MODEL in $BENCHMARK_MODEL_LIST; do
+ STATUS="enabled"
+ source $MODEL_TEST_ROOT_PATH/$MODEL/config.sh
+
+ LOWER_STATUS="$(echo $STATUS | awk '{print tolower($0)}')"
+ if [ "$LOWER_STATUS" == "disabled" ]; then
+ echo ""
+ echo "Skip $MODEL"
+ continue
+ fi
+
+ echo "Benchmark test with `basename $DRIVER_BIN` & `echo $MODEL`"
+ echo $MODEL >> $BENCHMARK_MODELS_FILE
+
+ REPORT_MODEL_DIR=$BENCHMARK_REPORT_DIR/$MODEL
+ mkdir -p $REPORT_MODEL_DIR
+
+ # TFLite(CPU fallback)
+ LOG_FILE=$REPORT_MODEL_DIR/tflite_cpu_op.txt
+ RESULT_FILE=$REPORT_MODEL_DIR/tflite_cpu_op.result
+ echo -n "TFLite(CPU fallback)................... "
+ unset USE_NNAPI
+ RESULT=$(get_result_of_benchmark_test $BENCHMARK_RUN_TEST_SH $DRIVER_BIN $MODEL $LOG_FILE)
+ echo "$RESULT ms"
+ print_result_of_benchmark_test "TFLite_CPU" $RESULT $RESULT_FILE
+
+ # TFLite+NNRuntime(CPU fallback)
+ LOG_FILE=$REPORT_MODEL_DIR/tflite_nnrt_cpu_op.txt
+ RESULT_FILE=$REPORT_MODEL_DIR/tflite_nnrt_cpu_op.result
+ echo -n "TFLite + NNRuntime(CPU fallback)............ "
+ export USE_NNAPI=1
+ RESULT=$(get_result_of_benchmark_test $BENCHMARK_RUN_TEST_SH $DRIVER_BIN $MODEL $LOG_FILE)
+ echo "$RESULT ms"
+ print_result_of_benchmark_test "TFLite_NNRT_CPU" $RESULT $RESULT_FILE
+
+ # TFLite+NNRuntime+ACL-Neon
+ LOG_FILE=$REPORT_MODEL_DIR/tflite_nnrt_acl_neon_op.txt
+ RESULT_FILE=$REPORT_MODEL_DIR/tflite_nnrt_acl_neon_op.result
+ echo -n "TFLite + NNRuntime + ACL-Neon............ "
+ switch_nnfw_kernel_env "ON" "neon"
+ RESULT=$(get_result_of_benchmark_test $BENCHMARK_RUN_TEST_SH $DRIVER_BIN $MODEL $LOG_FILE)
+ echo "$RESULT ms"
+ print_result_of_benchmark_test "TFLite_NNRT_ACL-NEON" $RESULT $RESULT_FILE
+ switch_nnfw_kernel_env "OFF"
+
+ # TFLite+NNRuntime+ACL-OpenCL
+ LOG_FILE=$REPORT_MODEL_DIR/tflite_nnrt_acl_opencl_op.txt
+ RESULT_FILE=$REPORT_MODEL_DIR/tflite_nnrt_acl_opencl_op.result
+ echo -n "TFLite + NNRuntime + ACL-OpenCL............ "
+ switch_nnfw_kernel_env "ON" "acl"
+ RESULT=$(get_result_of_benchmark_test $BENCHMARK_RUN_TEST_SH $DRIVER_BIN $MODEL $LOG_FILE)
+ echo "$RESULT ms"
+ print_result_of_benchmark_test "TFLite_NNRT_ACL-OPENCL" $RESULT $RESULT_FILE
+ switch_nnfw_kernel_env "OFF"
+
+ # TFLite+PureACLRuntime+ACL-OpenCL
+ if [ ! -d "$PURE_ACL_RT_LIB_PATH" ]; then
+ echo "Skip $MODEL in Pure ACL Runtime "
+ continue
+ fi
+ LOG_FILE=$REPORT_MODEL_DIR/tflite_pureaclrt_acl_opencl_op.txt
+ RESULT_FILE=$REPORT_MODEL_DIR/tflite_pureaclrt_acl_opencl_op.result
+ echo -n "TFLite + PureACLRuntime + ACL-OpenCL............ "
+ RESULT=$(get_result_of_benchmark_test $BENCHMARK_RUN_TEST_SH $DRIVER_BIN $MODEL $LOG_FILE $PURE_ACL_RT_LIB_PATH)
+ echo "$RESULT ms"
+ print_result_of_benchmark_test "TFLite_PUREACLRT_ACL-OPENCL" $RESULT $RESULT_FILE
+ unset USE_NNAPI
+
+ if [[ $i -ne $(echo $BENCHMARK_MODEL_LIST | wc -w)-1 ]]; then
+ echo ""
+ fi
+ i=$((i+1))
+ done
+ unset USE_NNAPI
+ unset COUNT
+ echo "============================================"
+}
+
+if [ ! -e "$BENCHMARK_REPORT_DIR" ]; then
+ mkdir -p $BENCHMARK_REPORT_DIR
+fi
+
+if [ -z "$PURE_ACL_RT_LIB_PATH" ]; then
+ PURE_ACL_RT_LIB_PATH="$NNFW_HOME/Product/out/lib/pureacl"
+fi
+
+get_benchmark_op_list
+
+rm -rf $BENCHMARK_MODELS_FILE
+
+echo ""
+run_benchmark_test
+echo ""
diff --git a/tools/test_driver/run_benchmark_tflite_model.in b/tools/test_driver/run_benchmark_tflite_model.in
new file mode 100644
index 000000000..1003ecc13
--- /dev/null
+++ b/tools/test_driver/run_benchmark_tflite_model.in
@@ -0,0 +1 @@
+MODELS/inception_nonslim --input_layer=Mul --input_layer_shape=1,299,299,3 --num_threads=1 --num_runs=1
diff --git a/tools/test_driver/run_benchmark_tflite_model.sh b/tools/test_driver/run_benchmark_tflite_model.sh
new file mode 100755
index 000000000..50a2a5fb3
--- /dev/null
+++ b/tools/test_driver/run_benchmark_tflite_model.sh
@@ -0,0 +1,125 @@
+#!/bin/bash
+#
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+NNFW_DIR="$(dirname "$(dirname "${SCRIPT_DIR}")")"
+REPORT_DIR="$NNFW_DIR/report/tflite_benchmark_model"
+MODEL_ROOT="$NNFW_DIR/tests/framework/tests"
+LD_LIBRARY_PATH="$NNFW_DIR/Product/out/lib"
+
+RUN_TEST=$NNFW_DIR/tests/framework/run_test.sh
+MODEL_IN=${BASH_SOURCE[0]%.sh}.in
+BENCHMARK_BIN=$NNFW_DIR/Product/out/bin/tflite_benchmark_model
+MODEL_NAMES=
+MODEL_PARAMS=
+
+source $SCRIPT_DIR/common.sh
+
+usage()
+{
+ echo
+ echo "Usage: LD_LIBRARY_PATH=Product/out/lib $(basename ${BASH_SOURCE[0]}) --reportdir=report --modelroot=modelroot"
+ echo
+}
+
+parse_args()
+{
+ for i in "$@"; do
+ case $i in
+ -h|--help|help)
+ usage
+ exit 1
+ ;;
+ --reportdir=*)
+ REPORT_DIR=${i#*=}
+ ;;
+ --modelroot=*)
+ MODEL_ROOT=${i#*=}
+ ;;
+ esac
+ shift
+ done
+}
+
+load_input()
+{
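+    # cut splits on TAB by default, so each line of the .in file is expected
+    # to be "<model name><TAB><driver parameters...>".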
+ mapfile -t MODEL_NAMES < <(cut -f1 "${MODEL_IN}")
+ mapfile -t MODEL_PARAMS < <(cut -f2- "${MODEL_IN}")
+ if [ "${#MODEL_NAMES[@]}" -eq 0 ]; then
+ echo "No model is found. Please check ${MODEL_IN} is correct."
+ exit 1
+ fi
+}
+
+download_models()
+{
+ $RUN_TEST --download=on $MODEL_NAMES
+}
+
+run_benchmarks()
+{
+ echo
+ echo "Running benchmarks:"
+ echo "======================"
+
+ for (( i=0; i< ${#MODEL_NAMES[@]}; i++)); do
+ MODEL_NAME=${MODEL_NAMES[i]}
+ MODEL_PATH=$(find $NNFW_DIR/tests/framework/cache/$MODEL_NAME/ -name "*.tflite")
+ MODEL_PARAM=${MODEL_PARAMS[$i]}
+
+ echo "$MODEL_NAME"
+
+ local REPORT_MODEL_DIR=$REPORT_DIR/$MODEL_NAME
+ mkdir -p $REPORT_MODEL_DIR
+
+ local OUT_FILE
+
+ # TFLite Interpreter
+ OUT_FILE=$REPORT_MODEL_DIR/tflite_interpreter.out
+ echo
+ echo "{ // TFLite Interpreter"
+ LD_LIBRARY_PATH=$LD_LIBRARY_PATH $BENCHMARK_BIN --graph=$MODEL_PATH $MODEL_PARAM --use_nnapi=false 2> >(tee $OUT_FILE)
+ echo "} // TFLite Interpreter"
+
+ # TFLite PureACL (CL)
+ OUT_FILE=$REPORT_MODEL_DIR/tflite_pureacl_cl.out
+ echo
+ echo "{ // TFLite PureACL(CL)"
+ LD_LIBRARY_PATH=$LD_LIBRARY_PATH $BENCHMARK_BIN --graph=$MODEL_PATH $MODEL_PARAM --use_nnapi=true 2> >(tee $OUT_FILE)
+ echo "} // TFLite_PureACL(CL)"
+ done
+}
+
+# for debug
+print_vars()
+{
+ echo SCRIPT_DIR=$SCRIPT_DIR
+ echo NNFW_DIR=$NNFW_DIR
+ echo RUN_TEST=$RUN_TEST
+ echo MODEL_IN=$MODEL_IN
+ echo BENCHMARK_BIN=$BENCHMARK_BIN
+ echo REPORT_DIR=$REPORT_DIR
+ echo MODEL_ROOT=$MODEL_ROOT
+}
+
+if [ ! -e "$REPORT_DIR" ]; then
+ mkdir -p $REPORT_DIR
+fi
+
+parse_args $@
+load_input
+download_models
+run_benchmarks
diff --git a/tools/test_driver/run_frameworktest.sh b/tools/test_driver/run_frameworktest.sh
new file mode 100755
index 000000000..343fd0292
--- /dev/null
+++ b/tools/test_driver/run_frameworktest.sh
@@ -0,0 +1,95 @@
+#!/bin/bash
+#
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FWTEST_RUN_TEST_SH=
+FWTEST_DRIVER_BIN=
+FWTEST_REPORT_DIR=
+FWTEST_TAP_NAME=
+FWTEST_LOG_NAME=
+FWTEST_TEST_NAME=
+
+function Usage()
+{
+ # TODO: Fill this
+ echo "Usage: LD_LIBRARY_PATH=Product/out/lib ./$0 --reportdir=report"
+}
+
+for i in "$@"
+do
+ case $i in
+ -h|--help|help)
+ Usage
+ exit 1
+ ;;
+ --runtestsh=*)
+ FWTEST_RUN_TEST_SH=${i#*=}
+ ;;
+ --driverbin=*)
+ FWTEST_DRIVER_BIN=${i#*=}
+ ;;
+ --reportdir=*)
+ FWTEST_REPORT_DIR=${i#*=}
+ ;;
+ --tapname=*)
+ FWTEST_TAP_NAME=${i#*=}
+ ;;
+ --logname=*)
+ FWTEST_LOG_NAME=${i#*=}
+ ;;
+ --testname=*)
+ FWTEST_TEST_NAME=${i#*=}
+ ;;
+ --frameworktest_list_file=*)
+ FRAMEWORKTEST_LIST_FILE=${i#*=}
+ ;;
+ esac
+ shift
+done
+
+# TODO: handle exceptions for params
+
+if [ ! -e "$FWTEST_REPORT_DIR" ]; then
+ mkdir -p $FWTEST_REPORT_DIR
+fi
+
+echo ""
+echo "============================================"
+echo "$FWTEST_TEST_NAME with $(basename $FWTEST_DRIVER_BIN) ..."
+
+if [ ! -z "$FRAMEWORKTEST_LIST_FILE" ]; then
+ MODELLIST=$(cat "${FRAMEWORKTEST_LIST_FILE}")
+fi
+
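+# MODELLIST is intentionally left unquoted below so that each model name read
+# from the list file is word-split into a separate argument for run_test.sh.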
+$FWTEST_RUN_TEST_SH --driverbin=$FWTEST_DRIVER_BIN \
+ --reportdir=$FWTEST_REPORT_DIR \
+ --tapname=$FWTEST_TAP_NAME \
+ ${MODELLIST:-} \
+ > $FWTEST_REPORT_DIR/$FWTEST_LOG_NAME 2>&1
+FWTEST_RESULT=$?
+if [[ $FWTEST_RESULT -ne 0 ]]; then
+ echo ""
+ cat $FWTEST_REPORT_DIR/$FWTEST_TAP_NAME
+ echo ""
+ echo "$FWTEST_TEST_NAME failed... exit code: $FWTEST_RESULT"
+ echo "============================================"
+ echo ""
+ exit $FWTEST_RESULT
+fi
+
+echo ""
+cat $FWTEST_REPORT_DIR/$FWTEST_TAP_NAME
+echo "============================================"
+echo ""
diff --git a/tools/test_driver/run_unittest.sh b/tools/test_driver/run_unittest.sh
new file mode 100755
index 000000000..abf3194ad
--- /dev/null
+++ b/tools/test_driver/run_unittest.sh
@@ -0,0 +1,109 @@
+#!/bin/bash
+#
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+UNITTEST_REPORT_DIR=
+UNITTEST_TEST_DIR=
+UNITTEST_RESULT=0
+UNITTEST_RUN_ALL=""
+
+function Usage()
+{
+ # TODO: Fill this
+ echo "Usage: LD_LIBRARY_PATH=Product/out/lib ./$0 --reportdir=report --unittestdir=Product/out/bin"
+}
+
+get_gtest_option()
+{
+ local output_option="--gtest_output=xml:$UNITTEST_REPORT_DIR/$TEST_BIN.xml"
+ local filter_option
+ if [ -r "$UNITTEST_TEST_DIR/$TEST_BIN.skip" ]; then
+ filter_option="--gtest_filter=-$(grep -v '#' "$UNITTEST_TEST_DIR/$TEST_BIN.skip" | tr '\n' ':')"
+ fi
+ echo "$output_option $filter_option"
+}
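+
+# Example: a '<binary>.skip' file listing 'FooTest.Slow' and 'BarTest.Flaky'
+# yields '--gtest_filter=-FooTest.Slow:BarTest.Flaky:', excluding those tests.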
+
+for i in "$@"
+do
+ case $i in
+ -h|--help|help)
+ Usage
+ exit 1
+ ;;
+ --reportdir=*)
+ UNITTEST_REPORT_DIR=${i#*=}
+ ;;
+ --unittestdir=*)
+ UNITTEST_TEST_DIR=${i#*=}
+ ;;
+ --runall)
+ UNITTEST_RUN_ALL="true"
+ esac
+ shift
+done
+
+# TODO: handle exceptions for params
+
+if [ ! -e "$UNITTEST_REPORT_DIR" ]; then
+ mkdir -p $UNITTEST_REPORT_DIR
+fi
+
+echo ""
+echo "============================================"
+echo "Unittest start"
+echo "============================================"
+
+num_unittest=0
+for TEST_BIN in `ls $UNITTEST_TEST_DIR --hide=*.skip`; do
+ num_unittest=$((num_unittest+1))
+ echo "============================================"
+ echo "Starting set $num_unittest: $TEST_BIN..."
+ echo "============================================"
+ TEMP_UNITTEST_RESULT=0
+
+ if [ "$UNITTEST_RUN_ALL" == "true" ]; then
+ for TEST_LIST_VERBOSE_LINE in $($UNITTEST_TEST_DIR/$TEST_BIN --gtest_list_tests); do
+ if [[ $TEST_LIST_VERBOSE_LINE == *\. ]]; then
+ TEST_LIST_CATEGORY=$TEST_LIST_VERBOSE_LINE
+ else
+ TEST_LIST_ITEM="$TEST_LIST_CATEGORY""$TEST_LIST_VERBOSE_LINE"
+ $UNITTEST_TEST_DIR/$TEST_BIN --gtest_filter=$TEST_LIST_ITEM --gtest_output="xml:$UNITTEST_REPORT_DIR/$TEST_LIST_ITEM.xml"
+ fi
+ done
+ else
+ $UNITTEST_TEST_DIR/$TEST_BIN $(get_gtest_option)
+ TEMP_UNITTEST_RESULT=$?
+ fi
+
+ if [[ $TEMP_UNITTEST_RESULT -ne 0 ]]; then
+ UNITTEST_RESULT=$TEMP_UNITTEST_RESULT
+ echo "$TEST_BIN failed... return code: $TEMP_UNITTEST_RESULT"
+ fi
+ echo "============================================"
+ echo "Finishing set $num_unittest: $TEST_BIN..."
+ echo "============================================"
+done
+
+if [[ $UNITTEST_RESULT -ne 0 ]]; then
+ echo "============================================"
+ echo "Failed unit test... exit code: $UNITTEST_RESULT"
+ echo "============================================"
+ exit $UNITTEST_RESULT
+fi
+
+echo "============================================"
+echo "Completed total $num_unittest set of unittest"
+echo "Unittest end"
+echo "============================================"
diff --git a/tools/test_driver/test_driver.sh b/tools/test_driver/test_driver.sh
index 3bff910f5..72be65dc1 100755
--- a/tools/test_driver/test_driver.sh
+++ b/tools/test_driver/test_driver.sh
@@ -24,32 +24,41 @@ function Usage()
echo "Usage: ./$0 --artifactpath=. # run all tests"
echo "Usage: ./$0 --artifactpath=/home/dragon/nnfw --frameworktest --verification --benchmark # run fw test & verfication and benchmark"
echo ""
- echo "--artifactpath - (should be passed) path that has tests/ and Product/"
+ echo "--artifactpath - (default={test_driver.sh's path}/../../) it should contain tests/ and Product/"
echo ""
- echo "Following three options are needed when you want to tests of specific types. If you don't pass any one, unittest and verification will be run"
- echo "--unittest - (default=on) run unit test"
- echo "--frameworktest - (default=off)run framework test"
- echo "--verification - (default=on) run verification"
+ echo "Following options are needed when you want to tests of specific types. If you don't pass any one, unittest and verification will be run"
+ echo "--unittest - (default=on) run unit test"
+ echo "--unittestall - (default=off) run all unit test without skip, overrite --unittest option"
+ echo "--frameworktest - (default=off) run framework test"
+ echo "--verification - (default=on) run verification"
+ echo "--frameworktest_list_file - filepath of model list for test"
echo ""
echo "Following option is only needed when you want to test benchmark."
- echo "--benchmark - (default=off) run benchmark"
- echo "--benchmark_acl - (default=off) run benchmark-acl"
+ echo "--benchmark_acl - (default=off) run benchmark-acl"
+ echo "--benchmark - (default=off) run benchmark"
+ echo "--benchmark_op - (default=off) run benchmark per operation"
+ echo "--benchmark_tflite_model - (default=off) run tflite_benchmark_model"
+ echo ""
+ echo "Following option is used for profiling."
+ echo "--profile - (default=off) run operf"
echo ""
echo "etc."
- echo "--framework_driverbin - (default=../../Product/out/bin/tflite_run) runner for runnning framework tests"
- echo "--verification_driverbin - (default=../../Product/out/bin/nnapi_test) runner for runnning verification tests"
- echo "--benchmark_driverbin - (default=../../Product/out/bin/tflite_benchmark) runner for runnning benchmark"
- echo "--runtestsh - (default=\$ARTIFACT_PATH/tests/framework/run_test.sh) run_test.sh with path where it is for framework test and verification"
- echo "--unittestdir - (default=\$ARTIFACT_PATH/Product/out/unittest) directory that has unittest binaries for unit test"
+ echo "--framework_driverbin - (default=../../Product/out/bin/tflite_run) runner for runnning framework tests"
+ echo "--verification_driverbin - (default=../../Product/out/bin/nnapi_test) runner for runnning verification tests"
+ echo "--benchmark_driverbin - (default=../../Product/out/bin/tflite_benchmark) runner for runnning benchmark"
+ echo "--runtestsh - (default=\$ARTIFACT_PATH/tests/framework/run_test.sh) run_test.sh with path where it is for framework test and verification"
+ echo "--unittestdir - (default=\$ARTIFACT_PATH/Product/out/unittest) directory that has unittest binaries for unit test"
echo ""
- echo "--ldlibrarypath - (default=\$ARTIFACT_PATH/Product/out/lib) path that you want to include libraries"
- echo "--usennapi - (default=on) declare USE_NNAPI=1"
- echo "--nousennapi - (default=off) declare nothing about USE_NNAPI"
- echo "--acl_envon - (default=off) declare envs for ACL"
+ echo "--ldlibrarypath - (default=\$ARTIFACT_PATH/Product/out/lib) path that you want to include libraries"
+ echo "--usennapi - (default=on) declare USE_NNAPI=1"
+ echo "--nousennapi - (default=off) declare nothing about USE_NNAPI"
+ echo "--acl_envon - (default=off) declare envs for ACL"
+ echo "--reportdir - (default=\$ARTIFACT_PATH/report) directory to save report"
echo ""
}
-ARTIFACT_PATH=""
+TEST_DRIVER_DIR="$( cd "$( dirname "${BASH_SOURCE}" )" && pwd )"
+ARTIFACT_PATH="$TEST_DRIVER_DIR/../../"
FRAMEWORK_DRIVER_BIN=""
VERIFICATION_DRIVER_BIN=""
BENCHMARK_DRIVER_BIN=""
@@ -59,12 +68,16 @@ LD_LIBRARY_PATH_IN_SHELL=""
USE_NNAPI="USE_NNAPI=1"
ALLTEST_ON="true"
UNITTEST_ON="false"
+UNITTESTALL_ON="false"
FRAMEWORKTEST_ON="false"
VERIFICATION_ON="false"
BENCHMARK_ON="false"
+BENCHMARK_OP_ON="false"
+BENCHMARK_TFLITE_MODEL_ON="false"
BENCHMARK_ACL_ON="false"
-MODEL_LIST="inceptionv3/inception_nonslim inceptionv3/inception_slim"
ACL_ENV_ON="false"
+PROFILE_ON="false"
+REPORT_DIR=""
for i in "$@"
do
@@ -104,10 +117,22 @@ do
ALLTEST_ON="false"
UNITTEST_ON="true"
;;
+ --unittestall)
+ ALLTEST_ON="false"
+ UNITTEST_ON="true"
+ UNITTESTALL_ON="true"
+ ;;
--frameworktest)
ALLTEST_ON="false"
FRAMEWORKTEST_ON="true"
;;
+ --frameworktest_list_file=*)
+ FRAMEWORKTEST_LIST_FILE=$PWD/${i#*=}
+ if [ ! -e "$FRAMEWORKTEST_LIST_FILE" ]; then
+ echo "Pass on with proper frameworktest_list_file"
+ exit 1
+ fi
+ ;;
--verification)
ALLTEST_ON="false"
VERIFICATION_ON="true"
@@ -116,6 +141,14 @@ do
ALLTEST_ON="false"
BENCHMARK_ON="true"
;;
+ --benchmark_op)
+ ALLTEST_ON="false"
+ BENCHMARK_OP_ON="true"
+ ;;
+ --benchmark_tflite_model)
+ ALLTEST_ON="false"
+ BENCHMARK_TFLITE_MODEL_ON="true"
+ ;;
--benchmark_acl)
ALLTEST_ON="false"
BENCHMARK_ACL_ON="true"
@@ -123,6 +156,13 @@ do
--acl_envon)
ACL_ENV_ON="true"
;;
+ --profile)
+ ALLTEST_ON="false"
+ PROFILE_ON="true"
+ ;;
+ --reportdir=*)
+ REPORT_DIR=${i#*=}
+ ;;
*)
# Be careful that other params are handled as $ARTIFACT_PATH
ARTIFACT_PATH="$i"
@@ -131,10 +171,6 @@ do
shift
done
-if [ ! -e "$ARTIFACT_PATH" ]; then
- echo "Pass on with proper ARTIFACT_PATH"
- exit 1
-fi
ARTIFACT_PATH="$(readlink -f $ARTIFACT_PATH)"
if [ -z "$RUN_TEST_SH" ]; then
@@ -150,6 +186,10 @@ if [ -z "$UNIT_TEST_DIR" ]; then
UNIT_TEST_DIR=$ARTIFACT_PATH/Product/out/unittest
fi
+if [ -z "$REPORT_DIR" ]; then
+ REPORT_DIR=$ARTIFACT_PATH/report
+fi
+
if [ -z "$LD_LIBRARY_PATH_IN_SHELL" ]; then
LD_LIBRARY_PATH="$ARTIFACT_PATH/Product/out/lib:$LD_LIBRARY_PATH"
else
@@ -162,20 +202,7 @@ if [ -n "$USE_NNAPI" ]; then
export "$USE_NNAPI"
fi
-function switch_nnfw_kernel_env()
-{
- local switch=$1 # "ON" or "OFF"
- local mode=$2 # "acl" or "neon" or ""
- local NNFW_KERNEL_ENV_FILE=$ARTIFACT_PATH/tools/test_driver/nnfw_kernel_env_list.txt
-
- for ENV in $(cat $NNFW_KERNEL_ENV_FILE); do
- if [[ "$switch" == "ON" ]]; then
- export "$ENV=$mode"
- else
- unset "$ENV"
- fi
- done
-}
+source $TEST_DRIVER_DIR/common.sh
if [ "$ACL_ENV_ON" == "true" ]; then
switch_nnfw_kernel_env "ON" "acl"
@@ -183,30 +210,16 @@ fi
# Run unittest in each part such as Runtime, ACL
if [ "$ALLTEST_ON" == "true" ] || [ "$UNITTEST_ON" == "true" ]; then
- if [ ! -e "$ARTIFACT_PATH/report" ]; then
- mkdir -p $ARTIFACT_PATH/report
+ if [ "$UNITTESTALL_ON" == "true" ]; then
+ $TEST_DRIVER_DIR/run_unittest.sh \
+ --reportdir=$REPORT_DIR \
+ --unittestdir=$UNIT_TEST_DIR \
+ --runall
+ else
+ $TEST_DRIVER_DIR/run_unittest.sh \
+ --reportdir=$REPORT_DIR \
+ --unittestdir=$UNIT_TEST_DIR
fi
-
- echo ""
- echo "============================================"
- echo "Unittest start"
- echo "============================================"
-
- num_unittest=0
- for TEST_BIN in `ls $UNIT_TEST_DIR`; do
- num_unittest=$((num_unittest+1))
- echo "============================================"
- echo "Starting set $num_unittest: $TEST_BIN..."
- echo "============================================"
- $UNIT_TEST_DIR/$TEST_BIN --gtest_output=xml:$ARTIFACT_PATH/report/$TEST_BIN.xml
- echo "============================================"
- echo "Finishing set $num_unittest: $TEST_BIN..."
- echo "============================================"
- done
- echo "============================================"
- echo "Completed total $num_unittest set of unittest"
- echo "Unittest end"
- echo "============================================"
fi
# Run tflite_run with various tflite models
@@ -215,21 +228,14 @@ if [ "$FRAMEWORKTEST_ON" == "true" ]; then
FRAMEWORK_DRIVER_BIN=$ARTIFACT_PATH/Product/out/bin/tflite_run
fi
- if [ ! -e "$ARTIFACT_PATH/report" ]; then
- mkdir -p $ARTIFACT_PATH/report
- fi
-
- echo ""
- echo "============================================"
- echo "Framework Test with tflite_run..."
- $RUN_TEST_SH --driverbin=$FRAMEWORK_DRIVER_BIN \
- --reportdir=$ARTIFACT_PATH/report \
+ $TEST_DRIVER_DIR/run_frameworktest.sh \
+ --runtestsh=$RUN_TEST_SH \
+ --driverbin=$FRAMEWORK_DRIVER_BIN \
+ --reportdir=$REPORT_DIR \
--tapname=framework_test.tap \
- > $ARTIFACT_PATH/report/framework_test.log 2>&1
- echo "============================================"
- cat $ARTIFACT_PATH/report/framework_test.tap
- echo "============================================"
- echo ""
+ --logname=framework_test.log \
+ --testname="Frameworktest" \
+ --frameworktest_list_file=${FRAMEWORKTEST_LIST_FILE:-}
fi
# Run nnapi_test with various tflite models
@@ -238,180 +244,86 @@ if [ "$ALLTEST_ON" == "true" ] || [ "$VERIFICATION_ON" == "true" ]; then
VERIFICATION_DRIVER_BIN=$ARTIFACT_PATH/Product/out/bin/nnapi_test
fi
- if [ ! -e "$ARTIFACT_PATH/report" ]; then
- mkdir -p $ARTIFACT_PATH/report
- fi
-
- echo ""
- echo "============================================"
- echo "Verification with nnapi_test..."
- $RUN_TEST_SH --driverbin=$VERIFICATION_DRIVER_BIN \
- --reportdir=$ARTIFACT_PATH/report \
- --tapname=verification.tap \
- > $ARTIFACT_PATH/report/verification.log 2>&1
- echo "============================================"
- cat $ARTIFACT_PATH/report/verification.tap
- echo "============================================"
- echo ""
+ # Verification uses the same script as the framework test does
+ $TEST_DRIVER_DIR/run_frameworktest.sh \
+ --runtestsh=$RUN_TEST_SH \
+ --driverbin=$VERIFICATION_DRIVER_BIN \
+ --reportdir=$REPORT_DIR \
+ --tapname=verification_test.tap \
+ --logname=verification_test.log \
+ --testname="Verification" \
+ --frameworktest_list_file=${FRAMEWORKTEST_LIST_FILE:-}
fi
-# Benchmark test
-function get_result_of_benchmark_test()
-{
- local RUN_TEST_SH=$1
- local DRIVER_BIN=$2
- local MODEL=$3
- local LOG_FILE=$4
-
- $RUN_TEST_SH --driverbin=$DRIVER_BIN $MODEL > $LOG_FILE 2>&1
-
- local RESULT=`grep -E '^Mean:' $LOG_FILE | sed -e 's/ms//g' | awk '{print $2}'`
- echo "$RESULT"
-}
-
-function print_result_of_benchmark_test()
-{
- local NAME=$1
- local RESULT=$2
- local RESULT_FILE=$3
-
- echo "$NAME $RESULT" > $RESULT_FILE
-}
-
-function run_benchmark_test()
-{
- local DRIVER_BIN=
- local LOG_FILE=
- local RESULT_FILE=
- local RESULT=
- local REPORT_MODEL_DIR=
-
+# Run tflite_benchmark with tflite models
+if [ "$BENCHMARK_ON" == "true" ]; then
if [ -z "$BENCHMARK_DRIVER_BIN" ]; then
DRIVER_BIN=$ARTIFACT_PATH/Product/out/bin/tflite_benchmark
else
DRIVER_BIN=$BENCHMARK_DRIVER_BIN
fi
- export COUNT=5
- echo "============================================"
- local i=0
- for MODEL in $MODEL_LIST; do
- echo "Benchmark test with tflite_benchmark & `echo $MODEL`"
- echo $MODEL >> $MODELS_FILE
-
- REPORT_MODEL_DIR=$ARTIFACT_PATH/report/benchmark/$MODEL
- mkdir -p $REPORT_MODEL_DIR
-
- # TFLite+CPU
- LOG_FILE=$REPORT_MODEL_DIR/tflite_cpu.txt
- RESULT_FILE=$REPORT_MODEL_DIR/tflite_cpu.result
- echo -n "TFLite + CPU................... "
- unset USE_NNAPI
- RESULT=$(get_result_of_benchmark_test $RUN_TEST_SH $DRIVER_BIN $MODEL $LOG_FILE)
- echo "$RESULT ms"
- print_result_of_benchmark_test "TFLite_CPU" $RESULT $RESULT_FILE
-
-
- # TFLite+NNAPI(CPU fallback)
- LOG_FILE=$REPORT_MODEL_DIR/tflite_nnapi_cpu.txt
- RESULT_FILE=$REPORT_MODEL_DIR/tflite_nnapi_cpu.result
- echo -n "TFLite + NNAPI(CPU)............ "
- export USE_NNAPI=1
- RESULT=$(get_result_of_benchmark_test $RUN_TEST_SH $DRIVER_BIN $MODEL $LOG_FILE)
- echo "$RESULT ms"
- print_result_of_benchmark_test "TFLite_NNAPI_CPU" $RESULT $RESULT_FILE
-
- # TFLite+NNAPI(ACL)
- LOG_FILE=$REPORT_MODEL_DIR/tflite_nnapi_acl.txt
- RESULT_FILE=$REPORT_MODEL_DIR/tflite_nnapi_acl.result
- echo -n "TFLite + NNAPI(ACL)............ "
- switch_nnfw_kernel_env "ON" "acl"
- RESULT=$(get_result_of_benchmark_test $RUN_TEST_SH $DRIVER_BIN $MODEL $LOG_FILE)
- echo "$RESULT ms"
- print_result_of_benchmark_test "TFLite_NNAPI_ACL" $RESULT $RESULT_FILE
- unset USE_NNAPI
- switch_nnfw_kernel_env "OFF"
-
- if [[ $i -ne $(echo $MODEL_LIST | wc -w)-1 ]]; then
- echo ""
- fi
- i=$((i+1))
- done
- echo "============================================"
- unset COUNT
-}
-
-function run_benchmark_acl()
-{
- local REPORT_DIR=$ARTIFACT_PATH/report/benchmark
- local DRIVER_DIR=$ARTIFACT_PATH/Product/out/bin
- local LOG_FILE=""
- local RESULT_FILE=""
- local RESULT=""
-
- export COUNT=5
- echo "============================================"
- local i=0
- for BENCHMARK_ACL_BIN in $(ls $DRIVER_DIR/benchmark_*); do
- BENCHMARK_ACL_BIN_BASENAME=$(basename $BENCHMARK_ACL_BIN)
- mkdir -p $REPORT_DIR/$BENCHMARK_ACL_BIN_BASENAME
- echo "Benchmark/acl test by $BENCHMARK_ACL_BIN_BASENAME"
- echo $BENCHMARK_ACL_BIN_BASENAME >> $MODELS_FILE
+ $TEST_DRIVER_DIR/run_benchmark.sh \
+ --runtestsh=$RUN_TEST_SH \
+ --driverbin=$DRIVER_BIN \
+ --reportdir=$REPORT_DIR/benchmark
+fi
- # ACL(NEON)
- LOG_FILE=$REPORT_DIR/$BENCHMARK_ACL_BIN_BASENAME/acl_neon.txt
- RESULT_FILE=$REPORT_DIR/$BENCHMARK_ACL_BIN_BASENAME/acl_neon.result
- echo -n "ACL(NEON)...... "
- $BENCHMARK_ACL_BIN 0 > $LOG_FILE 2>&1
- RESULT=`grep -E '^Mean:' $LOG_FILE | sed -e 's/ms//g' | awk '{print $2}'`
- echo "$RESULT ms"
- echo "ACL(NEON)" $RESULT > $RESULT_FILE
+# Run tflite_benchmark from a list of tflite models.
+# Each model has only one operator.
+if [ "$BENCHMARK_OP_ON" == "true" ]; then
+ if [ -z "$BENCHMARK_DRIVER_BIN" ]; then
+ DRIVER_BIN=$ARTIFACT_PATH/Product/out/bin/tflite_benchmark
+ else
+ DRIVER_BIN=$BENCHMARK_DRIVER_BIN
+ fi
- # ACL(OpenCL)
- LOG_FILE=$REPORT_DIR/$BENCHMARK_ACL_BIN_BASENAME/acl_opencl.txt
- RESULT_FILE=$REPORT_DIR/$BENCHMARK_ACL_BIN_BASENAME/acl_opencl.result
- echo -n "ACL(OpenCL).... "
- $BENCHMARK_ACL_BIN 1 > $LOG_FILE 2>&1
- RESULT=`grep -E '^Mean:' $LOG_FILE | sed -e 's/ms//g' | awk '{print $2}'`
- echo "$RESULT ms"
- echo "ACL(OpenCL)" $RESULT > $RESULT_FILE
+ $TEST_DRIVER_DIR/run_benchmark_op.sh \
+ --runtestsh=$RUN_TEST_SH \
+ --driverbin=$DRIVER_BIN \
+ --reportdir=$REPORT_DIR/benchmark_op \
+ --modelfilepath=$ARTIFACT_PATH/tests/framework \
+ --frameworktest_list_file=${FRAMEWORKTEST_LIST_FILE:-}
+fi
- if [[ $i -ne $(ls $DRIVER_DIR/benchmark_* | wc -w)-1 ]]; then
- echo ""
- fi
- i=$((i+1))
- done
- echo "============================================"
- unset COUNT
-}
+# Run benchmark/acl/benchmark_googlenet, mobilenet and inception_v3
+if [ "$BENCHMARK_ACL_ON" == "true" ]; then
+ $TEST_DRIVER_DIR/run_benchmark_acl.sh \
+ --reportdir=$REPORT_DIR/benchmark \
+ --bindir=$ARTIFACT_PATH/Product/out/bin
+fi
-if [ "$BENCHMARK_ON" == "true" ] || [ "$BENCHMARK_ACL_ON" == "true" ]; then
- if [ ! -e "$ARTIFACT_PATH/report" ]; then
- mkdir -p $ARTIFACT_PATH/report
+# Make a JSON report file. This step is only needed on CI, which is why it is done in test_driver.sh.
+if [ "$BENCHMARK_ON" == "true" ] || [ "$BENCHMARK_ACL_ON" == "true" ] || [ "$BENCHMARK_OP_ON" == "true" ]; then
+ # functions to fill json with benchmark results
+ source $ARTIFACT_PATH/tools/test_driver/print_to_json.sh
+ if [ "$BENCHMARK_OP_ON" == "true" ]; then
+ print_to_json $REPORT_DIR/benchmark_op $REPORT_DIR "benchmark_op_result.json"
+ else
+ print_to_json $REPORT_DIR/benchmark $REPORT_DIR "benchmark_result.json"
fi
- REPORT_MODEL_DIR=$ARTIFACT_PATH/report/benchmark
- mkdir -p $REPORT_MODEL_DIR
- MODELS_FILE=$ARTIFACT_PATH/report/benchmark/benchmark_models.txt
- rm -f $MODELS_FILE
fi
-# Run tflite_benchmark with Iv3_non_slim & Iv3_slim
-if [ "$BENCHMARK_ON" == "true" ]; then
- echo ""
- run_benchmark_test
- echo ""
+# Run tflite_benchmark_model (= per-operation profiling tool).
+# Each model can contain arbitrary number of operators.
+if [ "$BENCHMARK_TFLITE_MODEL_ON" == "true" ]; then
+ $TEST_DRIVER_DIR/run_benchmark_tflite_model.sh \
+ --reportdir=$REPORT_DIR/benchmark_tflite_model \
+ --modelroot=$ARTIFACT_PATH/tests/framework/tests
fi
-# Run benchmark/acl/benchmark_googlenet,mobilenet and inception_v3
-if [ "$BENCHMARK_ACL_ON" == "true" ]; then
+# Run profiling
+if [ "$PROFILE_ON" == "true" ]; then
+ # FIXME: The driver and tflite test below are set temporarily. Make them configurable.
+ DRIVER_BIN=$ARTIFACT_PATH/Product/out/bin/tflite_run
+ TFLITE_TEST=$ARTIFACT_PATH/tests/framework/cache/inceptionv3/inception_module/inception_test.tflite
+
+ # TODO: Allow operf to set the directory where sample data is stored
+ rm -rf oprofile_data
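+ # The collected profile can be inspected afterwards with oprofile's reporting
+ # tool, e.g. 'opreport --callgraph' (assuming the oprofile tools are installed).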
+
echo ""
- run_benchmark_acl
+ echo "============================================"
+ operf -g $DRIVER_BIN $TFLITE_TEST
+ echo "============================================"
echo ""
fi
-
-# make json file
-if [ "$BENCHMARK_ON" == "true" ] || [ "$BENCHMARK_ACL_ON" == "true" ]; then
- # functions to fill json with benchmark results
- source $ARTIFACT_PATH/tools/test_driver/print_to_json.sh
- print_to_json
-fi
diff --git a/tools/tflite_benchmark/CMakeLists.txt b/tools/tflite_benchmark/CMakeLists.txt
new file mode 100644
index 000000000..72ee5d3f9
--- /dev/null
+++ b/tools/tflite_benchmark/CMakeLists.txt
@@ -0,0 +1,5 @@
+list(APPEND SOURCES "src/tflite_benchmark.cc")
+
+add_executable(tflite_benchmark ${SOURCES})
+target_link_libraries(tflite_benchmark tensorflow-lite ${LIB_PTHREAD} dl nnfw_util nnfw_support_tflite)
+install(TARGETS tflite_benchmark DESTINATION bin)
diff --git a/tools/tflite_benchmark/src/tflite_benchmark.cc b/tools/tflite_benchmark/src/tflite_benchmark.cc
new file mode 100644
index 000000000..ba2e62883
--- /dev/null
+++ b/tools/tflite_benchmark/src/tflite_benchmark.cc
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+
+#include "support/tflite/Assert.h"
+#include "support/tflite/Session.h"
+#include "support/tflite/InterpreterSession.h"
+#include "support/tflite/NNAPISession.h"
+#include "support/tflite/Diff.h"
+#include "util/tensor/IndexIterator.h"
+
+#include <boost/accumulators/accumulators.hpp>
+#include <boost/accumulators/statistics/stats.hpp>
+#include <boost/accumulators/statistics/min.hpp>
+#include <boost/accumulators/statistics/max.hpp>
+#include <boost/accumulators/statistics/mean.hpp>
+
+#include <iostream>
+
+#include "util/environment.h"
+#include "util/benchmark.h"
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+void help(std::ostream &out, const int argc, char **argv)
+{
+ std::string cmd = argv[0];
+ auto pos = cmd.find_last_of("/");
+ if (pos != std::string::npos)
+ cmd = cmd.substr(pos + 1);
+
+ out << "use:" << std::endl << cmd << " <model file name>" << std::endl;
+}
+
+bool checkParams(const int argc, char **argv)
+{
+ if (argc < 2)
+ {
+ help(std::cerr, argc, argv);
+ return false;
+ }
+ return true;
+}
+
+int main(const int argc, char **argv)
+{
+
+ if (!checkParams(argc, argv))
+ {
+ return -1;
+ }
+
+ const auto filename = argv[1];
+
+ const bool use_nnapi = nnfw::util::get_env_bool("USE_NNAPI");
+ const auto thread_count = nnfw::util::get_env_int("THREAD", -1);
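+
+ // Example invocation (hypothetical): USE_NNAPI=1 THREAD=4 COUNT=10 ./tflite_benchmark model.tflite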
+
+ std::cout << "Num threads: " << thread_count << std::endl;
+ if (use_nnapi)
+ {
+ std::cout << "Use NNAPI" << std::endl;
+ }
+
+ StderrReporter error_reporter;
+
+ auto model = FlatBufferModel::BuildFromFile(filename, &error_reporter);
+ if (model == nullptr)
+ {
+ std::cerr << "Cannot create model" << std::endl;
+ return -1;
+ }
+
+ BuiltinOpResolver resolver;
+
+ InterpreterBuilder builder(*model, resolver);
+
+ std::unique_ptr<Interpreter> interpreter;
+
+ TFLITE_ENSURE(builder(&interpreter));
+
+ // Show inputs
+ for (uint32_t n = 0; n < interpreter->inputs().size(); ++n)
+ {
+ // TODO Print shape
+ auto tensor_id = interpreter->inputs().at(n);
+ auto tensor_ptr = interpreter->tensor(tensor_id);
+
+ std::cout << "Input #" << n << ":" << std::endl;
+ std::cout << " Name: " << tensor_ptr->name << std::endl;
+ }
+
+ // Show outputs
+ for (uint32_t n = 0; n < interpreter->outputs().size(); ++n)
+ {
+ // TODO Print shape
+ auto tensor_id = interpreter->outputs().at(n);
+ auto tensor_ptr = interpreter->tensor(tensor_id);
+
+ std::cout << "Output #" << n << ":" << std::endl;
+ std::cout << " Name: " << tensor_ptr->name << std::endl;
+ }
+
+ interpreter->SetNumThreads(thread_count);
+
+ std::shared_ptr<nnfw::support::tflite::Session> sess;
+
+ if (use_nnapi)
+ {
+ sess = std::make_shared<nnfw::support::tflite::NNAPISession>(interpreter.get());
+ }
+ else
+ {
+ sess = std::make_shared<nnfw::support::tflite::InterpreterSession>(interpreter.get());
+ }
+
+ //
+ // Warming-up
+ //
+ for (uint32_t n = 0; n < 3; ++n)
+ {
+ std::chrono::milliseconds elapsed(0);
+
+ sess->prepare();
+
+ for (const auto &id : interpreter->inputs())
+ {
+ TfLiteTensor *tensor = interpreter->tensor(id);
+ if (tensor->type == kTfLiteInt32)
+ {
+ // Generate signed 32-bit integer (s32) input
+ auto tensor_view = nnfw::support::tflite::TensorView<int32_t>::make(*interpreter, id);
+
+ int32_t value = 0;
+
+ nnfw::util::tensor::iterate(tensor_view.shape())
+ << [&](const nnfw::util::tensor::Index &ind) {
+ // TODO Generate random values
+ // Gather operation: index should be within input coverage.
+ tensor_view.at(ind) = value;
+ value++;
+ };
+ }
+ else if (tensor->type == kTfLiteUInt8)
+ {
+ // Generate unsigned 8-bit integer input
+ auto tensor_view = nnfw::support::tflite::TensorView<uint8_t>::make(*interpreter, id);
+
+ uint8_t value = 0;
+
+ nnfw::util::tensor::iterate(tensor_view.shape())
+ << [&](const nnfw::util::tensor::Index &ind) {
+ // TODO Generate random values
+ tensor_view.at(ind) = value;
+ value = (value + 1) & 0xFF;
+ };
+ }
+ else
+ {
+ assert(tensor->type == kTfLiteFloat32);
+
+ const int seed = 1; /* TODO Add an option for seed value */
+ RandomGenerator randgen{seed, 0.0f, 0.2f};
+ const float *end = reinterpret_cast<const float *>(tensor->data.raw_const + tensor->bytes);
+ for (float *ptr = tensor->data.f; ptr < end; ptr++)
+ {
+ *ptr = randgen.generate<float>();
+ }
+ }
+ }
+
+ nnfw::util::benchmark::measure(elapsed) << [&](void) {
+ if (!sess->run())
+ {
+ assert(0 && "run failed");
+ }
+ };
+ sess->teardown();
+
+ std::cout << "Warming-up " << n << ": " << elapsed.count() << "ms" << std::endl;
+ }
+
+ //
+ // Measure
+ //
+ const auto cnt = nnfw::util::get_env_int("COUNT", 1);
+
+ using namespace boost::accumulators;
+
+ accumulator_set<double, stats<tag::mean, tag::min, tag::max>> acc;
+
+ for (int n = 0; n < cnt; ++n)
+ {
+ std::chrono::milliseconds elapsed(0);
+
+ sess->prepare();
+ nnfw::util::benchmark::measure(elapsed) << [&](void) {
+ if (!sess->run())
+ {
+ assert(0 && "run failed");
+ }
+ };
+ sess->teardown();
+
+ acc(elapsed.count());
+
+ std::cout << "Iteration " << n << ": " << elapsed.count() << "ms" << std::endl;
+ }
+
+ std::cout << "--------" << std::endl;
+ std::cout << "Min: " << min(acc) << "ms" << std::endl;
+ std::cout << "Max: " << max(acc) << "ms" << std::endl;
+ std::cout << "Mean: " << mean(acc) << "ms" << std::endl;
+
+ return 0;
+}
diff --git a/tools/cross/apt_proxy b/tools/tflite_benchmark_model/.FORMATDENY
index e69de29bb..e69de29bb 100644
--- a/tools/cross/apt_proxy
+++ b/tools/tflite_benchmark_model/.FORMATDENY
diff --git a/tools/tflite_benchmark_model/CMakeLists.txt b/tools/tflite_benchmark_model/CMakeLists.txt
new file mode 100644
index 000000000..d52690460
--- /dev/null
+++ b/tools/tflite_benchmark_model/CMakeLists.txt
@@ -0,0 +1,6 @@
+file(GLOB_RECURSE SOURCES "*.cc")
+
+add_executable(tflite_benchmark_model ${SOURCES})
+target_compile_definitions(tflite_benchmark_model PUBLIC "TFLITE_PROFILING_ENABLED")
+target_link_libraries(tflite_benchmark_model tensorflow-lite ${LIB_PTHREAD} dl nnfw_util nnfw_support_tflite)
+install(TARGETS tflite_benchmark_model DESTINATION bin)
diff --git a/tools/tflite_benchmark_model/README.md b/tools/tflite_benchmark_model/README.md
new file mode 100644
index 000000000..93769305b
--- /dev/null
+++ b/tools/tflite_benchmark_model/README.md
@@ -0,0 +1,209 @@
+# TFLite Model Benchmark Tool
+
+## Description
+
+A simple C++ binary to benchmark a TFLite model and its individual operators,
+both on desktop machines and on Android. The binary takes a TFLite model,
+generates random inputs and then repeatedly runs the model for a specified
+number of runs. Aggregate latency statistics are reported after the benchmark.
+
+The instructions below are for running the binary on desktop and Android;
+for iOS, please use the
+[iOS benchmark app](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/tools/benchmark/ios).
+
+## Parameters
+
+The binary takes the following required parameters:
+
+* `graph`: `string` \
+ The path to the TFLite model file.
+* `input_layer`: `string` \
+ The name of the input layer; this is typically the first layer of the model.
+* `input_layer_shape`: `string` \
+ The shape of the input layer, given as a comma-separated string describing
+ the shape of the input tensor.
+
+and the following optional parameters:
+
+* `num_threads`: `int` (default=1) \
+ The number of threads to use for running the TFLite interpreter.
+* `warmup_runs`: `int` (default=1) \
+ The number of warmup runs to do before starting the benchmark.
+* `run_delay`: `float` (default=-1.0) \
+ The delay in seconds between subsequent benchmark runs. Non-positive values
+ mean no delay.
+* `use_nnapi`: `bool` (default=false) \
+ Whether to use [Android NNAPI](https://developer.android.com/ndk/guides/neuralnetworks/).
+ This API is available on recent Android devices.
+
+## To build/install/run
+
+### On Android:
+
+(0) Refer to https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/android to edit the `WORKSPACE` to configure the Android NDK/SDK.
+
+(1) Build for your specific platform, e.g.:
+
+```
+bazel build -c opt \
+ --config=android_arm \
+ --cxxopt='--std=c++11' \
+ tensorflow/contrib/lite/tools/benchmark:benchmark_model
+```
+
+(2) Connect your phone. Push the binary to your phone with adb push
+ (make the directory if required):
+
+```
+adb push bazel-bin/tensorflow/contrib/lite/tools/benchmark/benchmark_model /data/local/tmp
+```
+
+(3) Make the binary executable.
+
+```
+adb shell chmod +x /data/local/tmp/benchmark_model
+```
+
+(4) Push the compute graph that you need to test. For example:
+
+```
+adb push mobilenet_quant_v1_224.tflite /data/local/tmp
+```
+
+(5) Run the benchmark. For example:
+
+```
+adb shell /data/local/tmp/benchmark_model \
+ --graph=/data/local/tmp/mobilenet_quant_v1_224.tflite \
+ --input_layer="input" \
+ --input_layer_shape="1,224,224,3" \
+ --num_threads=4
+```
+
+### On desktop:
+(1) Build the binary:
+
+```
+bazel build -c opt tensorflow/contrib/lite/tools/benchmark:benchmark_model
+```
+
+(2) Run on your compute graph, similar to the Android case but without the need for adb shell.
+For example:
+
+```
+bazel-bin/tensorflow/contrib/lite/tools/benchmark/benchmark_model \
+ --graph=mobilenet_quant_v1_224.tflite \
+ --input_layer="Placeholder" \
+ --input_layer_shape="1,224,224,3" \
+ --num_threads=4
+```
+
+The MobileNet graph used as an example here may be downloaded from
+https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_v1_224_android_quant_2017_11_08.zip
+
+
+## Reducing variance between runs on Android
+
+Most modern Android phones use [ARM big.LITTLE](https://en.wikipedia.org/wiki/ARM_big.LITTLE)
+architecture where some cores are more power hungry but faster than other cores.
+When running benchmarks on these phones there can be significant variance
+between different runs of the benchmark. One way to reduce variance between runs
+is to set the [CPU affinity](https://en.wikipedia.org/wiki/Processor_affinity)
+before running the benchmark. On Android this can be done using the `taskset`
+command.
+E.g., to run the benchmark on the big cores of a Pixel 2 with a single thread,
+one can use the following command:
+
+```
+adb shell taskset f0 /data/local/tmp/benchmark_model \
+ --graph=/data/local/tmp/mobilenet_quant_v1_224.tflite \
+ --input_layer="input" \
+ --input_layer_shape="1,224,224,3" \
+ --num_threads=1
+```
+
+where `f0` is the affinity mask for big cores on Pixel 2.
+Note: The affinity mask varies with the device.
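+
+As a rough sketch (core numbering is device-specific, so verify it against your
+SoC's topology), a mask can be derived by setting one bit per desired core:
+
+```
+# Hypothetical example: cores 4-7 form the big cluster on many SoCs.
+printf '%x\n' $(( (1<<4) | (1<<5) | (1<<6) | (1<<7) ))   # prints f0
+```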
+
+## Profiling model operators
+The benchmark binary can also profile operators and report the execution time of each.
+To do this, compile the binary with profiling support by passing
+**--copt=-DTFLITE_PROFILING_ENABLED**.
+For example, to compile with profiling support on Android, add this flag to the previous command:
+
+```
+bazel build -c opt \
+ --config=android_arm \
+ --cxxopt='--std=c++11' \
+ --copt=-DTFLITE_PROFILING_ENABLED \
+ tensorflow/contrib/lite/tools/benchmark:benchmark_model
+```
+This compiles TFLite with profiling enabled; you can then run the benchmark binary as before. The binary will produce detailed per-operation statistics similar to those shown below:
+
+```
+
+============================== Run Order ==============================
+ [node type] [start] [first] [avg ms] [%] [cdf%] [mem KB] [times called] [Name]
+ CONV_2D 0.000 4.269 4.269 0.107% 0.107% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_0/Relu6]
+ DEPTHWISE_CONV_2D 4.270 2.150 2.150 0.054% 0.161% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_1_depthwise/Relu6]
+ CONV_2D 6.421 6.107 6.107 0.153% 0.314% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_1_pointwise/Relu6]
+ DEPTHWISE_CONV_2D 12.528 1.366 1.366 0.034% 0.348% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_2_depthwise/Relu6]
+ CONV_2D 13.895 4.195 4.195 0.105% 0.454% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_2_pointwise/Relu6]
+ DEPTHWISE_CONV_2D 18.091 1.260 1.260 0.032% 0.485% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_3_depthwise/Relu6]
+ CONV_2D 19.352 6.652 6.652 0.167% 0.652% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_3_pointwise/Relu6]
+ DEPTHWISE_CONV_2D 26.005 0.698 0.698 0.018% 0.670% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_4_depthwise/Relu6]
+ CONV_2D 26.703 3.344 3.344 0.084% 0.754% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_4_pointwise/Relu6]
+ DEPTHWISE_CONV_2D 30.047 0.646 0.646 0.016% 0.770% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_5_depthwise/Relu6]
+ CONV_2D 30.694 5.800 5.800 0.145% 0.915% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_5_pointwise/Relu6]
+ DEPTHWISE_CONV_2D 36.495 0.331 0.331 0.008% 0.924% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_6_depthwise/Relu6]
+ CONV_2D 36.826 2.838 2.838 0.071% 0.995% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_6_pointwise/Relu6]
+ DEPTHWISE_CONV_2D 39.665 0.439 0.439 0.011% 1.006% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_7_depthwise/Relu6]
+ CONV_2D 40.105 5.293 5.293 0.133% 1.139% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_7_pointwise/Relu6]
+ DEPTHWISE_CONV_2D 45.399 0.352 0.352 0.009% 1.147% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_8_depthwise/Relu6]
+ CONV_2D 45.752 5.322 5.322 0.133% 1.281% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_8_pointwise/Relu6]
+ DEPTHWISE_CONV_2D 51.075 0.357 0.357 0.009% 1.290% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_9_depthwise/Relu6]
+ CONV_2D 51.432 5.693 5.693 0.143% 1.433% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_9_pointwise/Relu6]
+ DEPTHWISE_CONV_2D 57.126 0.366 0.366 0.009% 1.442% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_10_depthwise/Relu6]
+ CONV_2D 57.493 5.472 5.472 0.137% 1.579% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_10_pointwise/Relu6]
+ DEPTHWISE_CONV_2D 62.966 0.364 0.364 0.009% 1.588% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_11_depthwise/Relu6]
+ CONV_2D 63.330 5.404 5.404 0.136% 1.724% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_11_pointwise/Relu6]
+ DEPTHWISE_CONV_2D 68.735 0.155 0.155 0.004% 1.728% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_12_depthwise/Relu6]
+ CONV_2D 68.891 2.970 2.970 0.074% 1.802% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_12_pointwise/Relu6]
+ DEPTHWISE_CONV_2D 71.862 0.206 0.206 0.005% 1.807% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_13_depthwise/Relu6]
+ CONV_2D 72.069 5.888 5.888 0.148% 1.955% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_13_pointwise/Relu6]
+ AVERAGE_POOL_2D 77.958 0.036 0.036 0.001% 1.956% 0.000 0 [MobilenetV1/Logits/AvgPool_1a/AvgPool]
+ CONV_2D 77.994 1.445 1.445 0.036% 1.992% 0.000 0 [MobilenetV1/Logits/Conv2d_1c_1x1/BiasAdd]
+ RESHAPE 79.440 0.002 0.002 0.000% 1.992% 0.000 0 [MobilenetV1/Predictions/Reshape]
+ SOFTMAX 79.443 0.029 0.029 0.001% 1.993% 0.000 0 [MobilenetV1/Predictions/Softmax]
+
+============================== Top by Computation Time ==============================
+ [node type] [start] [first] [avg ms] [%] [cdf%] [mem KB] [times called] [Name]
+ CONV_2D 19.352 6.652 6.652 0.167% 0.167% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_3_pointwise/Relu6]
+ CONV_2D 6.421 6.107 6.107 0.153% 0.320% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_1_pointwise/Relu6]
+ CONV_2D 72.069 5.888 5.888 0.148% 0.468% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_13_pointwise/Relu6]
+ CONV_2D 30.694 5.800 5.800 0.145% 0.613% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_5_pointwise/Relu6]
+ CONV_2D 51.432 5.693 5.693 0.143% 0.756% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_9_pointwise/Relu6]
+ CONV_2D 57.493 5.472 5.472 0.137% 0.893% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_10_pointwise/Relu6]
+ CONV_2D 63.330 5.404 5.404 0.136% 1.029% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_11_pointwise/Relu6]
+ CONV_2D 45.752 5.322 5.322 0.133% 1.162% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_8_pointwise/Relu6]
+ CONV_2D 40.105 5.293 5.293 0.133% 1.295% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_7_pointwise/Relu6]
+ CONV_2D 0.000 4.269 4.269 0.107% 1.402% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_0/Relu6]
+
+Number of nodes executed: 31
+============================== Summary by node type ==============================
+ [Node type] [count] [avg ms] [avg %] [cdf %] [mem KB] [times called]
+ CONV_2D 15 1.406 89.270% 89.270% 0.000 0
+ DEPTHWISE_CONV_2D 13 0.169 10.730% 100.000% 0.000 0
+ SOFTMAX 1 0.000 0.000% 100.000% 0.000 0
+ RESHAPE 1 0.000 0.000% 100.000% 0.000 0
+ AVERAGE_POOL_2D 1 0.000 0.000% 100.000% 0.000 0
+
+Timings (microseconds): count=50 first=79449 curr=81350 min=77385 max=88213 avg=79732 std=1929
+Memory (bytes): count=0
+31 nodes observed
+
+
+Average inference timings in us: Warmup: 83235, Init: 38467, no stats: 79760.9
+```
+
+
diff --git a/tools/tflite_benchmark_model/benchmark_main.cc b/tools/tflite_benchmark_model/benchmark_main.cc
new file mode 100644
index 000000000..7e4231c48
--- /dev/null
+++ b/tools/tflite_benchmark_model/benchmark_main.cc
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "benchmark_tflite_model.h"
+#include "logging.h"
+
+namespace nnfw {
+namespace benchmark {
+
+int Main(int argc, char** argv) {
+#ifdef TFLITE_CUSTOM_OPS_HEADER
+ TFLITE_LOG(INFO) << "STARTING with custom ops!";
+#else
+ TFLITE_LOG(INFO) << "STARTING!";
+#endif
+ BenchmarkTfLiteModel benchmark;
+ BenchmarkLoggingListener listener;
+ benchmark.AddListener(&listener);
+ benchmark.Run(argc, argv);
+ return 0;
+}
+} // namespace benchmark
+} // namespace nnfw
+
+int main(int argc, char** argv) { return nnfw::benchmark::Main(argc, argv); }
diff --git a/tools/tflite_benchmark_model/benchmark_model.cc b/tools/tflite_benchmark_model/benchmark_model.cc
new file mode 100644
index 000000000..7869180bf
--- /dev/null
+++ b/tools/tflite_benchmark_model/benchmark_model.cc
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "benchmark_model.h"
+
+#include <time.h>
+
+#include <iostream>
+#include <sstream>
+
+#include "tensorflow/contrib/lite/profiling/time.h"
+#include "logging.h"
+
+namespace {
+void SleepForSeconds(double sleep_seconds) {
+ if (sleep_seconds <= 0.0) {
+ return;
+ }
+ // Convert the requested delay in seconds into a timespec.
+ timespec req;
+ req.tv_sec = static_cast<time_t>(sleep_seconds);
+ req.tv_nsec = (sleep_seconds - req.tv_sec) * 1000000000;
+ // If requested, sleep between runs for an arbitrary amount of time.
+ // This can be helpful to determine the effect of mobile processor
+ // scaling and thermal throttling.
+#ifdef PLATFORM_WINDOWS
+ Sleep(sleep_seconds * 1000);
+#else
+ nanosleep(&req, nullptr);
+#endif
+}
+
+} // namespace
+
+namespace nnfw {
+namespace benchmark {
+using tensorflow::Stat;
+
+BenchmarkParams BenchmarkModel::DefaultParams() {
+ BenchmarkParams params;
+ params.AddParam("num_runs", BenchmarkParam::Create<int32_t>(50));
+ params.AddParam("run_delay", BenchmarkParam::Create<float>(-1.0f));
+ params.AddParam("num_threads", BenchmarkParam::Create<int32_t>(1));
+ params.AddParam("benchmark_name", BenchmarkParam::Create<std::string>(""));
+ params.AddParam("output_prefix", BenchmarkParam::Create<std::string>(""));
+ params.AddParam("warmup_runs", BenchmarkParam::Create<int32_t>(1));
+ return params;
+}
+
+BenchmarkModel::BenchmarkModel() : params_(DefaultParams()) {}
+
+void BenchmarkLoggingListener::OnBenchmarkEnd(const BenchmarkResults &results) {
+ auto inference_us = results.inference_time_us();
+ auto init_us = results.startup_latency_us();
+ auto warmup_us = results.warmup_time_us();
+ TFLITE_LOG(INFO) << "Average inference timings in us: "
+ << "Warmup: " << warmup_us.avg() << ", "
+ << "Init: " << init_us << ", "
+ << "no stats: " << inference_us.avg();
+}
+
+std::vector<Flag> BenchmarkModel::GetFlags() {
+ return {
+ CreateFlag<int32_t>("num_runs", &params_, "number of runs"),
+ CreateFlag<float>("run_delay", &params_, "delay between runs in seconds"),
+ CreateFlag<int32_t>("num_threads", &params_, "number of threads"),
+ CreateFlag<std::string>("benchmark_name", &params_, "benchmark name"),
+ CreateFlag<std::string>("output_prefix", &params_,
+ "benchmark output prefix"),
+ CreateFlag<int32_t>("warmup_runs", &params_,
+ "how many runs to initialize model"),
+ };
+}
+
+void BenchmarkModel::LogFlags() {
+ TFLITE_LOG(INFO) << "Num runs: [" << params_.Get<int32_t>("num_runs") << "]";
+ TFLITE_LOG(INFO) << "Inter-run delay (seconds): ["
+ << params_.Get<float>("run_delay") << "]";
+ TFLITE_LOG(INFO) << "Num threads: [" << params_.Get<int32_t>("num_threads")
+ << "]";
+ TFLITE_LOG(INFO) << "Benchmark name: ["
+ << params_.Get<std::string>("benchmark_name") << "]";
+ TFLITE_LOG(INFO) << "Output prefix: ["
+ << params_.Get<std::string>("output_prefix") << "]";
+ TFLITE_LOG(INFO) << "Warmup runs: [" << params_.Get<int32_t>("warmup_runs")
+ << "]";
+}
+
+Stat<int64_t> BenchmarkModel::Run(int num_times, RunType run_type) {
+ Stat<int64_t> run_stats;
+ TFLITE_LOG(INFO) << "Running benchmark for " << num_times << " iterations ";
+ for (int run = 0; run < num_times; run++) {
+ listeners_.OnSingleRunStart(run_type);
+ int64_t start_us = tflite::profiling::time::NowMicros();
+ RunImpl();
+ int64_t end_us = tflite::profiling::time::NowMicros();
+ listeners_.OnSingleRunEnd();
+
+ run_stats.UpdateStat(end_us - start_us);
+ SleepForSeconds(params_.Get<float>("run_delay"));
+ }
+
+ std::stringstream stream;
+ run_stats.OutputToStream(&stream);
+ TFLITE_LOG(INFO) << stream.str() << std::endl;
+
+ return run_stats;
+}
+
+void BenchmarkModel::Run(int argc, char **argv) {
+ if (!ParseFlags(argc, argv)) {
+ return;
+ }
+
+ LogFlags();
+
+ listeners_.OnBenchmarkStart(params_);
+ int64_t initialization_start_us = tflite::profiling::time::NowMicros();
+ Init();
+ int64_t initialization_end_us = tflite::profiling::time::NowMicros();
+ int64_t startup_latency_us = initialization_end_us - initialization_start_us;
+ TFLITE_LOG(INFO) << "Initialized session in " << startup_latency_us / 1e3
+ << "ms";
+
+ uint64_t input_bytes = ComputeInputBytes();
+ Stat<int64_t> warmup_time_us =
+ Run(params_.Get<int32_t>("warmup_runs"), WARMUP);
+ Stat<int64_t> inference_time_us =
+ Run(params_.Get<int32_t>("num_runs"), REGULAR);
+ listeners_.OnBenchmarkEnd(
+ {startup_latency_us, input_bytes, warmup_time_us, inference_time_us});
+}
+
+bool BenchmarkModel::ParseFlags(int argc, char **argv) {
+ auto flag_list = GetFlags();
+ const bool parse_result =
+ Flags::Parse(&argc, const_cast<const char **>(argv), flag_list);
+ if (!parse_result) {
+ std::string usage = Flags::Usage(argv[0], flag_list);
+ TFLITE_LOG(ERROR) << usage;
+ return false;
+ }
+ return ValidateFlags();
+}
+
+} // namespace benchmark
+} // namespace nnfw
diff --git a/tools/tflite_benchmark_model/benchmark_model.h b/tools/tflite_benchmark_model/benchmark_model.h
new file mode 100644
index 000000000..5645e2910
--- /dev/null
+++ b/tools/tflite_benchmark_model/benchmark_model.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef __TFLITE_BENCHMARK_MODEL_BENCHMARK_MODEL_H__
+#define __TFLITE_BENCHMARK_MODEL_BENCHMARK_MODEL_H__
+
+#include <cmath>
+#include <limits>
+#include <ostream>
+#include <string>
+#include <unordered_set>
+#include <vector>
+
+#include "benchmark_params.h"
+#include "command_line_flags.h"
+#include "tensorflow/core/util/stats_calculator.h"
+
+namespace nnfw {
+namespace benchmark {
+
+enum RunType {
+ WARMUP,
+ REGULAR,
+};
+
+class BenchmarkResults {
+ public:
+ BenchmarkResults(int64_t startup_latency_us, uint64_t input_bytes,
+ tensorflow::Stat<int64_t> warmup_time_us,
+ tensorflow::Stat<int64_t> inference_time_us)
+ : startup_latency_us_(startup_latency_us),
+ input_bytes_(input_bytes),
+ warmup_time_us_(warmup_time_us),
+ inference_time_us_(inference_time_us) {}
+
+ tensorflow::Stat<int64_t> inference_time_us() const {
+ return inference_time_us_;
+ }
+ tensorflow::Stat<int64_t> warmup_time_us() const { return warmup_time_us_; }
+ int64_t startup_latency_us() const { return startup_latency_us_; }
+ uint64_t input_bytes() const { return input_bytes_; }
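+ // Throughput = (bytes consumed per run * number of runs) / total inference
+ // time in seconds, converted to MiB/s.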
+ double throughput_MB_per_second() const {
+ double bytes_per_sec = (input_bytes_ * inference_time_us_.count() * 1e6) /
+ inference_time_us_.sum();
+ return bytes_per_sec / (1024.0 * 1024.0);
+ }
+
+ private:
+ int64_t startup_latency_us_;
+ uint64_t input_bytes_;
+ tensorflow::Stat<int64_t> warmup_time_us_;
+ tensorflow::Stat<int64_t> inference_time_us_;
+};
+
+class BenchmarkListener {
+ public:
+ virtual void OnBenchmarkStart(const BenchmarkParams& params) {}
+ virtual void OnSingleRunStart(RunType runType) {}
+ virtual void OnSingleRunEnd() {}
+ virtual void OnBenchmarkEnd(const BenchmarkResults& results) {}
+ virtual ~BenchmarkListener() {}
+};
+
+// A listener that forwards its method calls to a collection of listeners.
+class BenchmarkListeners : public BenchmarkListener {
+ public:
+ // Adds a listener to the listener collection.
+ // |listener| is not owned by the instance of |BenchmarkListeners|.
+ // |listener| should not be null and should outlast the instance of
+ // |BenchmarkListeners|.
+ void AddListener(BenchmarkListener* listener) {
+ listeners_.push_back(listener);
+ }
+
+ void OnBenchmarkStart(const BenchmarkParams& params) override {
+ for (auto listener : listeners_) {
+ listener->OnBenchmarkStart(params);
+ }
+ }
+
+ void OnSingleRunStart(RunType runType) override {
+ for (auto listener : listeners_) {
+ listener->OnSingleRunStart(runType);
+ }
+ }
+
+ void OnSingleRunEnd() override {
+ for (auto listener : listeners_) {
+ listener->OnSingleRunEnd();
+ }
+ }
+
+ void OnBenchmarkEnd(const BenchmarkResults& results) override {
+ for (auto listener : listeners_) {
+ listener->OnBenchmarkEnd(results);
+ }
+ }
+
+ ~BenchmarkListeners() {}
+
+ private:
+ // Use vector so listeners are invoked in the order they are added.
+ std::vector<BenchmarkListener*> listeners_;
+};
+
+// Benchmark listener that just logs the results of benchmark run.
+class BenchmarkLoggingListener : public BenchmarkListener {
+ void OnBenchmarkEnd(const BenchmarkResults& results) override;
+};
+
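+// Creates a command-line flag whose parsed value is stored back into |params|
+// under |name|; the parameter's current value is used as the flag's default.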
+template <typename T>
+Flag CreateFlag(const char* name, BenchmarkParams* params,
+ const std::string& usage) {
+ return Flag(name, [params, name](const T& val) { params->Set<T>(name, val); },
+ params->Get<T>(name), usage);
+}
+
+// Benchmarks a model.
+//
+// Subclasses need to implement initialization and running of the model.
+// The results can be collected by adding BenchmarkListener(s).
+class BenchmarkModel {
+ public:
+ static BenchmarkParams DefaultParams();
+ BenchmarkModel();
+ BenchmarkModel(BenchmarkParams params) : params_(std::move(params)) {}
+ virtual ~BenchmarkModel() {}
+ bool ParseFlags(int argc, char** argv);
+ virtual void Init() = 0;
+ void Run(int argc, char** argv);
+ void AddListener(BenchmarkListener* listener) {
+ listeners_.AddListener(listener);
+ }
+
+ protected:
+ virtual void LogFlags();
+ virtual bool ValidateFlags() { return true; }
+ virtual std::vector<Flag> GetFlags();
+ virtual uint64_t ComputeInputBytes() = 0;
+ virtual tensorflow::Stat<int64_t> Run(int num_times, RunType run_type);
+ virtual void RunImpl() = 0;
+ BenchmarkParams params_;
+ BenchmarkListeners listeners_;
+};
+
+} // namespace benchmark
+} // namespace nnfw
+
+#endif //__TFLITE_BENCHMARK_MODEL_BENCHMARK_MODEL_H__
diff --git a/tools/tflite_benchmark_model/benchmark_params.cc b/tools/tflite_benchmark_model/benchmark_params.cc
new file mode 100644
index 000000000..7b667a442
--- /dev/null
+++ b/tools/tflite_benchmark_model/benchmark_params.cc
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "benchmark_params.h"
+
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "logging.h"
+
+namespace nnfw {
+namespace benchmark {
+
+void BenchmarkParam::AssertHasSameType(BenchmarkParam::ParamType a,
+ BenchmarkParam::ParamType b) {
+ TFLITE_BENCHMARK_CHECK(a == b) << "Type mismatch while accessing parameter.";
+}
+
+template <>
+BenchmarkParam::ParamType BenchmarkParam::GetValueType<int32_t>() {
+ return BenchmarkParam::ParamType::TYPE_INT32;
+}
+
+template <>
+BenchmarkParam::ParamType BenchmarkParam::GetValueType<bool>() {
+ return BenchmarkParam::ParamType::TYPE_BOOL;
+}
+
+template <>
+BenchmarkParam::ParamType BenchmarkParam::GetValueType<float>() {
+ return BenchmarkParam::ParamType::TYPE_FLOAT;
+}
+
+template <>
+BenchmarkParam::ParamType BenchmarkParam::GetValueType<std::string>() {
+ return BenchmarkParam::ParamType::TYPE_STRING;
+}
+
+void BenchmarkParams::AssertParamExists(const std::string& name) const {
+ TFLITE_BENCHMARK_CHECK(HasParam(name)) << name << " was not found.";
+}
+
+} // namespace benchmark
+} // namespace nnfw
diff --git a/tools/tflite_benchmark_model/benchmark_params.h b/tools/tflite_benchmark_model/benchmark_params.h
new file mode 100644
index 000000000..1ac3f4af6
--- /dev/null
+++ b/tools/tflite_benchmark_model/benchmark_params.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef __TFLITE_BENCHMARK_MODEL_BENCHMARK_PARAMS_H__
+#define __TFLITE_BENCHMARK_MODEL_BENCHMARK_PARAMS_H__
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "logging.h"
+
+namespace nnfw {
+namespace benchmark {
+
+template <typename T>
+class TypedBenchmarkParam;
+
+class BenchmarkParam {
+ protected:
+ enum class ParamType { TYPE_INT32, TYPE_FLOAT, TYPE_BOOL, TYPE_STRING };
+
+ public:
+ template <typename T>
+ static std::unique_ptr<BenchmarkParam> Create(const T& default_value) {
+ return std::unique_ptr<BenchmarkParam>(
+ new TypedBenchmarkParam<T>(default_value));
+ }
+
+ template <typename T>
+ TypedBenchmarkParam<T>* AsTyped() {
+ AssertHasSameType(GetValueType<T>(), type_);
+ return static_cast<TypedBenchmarkParam<T>*>(this);
+ }
+ virtual ~BenchmarkParam() {}
+ BenchmarkParam(ParamType type) : type_(type) {}
+
+ private:
+ static void AssertHasSameType(ParamType a, ParamType b);
+ protected:
+ template <typename T>
+ static ParamType GetValueType();
+
+ const ParamType type_;
+};
+
+template <typename T>
+class TypedBenchmarkParam : public BenchmarkParam {
+ public:
+ TypedBenchmarkParam(const T& value)
+ : BenchmarkParam(GetValueType<T>()), value_(value) {}
+ void Set(const T& value) { value_ = value; }
+
+ T Get() { return value_; }
+
+ private:
+ T value_;
+};
+
+class BenchmarkParams {
+ public:
+ void AddParam(const std::string& name,
+ std::unique_ptr<BenchmarkParam> value) {
+ params_[name] = std::move(value);
+ }
+
+ bool HasParam(const std::string& name) const {
+ return params_.find(name) != params_.end();
+ }
+
+ template <typename T>
+ void Set(const std::string& name, const T& value) {
+ AssertParamExists(name);
+ params_.at(name)->AsTyped<T>()->Set(value);
+ }
+
+ template <typename T>
+ T Get(const std::string& name) const {
+ AssertParamExists(name);
+ return params_.at(name)->AsTyped<T>()->Get();
+ }
+
+ private:
+ void AssertParamExists(const std::string& name) const;
+ std::unordered_map<std::string, std::unique_ptr<BenchmarkParam>> params_;
+};
+
+} // namespace benchmark
+} // namespace nnfw
+#endif // __TFLITE_BENCHMARK_MODEL_BENCHMARK_PARAMS_H__
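[Editor's sketch] A short usage sketch of the typed parameter store above; the parameter names here are illustrative, though `GetDefaultParams()` in benchmark_tflite_model.cc below follows the same pattern:

```
#include <cstdint>
#include <string>

#include "benchmark_params.h"

using nnfw::benchmark::BenchmarkParam;
using nnfw::benchmark::BenchmarkParams;

int main() {
  BenchmarkParams params;
  params.AddParam("num_runs", BenchmarkParam::Create<int32_t>(50));
  params.AddParam("graph", BenchmarkParam::Create<std::string>(""));

  params.Set<int32_t>("num_runs", 100);
  int32_t num_runs = params.Get<int32_t>("num_runs");  // 100

  // A mismatched type aborts: AsTyped<T>() checks the stored ParamType
  // against GetValueType<T>() via TFLITE_BENCHMARK_CHECK.
  return num_runs == 100 ? 0 : 1;
}
```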
diff --git a/tools/tflite_benchmark_model/benchmark_tflite_model.cc b/tools/tflite_benchmark_model/benchmark_tflite_model.cc
new file mode 100644
index 000000000..d277795a3
--- /dev/null
+++ b/tools/tflite_benchmark_model/benchmark_tflite_model.cc
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "benchmark_tflite_model.h"
+
+#include <cstdarg>
+#include <cstdlib>
+#include <iostream>
+#include <memory>
+#include <string>
+#include <unordered_set>
+#include <vector>
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/op_resolver.h"
+#include "tensorflow/contrib/lite/string_util.h"
+#include "logging.h"
+#include "util/profiling/profiling.h"
+#include "support/tflite/nnapi_delegate.h"
+
+#ifdef TFLITE_CUSTOM_OPS_HEADER
+void RegisterSelectedOps(::tflite::MutableOpResolver* resolver);
+#endif
+
+namespace nnfw {
+namespace benchmark {
+
+void ProfilingListener::SetInterpreter(tflite::Interpreter* interpreter) {
+ TFLITE_BENCHMARK_CHECK(interpreter);
+ interpreter_ = interpreter;
+ interpreter_->SetProfiler(&profiler_);
+}
+
+void ProfilingListener::OnSingleRunStart(RunType run_type) {
+ if (run_type == REGULAR) {
+ profiler_.Reset();
+ profiler_.StartProfiling();
+ }
+}
+
+void ProfilingListener::OnBenchmarkEnd(const BenchmarkResults& results) {
+ if (has_profiles_) {
+ TFLITE_LOG(INFO) << summarizer_.GetOutputString();
+ }
+}
+
+void ProfilingListener::OnSingleRunEnd() {
+ profiler_.StopProfiling();
+ auto profile_events = profiler_.GetProfileEvents();
+ has_profiles_ = !profile_events.empty();
+ summarizer_.ProcessProfiles(profile_events, *interpreter_);
+}
+
+namespace {
+
+std::vector<std::string> Split(const std::string& str, const char delim) {
+ std::istringstream input(str);
+ std::vector<std::string> results;
+ std::string item;
+ while (std::getline(input, item, delim)) {
+ results.push_back(item);
+ }
+ return results;
+}
+
+template <typename T>
+bool SplitAndParse(const std::string& str, char delim, std::vector<T>* values) {
+ std::istringstream input(str);
+ bool first = true;
+ while (!input.eof()) {
+ if (!first) {
+ char c;
+ input >> c;
+ if (c != delim) {
+ return false;
+ }
+ } else {
+ first = false;
+ }
+ T val;
+ input >> val;
+ if (!input.eof() && !input.good()) {
+ return false;
+ }
+ values->push_back(val);
+ }
+ return true;
+}
+
+template <typename T>
+void FillRandomValue(T* ptr, const std::vector<int>& sizes,
+ const std::function<T()>& random_func) {
+ int num_elements = 1;
+ for (int dim : sizes) {
+ num_elements *= dim;
+ }
+ for (int i = 0; i < num_elements; ++i) {
+ *ptr++ = random_func();
+ }
+}
+
+void FillRandomString(tflite::DynamicBuffer* buffer,
+ const std::vector<int>& sizes,
+ const std::function<std::string()>& random_func) {
+ int num_elements = 1;
+ for (int dim : sizes) {
+ num_elements *= dim;
+ }
+ for (int i = 0; i < num_elements; ++i) {
+ auto str = random_func();
+ buffer->AddString(str.data(), str.length());
+ }
+}
+
+bool PopulateInputLayerInfo(
+ const std::string& names_string, const std::string& shapes_string,
+ std::vector<BenchmarkTfLiteModel::InputLayerInfo>* info) {
+ std::vector<std::string> names = Split(names_string, ',');
+ std::vector<std::string> shapes = Split(shapes_string, ':');
+
+ if (names.size() != shapes.size()) {
+ TFLITE_LOG(ERROR) << "The number of items in"
+ << " --input_layer_shape (" << shapes_string << ", with "
+ << shapes.size() << " items)"
+ << " must match the number of items in"
+ << " --input_layer (" << names_string << ", with "
+ << names.size() << " items)."
+ << " For example --input_layer=input1,input2"
+ << " --input_layer_shape=1,224,224,4:1,20";
+ return false;
+ }
+
+ for (int i = 0; i < names.size(); ++i) {
+ info->push_back(BenchmarkTfLiteModel::InputLayerInfo());
+ BenchmarkTfLiteModel::InputLayerInfo& input = info->back();
+
+ input.name = names[i];
+
+ TFLITE_BENCHMARK_CHECK(SplitAndParse(shapes[i], ',', &input.shape))
+ << "Incorrect size string specified: " << shapes[i];
+ for (int dim : input.shape) {
+ if (dim == -1) {
+ TFLITE_LOG(ERROR)
+ << "Any unknown sizes in the shapes (-1's) must be replaced"
+ << " with the size you want to benchmark with.";
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+BenchmarkParams GetDefaultParams() {
+ BenchmarkParams default_params = BenchmarkModel::DefaultParams();
+ default_params.AddParam("graph", BenchmarkParam::Create<std::string>(""));
+ default_params.AddParam("input_layer",
+ BenchmarkParam::Create<std::string>(""));
+ default_params.AddParam("input_layer_shape",
+ BenchmarkParam::Create<std::string>(""));
+ default_params.AddParam("use_nnapi", BenchmarkParam::Create<bool>(false));
+ return default_params;
+}
+
+} // namespace
+
+BenchmarkTfLiteModel::BenchmarkTfLiteModel()
+ : BenchmarkModel(GetDefaultParams()) {
+ AddListener(&profiling_listener_);
+}
+
+BenchmarkTfLiteModel::BenchmarkTfLiteModel(BenchmarkParams params)
+ : BenchmarkModel(std::move(params)) {
+ AddListener(&profiling_listener_);
+}
+
+std::vector<Flag> BenchmarkTfLiteModel::GetFlags() {
+ std::vector<Flag> flags = BenchmarkTfLiteModel::BenchmarkModel::GetFlags();
+ std::vector<Flag> specific_flags = {
+ CreateFlag<std::string>("graph", &params_, "graph file name"),
+ CreateFlag<std::string>("input_layer", &params_, "input layer names"),
+ CreateFlag<std::string>("input_layer_shape", &params_,
+ "input layer shape"),
+ CreateFlag<bool>("use_nnapi", &params_, "use nnapi api")};
+
+ flags.insert(flags.end(), specific_flags.begin(), specific_flags.end());
+ return flags;
+}
+
+void BenchmarkTfLiteModel::LogFlags() {
+ BenchmarkModel::LogFlags();
+ TFLITE_LOG(INFO) << "Graph: [" << params_.Get<std::string>("graph") << "]";
+ TFLITE_LOG(INFO) << "Input layers: ["
+ << params_.Get<std::string>("input_layer") << "]";
+ TFLITE_LOG(INFO) << "Input shapes: ["
+ << params_.Get<std::string>("input_layer_shape") << "]";
+ TFLITE_LOG(INFO) << "Use nnapi : [" << params_.Get<bool>("use_nnapi") << "]";
+}
+
+bool BenchmarkTfLiteModel::ValidateFlags() {
+ if (params_.Get<std::string>("graph").empty()) {
+ TFLITE_LOG(ERROR)
+ << "Please specify the name of your TF Lite input file with --graph";
+ return false;
+ }
+ return PopulateInputLayerInfo(params_.Get<std::string>("input_layer"),
+ params_.Get<std::string>("input_layer_shape"),
+ &inputs);
+}
+
+uint64_t BenchmarkTfLiteModel::ComputeInputBytes() {
+ TFLITE_BENCHMARK_CHECK(interpreter);
+ uint64_t total_input_bytes = 0;
+ for (int input : interpreter->inputs()) {
+ auto* t = interpreter->tensor(input);
+ total_input_bytes += t->bytes;
+ }
+ return total_input_bytes;
+}
+
+void BenchmarkTfLiteModel::Init() {
+ std::string graph = params_.Get<std::string>("graph");
+ model = tflite::FlatBufferModel::BuildFromFile(graph.c_str());
+ if (!model) {
+ TFLITE_LOG(FATAL) << "Failed to mmap model " << graph;
+ }
+ TFLITE_LOG(INFO) << "Loaded model " << graph;
+ model->error_reporter();
+ TFLITE_LOG(INFO) << "resolved reporter";
+
+#ifdef TFLITE_CUSTOM_OPS_HEADER
+ tflite::MutableOpResolver resolver;
+ RegisterSelectedOps(&resolver);
+#else
+ tflite::ops::builtin::BuiltinOpResolver resolver;
+#endif
+
+ tflite::InterpreterBuilder(*model, resolver)(&interpreter);
+ if (!interpreter) {
+ TFLITE_LOG(FATAL) << "Failed to construct interpreter";
+ }
+ profiling_listener_.SetInterpreter(interpreter.get());
+ profiling::Context::get().setProfiler(interpreter->GetProfiler());
+
+ const int32_t num_threads = params_.Get<int32_t>("num_threads");
+
+ if (num_threads != -1) {
+ interpreter->SetNumThreads(num_threads);
+ }
+
+ bool use_nnapi = params_.Get<bool>("use_nnapi");
+
+ interpreter->UseNNAPI(use_nnapi);
+ auto interpreter_inputs = interpreter->inputs();
+
+ if (!inputs.empty()) {
+ TFLITE_BENCHMARK_CHECK_EQ(inputs.size(), interpreter_inputs.size())
+ << "Inputs mismatch: Model inputs #:" << interpreter_inputs.size()
+ << " expected: " << inputs.size();
+ }
+
+  // TFLITE_BENCHMARK_CHECK that all names match (only names are verified here)
+ for (int j = 0; j < inputs.size(); ++j) {
+ const InputLayerInfo& input = inputs[j];
+ int i = interpreter_inputs[j];
+ TfLiteTensor* t = interpreter->tensor(i);
+ TFLITE_BENCHMARK_CHECK_EQ(t->name, input.name)
+ << "Tensor # " << i << " is named " << t->name << " but flags call it "
+ << input.name;
+ }
+
+ // Resize all non-string tensors.
+ for (int j = 0; j < inputs.size(); ++j) {
+ const InputLayerInfo& input = inputs[j];
+ int i = interpreter_inputs[j];
+ TfLiteTensor* t = interpreter->tensor(i);
+ if (t->type != kTfLiteString) {
+ interpreter->ResizeInputTensor(i, input.shape);
+ }
+ }
+
+ if (interpreter->AllocateTensors() != kTfLiteOk) {
+ TFLITE_LOG(FATAL) << "Failed to allocate tensors!";
+ }
+
+ // Set the values of the input tensors.
+ for (int j = 0; j < inputs.size(); ++j) {
+ const InputLayerInfo& input = inputs[j];
+ int i = interpreter_inputs[j];
+ TfLiteTensor* t = interpreter->tensor(i);
+ std::vector<int> sizes = input.shape;
+
+    // TODO(ahentz): below we ignore the 0-th dimension (number of batches).
+ if (t->type == kTfLiteFloat32) {
+ FillRandomValue<float>(
+ interpreter->typed_tensor<float>(i),
+ std::vector<int>(sizes.begin() + 1, sizes.end()),
+ []() { return static_cast<float>(rand()) / RAND_MAX - 0.5f; });
+ } else if (t->type == kTfLiteUInt8) {
+ FillRandomValue<uint8_t>(
+ interpreter->typed_tensor<uint8_t>(i),
+ std::vector<int>(sizes.begin() + 1, sizes.end()),
+ []() { return static_cast<uint8_t>(rand()) % 255; });
+ } else if (t->type == kTfLiteString) {
+ tflite::DynamicBuffer buffer;
+ FillRandomString(&buffer, sizes, []() {
+ return "we're have some friends over saturday to hang out in the yard";
+ });
+ buffer.WriteToTensor(interpreter->tensor(i));
+ } else {
+ TFLITE_LOG(FATAL) << "Don't know how to populate tensor " << t->name
+ << " of type " << t->type;
+ }
+ }
+}
+
+void BenchmarkTfLiteModel::RunImpl() {
+ bool use_nnapi = params_.Get<bool>("use_nnapi");
+ if (use_nnapi) {
+ if (nnfw::NNAPIDelegate().Invoke(interpreter.get()) != kTfLiteOk) {
+ TFLITE_LOG(FATAL) << "Failed to invoke!";
+ }
+ } else {
+ if (interpreter->Invoke() != kTfLiteOk) {
+ TFLITE_LOG(FATAL) << "Failed to invoke!";
+ }
+ }
+}
+
+} // namespace benchmark
+} // namespace nnfw
diff --git a/tools/tflite_benchmark_model/benchmark_tflite_model.h b/tools/tflite_benchmark_model/benchmark_tflite_model.h
new file mode 100644
index 000000000..7892de1f7
--- /dev/null
+++ b/tools/tflite_benchmark_model/benchmark_tflite_model.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef __TFLITE_BENCHMARK_MODEL_BENCHMARK_TFLITE_MODEL_H__
+#define __TFLITE_BENCHMARK_MODEL_BENCHMARK_TFLITE_MODEL_H__
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/profiling/profile_summarizer.h"
+#include "benchmark_model.h"
+
+namespace nnfw {
+namespace benchmark {
+
+// Dumps profiling events if profiling is enabled
+class ProfilingListener : public BenchmarkListener {
+ public:
+ explicit ProfilingListener() : interpreter_(nullptr), has_profiles_(false) {}
+
+ void SetInterpreter(tflite::Interpreter* interpreter);
+
+ void OnSingleRunStart(RunType run_type) override;
+
+ void OnSingleRunEnd() override;
+
+ void OnBenchmarkEnd(const BenchmarkResults& results) override;
+
+ private:
+ tflite::Interpreter* interpreter_;
+ tflite::profiling::Profiler profiler_;
+ tflite::profiling::ProfileSummarizer summarizer_;
+ bool has_profiles_;
+};
+
+// Benchmarks a TFLite model by running tflite interpreter.
+class BenchmarkTfLiteModel : public BenchmarkModel {
+ public:
+ BenchmarkTfLiteModel();
+ BenchmarkTfLiteModel(BenchmarkParams params);
+
+ std::vector<Flag> GetFlags() override;
+ void LogFlags() override;
+ bool ValidateFlags() override;
+ uint64_t ComputeInputBytes() override;
+ void Init() override;
+ void RunImpl() override;
+ virtual ~BenchmarkTfLiteModel() {}
+
+ struct InputLayerInfo {
+ std::string name;
+ std::vector<int> shape;
+ };
+
+ private:
+ std::unique_ptr<tflite::FlatBufferModel> model;
+ std::unique_ptr<tflite::Interpreter> interpreter;
+ std::vector<InputLayerInfo> inputs;
+ ProfilingListener profiling_listener_;
+};
+
+} // namespace benchmark
+} // namespace nnfw
+
+#endif //__TFLITE_BENCHMARK_MODEL_BENCHMARK_TFLITE_MODEL_H__
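[Editor's sketch] The benchmark's entry point is not part of this diff; assuming the usual TFLite-style driver, a `main()` wiring these classes together would look roughly like:

```
#include "benchmark_tflite_model.h"

int main(int argc, char** argv) {
  nnfw::benchmark::BenchmarkTfLiteModel benchmark;
  nnfw::benchmark::BenchmarkLoggingListener listener;
  benchmark.AddListener(&listener);
  // e.g. ./tflite_benchmark_model --graph=model.tflite --use_nnapi=true
  benchmark.Run(argc, argv);
  return 0;
}
```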
diff --git a/tools/tflite_benchmark_model/command_line_flags.cc b/tools/tflite_benchmark_model/command_line_flags.cc
new file mode 100644
index 000000000..eacca9f73
--- /dev/null
+++ b/tools/tflite_benchmark_model/command_line_flags.cc
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "command_line_flags.h"
+
+#include <cstring>
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace nnfw {
+namespace {
+
+template <typename T>
+std::string ToString(T val) {
+ std::ostringstream stream;
+ stream << val;
+ return stream.str();
+}
+
+bool ParseFlag(const std::string& arg, const std::string& flag,
+ const std::function<bool(const std::string&)>& parse_func,
+ bool* value_parsing_ok) {
+ *value_parsing_ok = true;
+ std::string flag_prefix = "--" + flag + "=";
+ if (arg.find(flag_prefix) != 0) {
+ return false;
+ }
+ bool has_value = arg.size() >= flag_prefix.size();
+ *value_parsing_ok = has_value;
+ if (has_value) {
+ *value_parsing_ok = parse_func(arg.substr(flag_prefix.size()));
+ }
+ return true;
+}
+
+template <typename T>
+bool ParseFlag(const std::string& flag_value,
+ const std::function<void(const T&)>& hook) {
+ std::istringstream stream(flag_value);
+ T read_value;
+ stream >> read_value;
+ if (!stream.eof() && !stream.good()) {
+ return false;
+ }
+ hook(read_value);
+ return true;
+}
+
+bool ParseBoolFlag(const std::string& flag_value,
+ const std::function<void(const bool&)>& hook) {
+ if (flag_value != "true" && flag_value != "false") {
+ return false;
+ }
+
+ hook(flag_value == "true");
+ return true;
+}
+} // namespace
+
+Flag::Flag(const char* name, const std::function<void(const int32_t&)>& hook,
+ int32_t default_value, const std::string& usage_text)
+ : name_(name),
+ type_(TYPE_INT32),
+ value_hook_([hook](const std::string& flag_value) {
+ return ParseFlag<int32_t>(flag_value, hook);
+ }),
+ default_for_display_(ToString(default_value)),
+ usage_text_(usage_text) {}
+
+Flag::Flag(const char* name, const std::function<void(const int64_t&)>& hook,
+ int64_t default_value, const std::string& usage_text)
+ : name_(name),
+ type_(TYPE_INT64),
+ value_hook_([hook](const std::string& flag_value) {
+ return ParseFlag<int64_t>(flag_value, hook);
+ }),
+ default_for_display_(ToString(default_value)),
+ usage_text_(usage_text) {}
+
+Flag::Flag(const char* name, const std::function<void(const float&)>& hook,
+ float default_value, const std::string& usage_text)
+ : name_(name),
+ type_(TYPE_FLOAT),
+ value_hook_([hook](const std::string& flag_value) {
+ return ParseFlag<float>(flag_value, hook);
+ }),
+ default_for_display_(ToString(default_value)),
+ usage_text_(usage_text) {}
+
+Flag::Flag(const char* name, const std::function<void(const bool&)>& hook,
+ bool default_value, const std::string& usage_text)
+ : name_(name),
+ type_(TYPE_BOOL),
+ value_hook_([hook](const std::string& flag_value) {
+ return ParseBoolFlag(flag_value, hook);
+ }),
+ default_for_display_(default_value ? "true" : "false"),
+ usage_text_(usage_text) {}
+
+Flag::Flag(const char* name,
+ const std::function<void(const std::string&)>& hook,
+ const std::string& default_value, const std::string& usage_text)
+ : name_(name),
+ type_(TYPE_STRING),
+ value_hook_([hook](const std::string& flag_value) {
+ hook(flag_value);
+ return true;
+ }),
+ default_for_display_(default_value),
+ usage_text_(usage_text) {}
+
+bool Flag::Parse(const std::string& arg, bool* value_parsing_ok) const {
+ return ParseFlag(arg, name_, value_hook_, value_parsing_ok);
+}
+
+std::string Flag::GetTypeName() const {
+ switch (type_) {
+ case TYPE_INT32:
+ return "int32";
+ case TYPE_INT64:
+ return "int64";
+ case TYPE_FLOAT:
+ return "float";
+ case TYPE_BOOL:
+ return "bool";
+ case TYPE_STRING:
+ return "string";
+ }
+
+ return "unknown";
+}
+
+/*static*/ bool Flags::Parse(int* argc, const char** argv,
+ const std::vector<Flag>& flag_list) {
+ bool result = true;
+ std::vector<const char*> unknown_flags;
+ for (int i = 1; i < *argc; ++i) {
+ if (std::string(argv[i]) == "--") {
+ while (i < *argc) {
+ unknown_flags.push_back(argv[i]);
+ ++i;
+ }
+ break;
+ }
+
+ bool was_found = false;
+ for (const Flag& flag : flag_list) {
+ bool value_parsing_ok;
+ was_found = flag.Parse(argv[i], &value_parsing_ok);
+ if (!value_parsing_ok) {
+ result = false;
+ }
+ if (was_found) {
+ break;
+ }
+ }
+ if (!was_found) {
+ unknown_flags.push_back(argv[i]);
+ }
+ }
+ int dst = 1; // Skip argv[0]
+ for (auto f : unknown_flags) {
+ argv[dst++] = f;
+ }
+ argv[dst++] = nullptr;
+ *argc = unknown_flags.size() + 1;
+ return result && (*argc < 2 || std::strcmp(argv[1], "--help") != 0);
+}
+
+/*static*/ std::string Flags::Usage(const std::string& cmdline,
+ const std::vector<Flag>& flag_list) {
+ std::ostringstream usage_text;
+ usage_text << "usage: " << cmdline << "\n";
+ if (!flag_list.empty()) {
+ usage_text << "Flags:\n";
+ }
+
+ for (const Flag& flag : flag_list) {
+ auto type_name = flag.GetTypeName();
+ usage_text << "\t";
+ usage_text << "--" << flag.name_ << "=" << flag.default_for_display_;
+ usage_text << "\t" << type_name << "\t" << flag.usage_text_ << "\n";
+ }
+ return usage_text.str();
+}
+
+} // namespace nnfw
diff --git a/tools/tflite_benchmark_model/command_line_flags.h b/tools/tflite_benchmark_model/command_line_flags.h
new file mode 100644
index 000000000..766417d87
--- /dev/null
+++ b/tools/tflite_benchmark_model/command_line_flags.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef __TFLITE_BENCHMARK_MODEL_COMMAND_LINE_FLAGS_H__
+#define __TFLITE_BENCHMARK_MODEL_COMMAND_LINE_FLAGS_H__
+
+#include <functional>
+#include <string>
+#include <vector>
+
+namespace nnfw {
+// A simple command-line argument parsing module.
+// Dependency free simplified port of core/util/command_line_flags.
+// This class is written for benchmarks and uses inefficient string
+// concatenation. This was written to avoid dependency on tensorflow/core/util
+// which transitively brings in a lot of other dependencies that are not
+// necessary for tflite benchmarking code.
+// The recommended way of using it is with local variables and an initializer
+// list of Flag objects, for example:
+//
+// int some_int = 10;
+// bool some_switch = false;
+// std::string some_name = "something";
+//
+// std::vector<nnfw::Flag> flag_list = {
+// Flag::CreateFlag("some_int", &some_int, "an integer that affects X"),
+// Flag::CreateFlag("some_switch", &some_switch, "a bool that affects Y"),
+// Flag::CreateFlag("some_name", &some_name, "a string that affects Z")
+// };
+// // Get usage message before Flags::Parse() to capture default values.
+// std::string usage = Flags::Usage(argv[0], flag_list);
+// bool parsed_values_ok = Flags::Parse(&argc, argv, flag_list);
+//
+// if (argc != 1 || !parsed_values_ok) {
+// ...output usage and error message...
+// }
+//
+// The argc and argv values are adjusted by the Parse function so all that
+// remains is the program name (at argv[0]) and any unknown arguments fill the
+// rest of the array. This means you can check for flags that weren't understood
+// by seeing if argc is greater than 1.
+// The result indicates if there were any errors parsing the values that were
+// passed to the command-line switches. For example, --some_int=foo would return
+// false because the argument is expected to be an integer.
+//
+// NOTE: Unlike gflags-style libraries, this library is intended to be
+// used in the `main()` function of your binary. It does not handle
+// flag definitions that are scattered around the source code.
+
+// A description of a single command line flag, holding its name, type, usage
+// text, and a pointer to the corresponding variable.
+class Flag {
+ public:
+ template <typename T>
+ static Flag CreateFlag(const char* name, T* val, const char* usage) {
+ return Flag(name, [val](const T& v) { *val = v; }, *val, usage);
+ }
+
+ Flag(const char* name, const std::function<void(const int32_t&)>& hook,
+ int32_t default_value, const std::string& usage_text);
+ Flag(const char* name, const std::function<void(const int64_t&)>& hook,
+ int64_t default_value, const std::string& usage_text);
+ Flag(const char* name, const std::function<void(const float&)>& hook,
+ float default_value, const std::string& usage_text);
+ Flag(const char* name, const std::function<void(const bool&)>& hook,
+ bool default_value, const std::string& usage_text);
+ Flag(const char* name, const std::function<void(const std::string&)>& hook,
+ const std::string& default_value, const std::string& usage_text);
+
+ private:
+ friend class Flags;
+
+ bool Parse(const std::string& arg, bool* value_parsing_ok) const;
+
+ std::string name_;
+ enum {
+ TYPE_INT32,
+ TYPE_INT64,
+ TYPE_BOOL,
+ TYPE_STRING,
+ TYPE_FLOAT,
+ } type_;
+
+ std::string GetTypeName() const;
+
+ std::function<bool(const std::string&)> value_hook_;
+ std::string default_for_display_;
+
+ std::string usage_text_;
+};
+
+class Flags {
+ public:
+ // Parse the command line represented by argv[0, ..., (*argc)-1] to find flag
+ // instances matching flags in flaglist[]. Update the variables associated
+ // with matching flags, and remove the matching arguments from (*argc, argv).
+ // Return true iff all recognized flag values were parsed correctly, and the
+ // first remaining argument is not "--help".
+ static bool Parse(int* argc, const char** argv,
+ const std::vector<Flag>& flag_list);
+
+ // Return a usage message with command line cmdline, and the
+ // usage_text strings in flag_list[].
+ static std::string Usage(const std::string& cmdline,
+ const std::vector<Flag>& flag_list);
+};
+
+} // namespace nnfw
+
+#endif // __TFLITE_BENCHMARK_MODEL_COMMAND_LINE_FLAGS_H__
+
+
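[Editor's sketch] A runnable sketch of the parsing flow documented in the header above (flag names and defaults are illustrative):

```
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

#include "command_line_flags.h"

int main(int argc, char** argv) {
  int32_t num_runs = 50;
  bool verbose = false;

  std::vector<nnfw::Flag> flag_list = {
      nnfw::Flag::CreateFlag("num_runs", &num_runs, "how many times to run"),
      nnfw::Flag::CreateFlag("verbose", &verbose, "print extra output"),
  };

  std::string usage = nnfw::Flags::Usage(argv[0], flag_list);
  bool ok = nnfw::Flags::Parse(&argc, const_cast<const char**>(argv), flag_list);
  if (!ok || argc > 1) {  // a leftover argument was not recognized
    std::cerr << usage;
    return 1;
  }
  // e.g. ./tool --num_runs=100 --verbose=true
  std::cout << "num_runs=" << num_runs << ", verbose=" << verbose << "\n";
  return 0;
}
```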
diff --git a/tools/tflite_benchmark_model/logging.h b/tools/tflite_benchmark_model/logging.h
new file mode 100644
index 000000000..e694a0926
--- /dev/null
+++ b/tools/tflite_benchmark_model/logging.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef __TFLITE_BENCHMARK_MODEL_LOGGING_H_
+#define __TFLITE_BENCHMARK_MODEL_LOGGING_H_
+
+// LOG and CHECK macros for benchmarks.
+
+#include <cstdlib>
+#include <iostream>
+#include <sstream>
+
+namespace nnfw {
+namespace logging {
+// A wrapper that logs to stderr.
+//
+// Used for TFLITE_LOG and TFLITE_BENCHMARK_CHECK macros.
+class LoggingWrapper {
+ public:
+ enum class LogSeverity : int {
+ INFO = 0,
+ WARN = 1,
+ ERROR = 2,
+ FATAL = 3,
+ };
+ LoggingWrapper(LogSeverity severity)
+ : severity_(severity), should_log_(true) {}
+ LoggingWrapper(LogSeverity severity, bool log)
+ : severity_(severity), should_log_(log) {}
+ std::stringstream& Stream() { return stream_; }
+ ~LoggingWrapper() {
+ if (should_log_) {
+ std::cerr << stream_.str() << std::endl;
+ if (severity_ == LogSeverity::FATAL) {
+ std::flush(std::cerr);
+ std::abort();
+ }
+ }
+ }
+
+ private:
+ std::stringstream stream_;
+ LogSeverity severity_;
+ bool should_log_;
+};
+
+} // namespace logging
+
+} // namespace nnfw
+
+#define TFLITE_LOG(severity) \
+ nnfw::logging::LoggingWrapper( \
+ nnfw::logging::LoggingWrapper::LogSeverity::severity) \
+ .Stream()
+
+#define TFLITE_BENCHMARK_CHECK(condition) \
+ nnfw::logging::LoggingWrapper( \
+ nnfw::logging::LoggingWrapper::LogSeverity::FATAL, \
+ (condition) ? false : true) \
+ .Stream()
+
+#define TFLITE_BENCHMARK_CHECK_EQ(a, b) TFLITE_BENCHMARK_CHECK(a == b)
+
+#endif // __TFLITE_BENCHMARK_MODEL_LOGGING_H_
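[Editor's sketch] Usage sketch for the macros above (values illustrative):

```
#include "logging.h"

int main() {
  int num_runs = 50;
  TFLITE_LOG(INFO) << "Running " << num_runs << " iterations";
  // Logs and abort()s only when the condition is false:
  TFLITE_BENCHMARK_CHECK(num_runs > 0) << "num_runs must be positive";
  TFLITE_BENCHMARK_CHECK_EQ(num_runs, 50);
  return 0;
}
```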
diff --git a/tools/tflite_benchmark_model/profile_summarizer.cc b/tools/tflite_benchmark_model/profile_summarizer.cc
new file mode 100644
index 000000000..4d12b50af
--- /dev/null
+++ b/tools/tflite_benchmark_model/profile_summarizer.cc
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/contrib/lite/profiling/profile_summarizer.h"
+
+#include <sstream>
+
+#include "tensorflow/contrib/lite/schema/schema_generated.h"
+
+namespace tflite {
+namespace profiling {
+namespace {
+
+using Detail = tensorflow::StatsCalculator::Detail;
+
+struct OperatorDetails {
+ std::string name;
+ std::vector<std::string> inputs;
+ std::vector<std::string> outputs;
+};
+
+std::string GetTensorName(const tflite::Interpreter& interpreter,
+ int tensor_index) {
+ const auto tensor = interpreter.tensor(tensor_index);
+ if (tensor == nullptr || tensor->name == nullptr) {
+ return "Unknown";
+ }
+ return tensor->name;
+}
+std::vector<std::string> GetTensorNames(const tflite::Interpreter& interpreter,
+ const TfLiteIntArray* tensor_indices) {
+ std::vector<std::string> tensors;
+ tensors.reserve(tensor_indices->size);
+ for (int i = 0; i < tensor_indices->size; i++) {
+ tensors.push_back(GetTensorName(interpreter, tensor_indices->data[i]));
+ }
+ return tensors;
+}
+
+std::string ToString(const std::vector<std::string>& str_vector) {
+ std::stringstream stream;
+ stream << "[";
+ bool first = true;
+ for (const auto& s : str_vector) {
+ if (!first) {
+ stream << ", ";
+ } else {
+ first = false;
+ }
+ stream << s;
+ }
+ stream << "]";
+ return stream.str();
+}
+
+OperatorDetails GetOperatorDetails(const tflite::Interpreter& interpreter,
+ int node_index) {
+ auto node_reg = interpreter.node_and_registration(node_index);
+ auto inputs = node_reg->first.inputs;
+ auto outputs = node_reg->first.outputs;
+ int code = node_reg->second.builtin_code;
+ const char* op_name = nullptr;
+ if (code == tflite::BuiltinOperator_CUSTOM) {
+ const char* custom_name = node_reg->second.custom_name;
+ op_name = custom_name ? custom_name : "UnknownCustomOp";
+ } else {
+ op_name = tflite::EnumNamesBuiltinOperator()[code];
+ }
+ OperatorDetails details;
+ details.name = op_name;
+ details.inputs = GetTensorNames(interpreter, inputs);
+ details.outputs = GetTensorNames(interpreter, outputs);
+ return details;
+}
+
+} // namespace
+
+ProfileSummarizer::ProfileSummarizer()
+ : stats_calculator_(new ::tensorflow::StatsCalculator(
+ tensorflow::StatSummarizerOptions())) {}
+
+void ProfileSummarizer::ProcessProfiles(
+ const std::vector<const ProfileEvent*>& profile_stats,
+ const tflite::Interpreter& interpreter) {
+ std::vector<const ProfileEvent*> events;
+ std::copy_if(profile_stats.begin(), profile_stats.end(),
+ std::back_inserter(events), [](const ProfileEvent* e) {
+ return e->event_type ==
+ ProfileEvent::EventType::OPERATOR_INVOKE_EVENT &&
+ e->end_timestamp_us >= e->begin_timestamp_us;
+ });
+  // Sort by begin_time.
+ std::sort(events.begin(), events.end(),
+ [](const ProfileEvent* const& a, const ProfileEvent* const& b) {
+ return a->begin_timestamp_us < b->begin_timestamp_us;
+ });
+ if (events.empty()) {
+ return;
+ }
+
+ int64_t base_start_us = events[0]->begin_timestamp_us;
+ int node_num = 0;
+ int64_t curr_total_us = 0;
+ std::map<std::string, Detail> details;
+ int prev_op_idx = -1;
+ int seq_no = 1;
+ for (auto event : events) {
+ auto op_details = GetOperatorDetails(interpreter, event->event_metadata);
+ bool is_continued = (prev_op_idx == event->event_metadata);
+ seq_no = is_continued ? seq_no + 1 : 1;
+ auto node_name = ToString(op_details.outputs) + "#" + std::to_string(seq_no);
+ auto result = details.emplace(node_name, Detail());
+ Detail* detail = &(result.first->second);
+ detail->start_us.UpdateStat(event->begin_timestamp_us - base_start_us);
+ int64_t node_exec_time =
+ event->end_timestamp_us - event->begin_timestamp_us;
+ detail->rel_end_us.UpdateStat(node_exec_time);
+ curr_total_us += node_exec_time;
+ ++node_num;
+
+ if (result.second) {
+ detail->name = node_name;
+ detail->type = op_details.name;
+ detail->run_order = node_num;
+ detail->times_called = 0;
+ }
+ if (!is_continued) {
+ ++detail->times_called;
+ }
+ prev_op_idx = event->event_metadata;
+ }
+ stats_calculator_->UpdateDetails(details);
+ stats_calculator_->UpdateRunTotalUs(curr_total_us);
+}
+} // namespace profiling
+} // namespace tflite
diff --git a/tools/tflite_benchmark_model/profile_summarizer.h b/tools/tflite_benchmark_model/profile_summarizer.h
new file mode 100644
index 000000000..a529ff874
--- /dev/null
+++ b/tools/tflite_benchmark_model/profile_summarizer.h
@@ -0,0 +1,55 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_CONTRIB_LITE_PROFILING_PROFILE_SUMMARIZER_H_
+#define TENSORFLOW_CONTRIB_LITE_PROFILING_PROFILE_SUMMARIZER_H_
+
+#include <vector>
+
+#include "tensorflow/contrib/lite/interpreter.h"
+#include "tensorflow/contrib/lite/profiling/profiler.h"
+#include "tensorflow/core/util/stats_calculator.h"
+
+namespace tflite {
+namespace profiling {
+
+// Creates a summary of operator invocations in the interpreter.
+class ProfileSummarizer {
+ public:
+ ProfileSummarizer();
+ virtual ~ProfileSummarizer() {}
+
+ // Process profile events to update statistics for operator invocations.
+ void ProcessProfiles(const std::vector<const ProfileEvent*>& profile_stats,
+ const tflite::Interpreter& interpreter);
+
+ // Returns a string detailing the accumulated runtime stats in a tab-separated
+ // format which can be pasted into a spreadsheet for further analysis.
+ std::string GetOutputString() const {
+ return stats_calculator_->GetOutputString();
+ }
+
+ std::string GetShortSummary() const {
+ return stats_calculator_->GetShortSummary();
+ }
+
+ private:
+ std::unique_ptr<tensorflow::StatsCalculator> stats_calculator_;
+};
+
+} // namespace profiling
+} // namespace tflite
+
+#endif // TENSORFLOW_CONTRIB_LITE_PROFILING_PROFILE_SUMMARIZER_H_
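[Editor's sketch] For context, a sketch of the summarizer driven directly, mirroring what `ProfilingListener` does in benchmark_tflite_model.cc (`PrintProfile` is a hypothetical helper; the interpreter is assumed to be built and allocated elsewhere):

```
#include <iostream>

#include "tensorflow/contrib/lite/interpreter.h"
#include "tensorflow/contrib/lite/profiling/profile_summarizer.h"
#include "tensorflow/contrib/lite/profiling/profiler.h"

// Profiles a single Invoke() and prints per-operator statistics.
void PrintProfile(tflite::Interpreter* interpreter) {
  tflite::profiling::Profiler profiler;
  interpreter->SetProfiler(&profiler);

  profiler.StartProfiling();
  interpreter->Invoke();
  profiler.StopProfiling();

  tflite::profiling::ProfileSummarizer summarizer;
  summarizer.ProcessProfiles(profiler.GetProfileEvents(), *interpreter);
  std::cout << summarizer.GetOutputString();  // tab-separated, spreadsheet-friendly
}
```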
diff --git a/tools/tflite_examples/CMakeLists.txt b/tools/tflite_examples/CMakeLists.txt
new file mode 100644
index 000000000..5fe33302a
--- /dev/null
+++ b/tools/tflite_examples/CMakeLists.txt
@@ -0,0 +1,2 @@
+add_executable(tflite_conv_example "src/conv.cpp")
+target_link_libraries(tflite_conv_example tensorflow-lite ${LIB_PTHREAD} dl nnfw_support_tflite)
diff --git a/tools/tflite_examples/src/conv.cpp b/tools/tflite_examples/src/conv.cpp
new file mode 100644
index 000000000..a647346ee
--- /dev/null
+++ b/tools/tflite_examples/src/conv.cpp
@@ -0,0 +1,330 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+
+#include <iostream>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+namespace vector
+{
+
+template <typename T> struct View
+{
+ virtual ~View() = default;
+
+ virtual int32_t size(void) const = 0;
+ virtual T at(uint32_t off) const = 0;
+};
+}
+
+namespace feature
+{
+
+struct Shape
+{
+ int32_t C;
+ int32_t H;
+ int32_t W;
+};
+
+template <typename T> struct View
+{
+ virtual ~View() = default;
+
+ virtual const Shape &shape(void) const = 0;
+ virtual T at(uint32_t ch, uint32_t row, uint32_t col) const = 0;
+};
+}
+
+namespace kernel
+{
+
+struct Shape
+{
+ int32_t N;
+ int32_t C;
+ int32_t H;
+ int32_t W;
+};
+
+template <typename T> struct View
+{
+ virtual ~View() = default;
+
+ virtual const Shape &shape(void) const = 0;
+ virtual T at(uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) const = 0;
+};
+}
+
+const int32_t N = 1;
+const int32_t C = 2;
+
+class SampleBiasObject final : public vector::View<float>
+{
+public:
+ SampleBiasObject() : _size(N)
+ {
+ // DO NOTHING
+ }
+
+public:
+ int32_t size(void) const override { return _size; }
+
+ float at(uint32_t off) const override { return 0.0f; }
+
+private:
+ int32_t _size;
+};
+
+class SampleFeatureObject final : public feature::View<float>
+{
+public:
+ SampleFeatureObject()
+ {
+ _shape.C = C;
+ _shape.H = 3;
+ _shape.W = 4;
+
+ const uint32_t size = _shape.C * _shape.H * _shape.W;
+
+ for (uint32_t off = 0; off < size; ++off)
+ {
+ _value.emplace_back(off);
+ }
+
+ assert(_value.size() == size);
+ }
+
+public:
+  const feature::Shape &shape(void) const override { return _shape; }
+
+ float at(uint32_t ch, uint32_t row, uint32_t col) const override
+ {
+ return _value.at(ch * _shape.H * _shape.W + row * _shape.W + col);
+ }
+
+public:
+ float &at(uint32_t ch, uint32_t row, uint32_t col)
+ {
+ return _value.at(ch * _shape.H * _shape.W + row * _shape.W + col);
+ }
+
+private:
+ feature::Shape _shape;
+ std::vector<float> _value;
+};
+
+class SampleKernelObject final : public kernel::View<float>
+{
+public:
+ SampleKernelObject()
+ {
+ _shape.N = N;
+ _shape.C = C;
+ _shape.H = 3;
+ _shape.W = 4;
+
+ const uint32_t size = _shape.N * _shape.C * _shape.H * _shape.W;
+
+ for (uint32_t off = 0; off < size; ++off)
+ {
+ _value.emplace_back(off);
+ }
+
+ assert(_value.size() == size);
+ }
+
+public:
+  const kernel::Shape &shape(void) const override { return _shape; }
+
+ float at(uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) const override
+ {
+ return _value.at(nth * _shape.C * _shape.H * _shape.W + ch * _shape.H * _shape.W +
+ row * _shape.W + col);
+ }
+
+private:
+ kernel::Shape _shape;
+ std::vector<float> _value;
+};
+
+int main(int argc, char **argv)
+{
+ const SampleFeatureObject ifm;
+ const SampleKernelObject kernel;
+ const SampleBiasObject bias;
+
+ const int32_t IFM_C = ifm.shape().C;
+ const int32_t IFM_H = ifm.shape().H;
+ const int32_t IFM_W = ifm.shape().W;
+
+ const int32_t KER_N = kernel.shape().N;
+ const int32_t KER_C = kernel.shape().C;
+ const int32_t KER_H = kernel.shape().H;
+ const int32_t KER_W = kernel.shape().W;
+
+ const int32_t OFM_C = kernel.shape().N;
+ const int32_t OFM_H = (IFM_H - KER_H) + 1;
+ const int32_t OFM_W = (IFM_W - KER_W) + 1;
+
+ // Assumption on this example
+ assert(IFM_C == KER_C);
+ assert(KER_N == bias.size());
+
+ // Comment from 'context.h'
+ //
+ // Parameters for asymmetric quantization. Quantized values can be converted
+ // back to float using:
+ // real_value = scale * (quantized_value - zero_point);
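+  // e.g. with scale = 0.5 and zero_point = 128, quantized 130 -> 0.5 * (130 - 128) = 1.0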
+ //
+ // Q: Is this necessary?
+ TfLiteQuantizationParams quantization;
+
+ quantization.scale = 1;
+ quantization.zero_point = 0;
+
+ Interpreter interp;
+
+  // On an AddTensors(N) call, the T/F Lite interpreter creates N tensors whose indices are in [0, N)
+ interp.AddTensors(5);
+
+ // Configure OFM
+ interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
+ {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
+
+ // Configure IFM
+ interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
+ {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
+
+ // Configure Filter
+ const uint32_t kernel_size = KER_N * KER_C * KER_H * KER_W;
+ float kernel_data[kernel_size] = {
+ 0.0f,
+ };
+
+ // Fill kernel data in NHWC order
+ {
+ uint32_t off = 0;
+
+ for (uint32_t nth = 0; nth < KER_N; ++nth)
+ {
+ for (uint32_t row = 0; row < KER_H; ++row)
+ {
+ for (uint32_t col = 0; col < KER_W; ++col)
+ {
+ for (uint32_t ch = 0; ch < KER_C; ++ch)
+ {
+ const auto value = kernel.at(nth, ch, row, col);
+ kernel_data[off++] = value;
+ }
+ }
+ }
+ }
+
+ assert(kernel_size == off);
+ }
+
+ interp.SetTensorParametersReadOnly(
+ 2, kTfLiteFloat32 /* type */, "filter" /* name */, {KER_N, KER_H, KER_W, KER_C} /* dims */,
+ quantization, reinterpret_cast<const char *>(kernel_data), sizeof(kernel_data));
+
+ // Configure Bias
+ const uint32_t bias_size = bias.size();
+ float bias_data[bias_size] = {
+ 0.0f,
+ };
+
+ // Fill bias data
+ for (uint32_t off = 0; off < bias.size(); ++off)
+ {
+ bias_data[off] = bias.at(off);
+ }
+
+ interp.SetTensorParametersReadOnly(3, kTfLiteFloat32 /* type */, "bias" /* name */,
+ {bias.size()} /* dims */, quantization,
+ reinterpret_cast<const char *>(bias_data), sizeof(bias_data));
+
+ // Add Convolution Node
+ //
+  // NOTE AddNodeWithParameters takes ownership of param and deallocates it with free
+ // So, param should be allocated with malloc
+ TfLiteConvParams *param = reinterpret_cast<TfLiteConvParams *>(malloc(sizeof(TfLiteConvParams)));
+
+ param->padding = kTfLitePaddingValid;
+ param->stride_width = 1;
+ param->stride_height = 1;
+ param->activation = kTfLiteActRelu;
+
+ // Run Convolution and store its result into Tensor #0
+ // - Read IFM from Tensor #1
+ // - Read Filter from Tensor #2,
+ // - Read Bias from Tensor #3
+ interp.AddNodeWithParameters({1, 2, 3}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+ BuiltinOpResolver().FindOp(BuiltinOperator_CONV_2D, 1));
+
+ // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
+ interp.SetInputs({1});
+ interp.SetOutputs({0});
+
+ // Let's use NNAPI (if possible)
+ interp.UseNNAPI(true);
+
+ // Allocate Tensor
+ interp.AllocateTensors();
+
+ // Fill IFM data in HWC order
+ {
+ uint32_t off = 0;
+
+ for (uint32_t row = 0; row < ifm.shape().H; ++row)
+ {
+ for (uint32_t col = 0; col < ifm.shape().W; ++col)
+ {
+ for (uint32_t ch = 0; ch < ifm.shape().C; ++ch)
+ {
+ const auto value = ifm.at(ch, row, col);
+ interp.typed_input_tensor<float>(0)[off++] = value;
+ }
+ }
+ }
+ }
+
+ // Let's Rock-n-Roll!
+ interp.Invoke();
+
+ // Print OFM
+ {
+ uint32_t off = 0;
+
+ for (uint32_t row = 0; row < OFM_H; ++row)
+ {
+ for (uint32_t col = 0; col < OFM_W; ++col)
+ {
+ for (uint32_t ch = 0; ch < kernel.shape().N; ++ch)
+ {
+ std::cout << interp.typed_output_tensor<float>(0)[off++] << std::endl;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/tools/tflite_run/CMakeLists.txt b/tools/tflite_run/CMakeLists.txt
new file mode 100644
index 000000000..c9f72acee
--- /dev/null
+++ b/tools/tflite_run/CMakeLists.txt
@@ -0,0 +1,26 @@
+list(APPEND TFLITE_RUN_SRCS "src/tflite_run.cc")
+list(APPEND TFLITE_RUN_SRCS "src/bin_image.cc")
+list(APPEND TFLITE_RUN_SRCS "src/args.cc")
+list(APPEND TFLITE_RUN_SRCS "src/tensor_dumper.cc")
+list(APPEND TFLITE_RUN_SRCS "src/tensor_loader.cc")
+
+add_executable(tflite_run ${TFLITE_RUN_SRCS})
+target_include_directories(tflite_run PRIVATE src)
+target_link_libraries(tflite_run tensorflow-lite ${LIB_PTHREAD} dl nnfw_support_tflite)
+target_link_libraries(tflite_run boost_program_options boost_system boost_filesystem)
+
+install(TARGETS tflite_run DESTINATION bin)
+
+# TEST BUILD
+nnfw_find_package(GTest)
+
+if(NOT GTest_FOUND)
+ return()
+endif(NOT GTest_FOUND)
+
+## Add test cpp file
+add_executable(tflite_test src/tflite_test.cc)
+## Link test executable against gtest & gtest_main
+target_link_libraries(tflite_test gtest gtest_main ${LIB_PTHREAD})
+## install test binary for packaging
+install(TARGETS tflite_test DESTINATION unittest)
diff --git a/tools/tflite_run/README.md b/tools/tflite_run/README.md
new file mode 100644
index 000000000..35d2b6497
--- /dev/null
+++ b/tools/tflite_run/README.md
@@ -0,0 +1,91 @@
+# tflite_run
+
+A simple TensorFlow Lite runner. It measures the elapsed time and can optionally dump the input/output tensors or verify them.
+
+## Usage
+
+### Simple run
+
+This will run the model with random input data.
+
+```
+$ ./tflite_run model.tflite
+```
+
+The output would look like:
+
+```
+input tensor indices = [0,]
+Input image size is smaller than the size required by the model. Input will not be set.
+output tensor indices = [308(max:984),]
+Prepare takes 0.00126718 seconds
+Invoke takes 7.09527 seconds
+```
+
+### Specifying input feature map
+
+We can specify an input feature map, but only preprocessed data is accepted, which means image files must be converted to this binary form first.
+
+TODO: Add input image preprocessing instructions
+
+```
+$ ./tflite_run model.tflite -i binary_input_file
+```
+
+### Dump the input and output tensors
+
+Dump the input and output tensors to a file.
+```
+$ ./tflite_run model.tflite --dump golden
+```
+
+This is usually done for later verification. The tensors are written to a file named "golden".
+
+### Compare with the saved outputs
+
+With the `--compare` option, the results from `tflite_run` are compared against the tensors saved in the given binary file.
+
+```
+$ ls golden
+golden
+$ ./tflite_run model.tflite --compare golden
+```
+
+The output would look like:
+
+```
+input tensor indices = [0,]
+Input image size is smaller than the size required by the model. Input will not be set.
+output tensor indices = [308(max:984),]
+Prepare takes 0.00126718 seconds
+Invoke takes 7.09527 seconds
+========================================
+Comparing the results with "golden".
+========================================
+ Tensor #308: UNMATCHED
+ 1 diffs are detected
+ Max absolute diff at [0, 0]
+ expected: 99
+ obtained: 0.000139008
+ absolute diff: 98.9999
+ Max relative diff at [0, 1007]
+ expected: 7.01825e-33
+ obtained: 0.000139011
+ relative diff: 1
+ (tolerance level = 8.38861e+06)
+```
+
+If the `--compare` option is given, the exit code depends on the comparison result: 0 for matched, non-zero for unmatched.
+
+## How Verification Works
+
+For verification, we may follow these steps (a combined example is shown after the list):
+
+1. Generate and store the verification data (run with option `--dump`)
+ 1. Input Tensor does not matter as we will keep inputs along with outputs
+ 1. Interpreter.Invoke()
+ 1. Dump input tensors and output tensors to a file
+1. Give the dumped file for other runtime that we want to verify (run with option `--compare`)
+ 1. Set interpreter's input to input tensor data from the file
+ 1. Interpreter.Invoke()
+ 1. Compare the results with output tensor data from the file
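+
+For example, a complete round trip with the runner itself (file names illustrative):
+
+```
+$ ./tflite_run model.tflite --dump golden     # reference run; stores inputs and outputs
+$ ./tflite_run model.tflite --compare golden  # verification run
+$ echo $?                                     # 0 if all output tensors matched
+```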
diff --git a/tools/tflite_run/src/args.cc b/tools/tflite_run/src/args.cc
new file mode 100644
index 000000000..6aebbbbd7
--- /dev/null
+++ b/tools/tflite_run/src/args.cc
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "args.h"
+
+#include <iostream>
+
+#include <boost/filesystem.hpp>
+
+namespace TFLiteRun
+{
+
+Args::Args(const int argc, char **argv)
+{
+ Initialize();
+ Parse(argc, argv);
+}
+
+void Args::Initialize(void)
+{
+
+ // General options
+ po::options_description general("General options");
+
+ // clang-format off
+ general.add_options()
+ ("help,h", "Display available options")
+ ("input,i", po::value<std::string>(&_input_filename)->default_value(""), "Input filename")
+ ("dump,d", po::value<std::string>()->default_value(""), "Output filename")
+ ("compare,c", po::value<std::string>()->default_value(""), "filename to be compared with")
+ ("tflite", po::value<std::string>()->required());
+ // clang-format on
+
+ _options.add(general);
+ _positional.add("tflite", 1);
+}
+
+void Args::Parse(const int argc, char **argv)
+{
+ po::variables_map vm;
+ po::store(po::command_line_parser(argc, argv).options(_options).positional(_positional).run(),
+ vm);
+ po::notify(vm);
+
+ {
+ auto conflicting_options = [&](const std::string &o1, const std::string &o2) {
+ if ((vm.count(o1) && !vm[o1].defaulted()) && (vm.count(o2) && !vm[o2].defaulted()))
+ {
+ throw boost::program_options::error(std::string("Two options '") + o1 + "' and '" + o2 +
+ "' cannot be given at once.");
+ }
+ };
+
+ conflicting_options("input", "compare");
+ }
+
+ if (vm.count("help"))
+ {
+ std::cout << "tflite_run\n\n";
+ std::cout << "Usage: " << argv[0] << " <.tflite> [<options>]\n\n";
+ std::cout << _options;
+ std::cout << "\n";
+
+ exit(0);
+ }
+
+ if (vm.count("input"))
+ {
+ _input_filename = vm["input"].as<std::string>();
+
+ if (!_input_filename.empty())
+ {
+ if (!boost::filesystem::exists(_input_filename))
+ {
+ std::cerr << "input image file not found: " << _input_filename << "\n";
+ }
+ }
+ }
+
+ if (vm.count("dump"))
+ {
+ _dump_filename = vm["dump"].as<std::string>();
+ }
+
+ if (vm.count("compare"))
+ {
+ _compare_filename = vm["compare"].as<std::string>();
+ }
+
+ if (vm.count("tflite"))
+ {
+ _tflite_filename = vm["tflite"].as<std::string>();
+
+ if (_tflite_filename.empty())
+ {
+ // TODO Print usage instead of the below message
+ std::cerr << "Please specify tflite file. Run with `--help` for usage."
+ << "\n";
+
+ exit(1);
+ }
+ else
+ {
+ if (!boost::filesystem::exists(_tflite_filename))
+ {
+ std::cerr << "tflite file not found: " << _tflite_filename << "\n";
+ }
+ }
+ }
+}
+
+} // end of namespace TFLiteRun
diff --git a/tools/tflite_run/src/args.h b/tools/tflite_run/src/args.h
new file mode 100644
index 000000000..7b270d4ee
--- /dev/null
+++ b/tools/tflite_run/src/args.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_RUN_ARGS_H__
+#define __TFLITE_RUN_ARGS_H__
+
+#include <string>
+#include <boost/program_options.hpp>
+
+namespace po = boost::program_options;
+
+namespace TFLiteRun
+{
+
+class Args
+{
+public:
+ Args(const int argc, char **argv);
+ void print(void);
+
+ const std::string &getTFLiteFilename(void) const { return _tflite_filename; }
+ const std::string &getInputFilename(void) const { return _input_filename; }
+ const std::string &getDumpFilename(void) const { return _dump_filename; }
+ const std::string &getCompareFilename(void) const { return _compare_filename; }
+
+private:
+ void Initialize();
+ void Parse(const int argc, char **argv);
+
+private:
+ po::positional_options_description _positional;
+ po::options_description _options;
+
+ std::string _tflite_filename;
+ std::string _input_filename;
+ std::string _dump_filename;
+ std::string _compare_filename;
+};
+
+} // end of namespace TFLiteRun
+
+#endif // __TFLITE_RUN_ARGS_H__
diff --git a/tools/tflite_run/src/bin_image.cc b/tools/tflite_run/src/bin_image.cc
new file mode 100644
index 000000000..16d4c94f7
--- /dev/null
+++ b/tools/tflite_run/src/bin_image.cc
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+#include <fstream>
+
+#include "bin_image.h"
+
+BinImage::BinImage(unsigned int width, unsigned int height, unsigned int channels)
+ : _width(width), _height(height), _channels(channels)
+{
+}
+
+BinImage::~BinImage() {}
+
+void BinImage::loadImage(const std::string &filename)
+{
+ std::ifstream fin(filename);
+
+ if (!fin)
+ {
+ std::cerr << "image filename is not specified. "
+ << "Input image will not be set." << std::endl;
+ return;
+ }
+
+ _image.reserve(_width * _height * _channels);
+
+  // Assumption: the binary image is stored in [H,W,C] order
+ for (unsigned int i = 0; i < _width * _height * _channels; ++i)
+ _image.push_back(fin.get());
+}
+
+void BinImage::AssignTensor(TfLiteTensor *t)
+{
+ float *p = t->data.f;
+ const int IMAGE_MEAN = 128;
+ const float IMAGE_STD = 128.0f;
+
+ // to prevent runtime exception
+ if (_image.size() < _width * _height * _channels)
+ {
+ std::cerr << "Input image size is smaller than the size required by the model."
+ << " Input will not be set." << std::endl;
+ return;
+ }
+
+  // The tensor layout is NHWC, so iterate over height (y) in the outer loop to keep
+  // the sequential writes consistent with the [H,W,C] read order.
+  for (unsigned int y = 0; y < _height; ++y)
+  {
+    for (unsigned int x = 0; x < _width; ++x)
+    {
+      for (unsigned int c = 0; c < _channels; ++c)
+      {
+        *p++ = (_image[y * _width * _channels + x * _channels + c] - IMAGE_MEAN) / IMAGE_STD;
+      }
+    }
+  }
+}
diff --git a/tools/tflite_run/src/bin_image.h b/tools/tflite_run/src/bin_image.h
new file mode 100644
index 000000000..845011be6
--- /dev/null
+++ b/tools/tflite_run/src/bin_image.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFLITE_RUN_BIN_IMAGE_H__
+#define __TFLITE_RUN_BIN_IMAGE_H__
+
+#include <string>
+#include <vector>
+
+#include "tensorflow/contrib/lite/context.h"
+
+class BinImage
+{
+public:
+  BinImage(unsigned int width, unsigned int height, unsigned int channels);
+ ~BinImage();
+
+ void loadImage(const std::string &filename);
+
+ void AssignTensor(TfLiteTensor *t);
+
+private:
+ unsigned int _width;
+ unsigned int _height;
+ unsigned int _channels;
+
+ std::vector<unsigned char> _image;
+};
+
+#endif // __TFLITE_RUN_BIN_IMAGE_H__
diff --git a/tools/tflite_run/src/tensor_dumper.cc b/tools/tflite_run/src/tensor_dumper.cc
new file mode 100644
index 000000000..8568c9b67
--- /dev/null
+++ b/tools/tflite_run/src/tensor_dumper.cc
@@ -0,0 +1,54 @@
+#include "tensor_dumper.h"
+
+#include <fstream>
+#include <iostream>
+#include <cstring>
+
+#include "tensorflow/contrib/lite/interpreter.h"
+
+namespace TFLiteRun
+{
+
+TensorDumper::TensorDumper()
+{
+ // DO NOTHING
+}
+
+void TensorDumper::addTensors(tflite::Interpreter &interpreter, const std::vector<int> &indices)
+{
+ for (const auto &o : indices)
+ {
+ const TfLiteTensor *tensor = interpreter.tensor(o);
+ int size = tensor->bytes;
+ std::vector<char> buffer;
+ buffer.resize(size);
+ memcpy(buffer.data(), tensor->data.raw, size);
+ _tensors.emplace_back(o, std::move(buffer));
+ }
+}
+
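+// File layout (read back by TensorLoader::load):
+//   [uint32 tensor count][int32 index x count][raw tensor bytes, in order]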
+void TensorDumper::dump(const std::string &filename) const
+{
+ // TODO Handle file open/write error
+ std::ofstream file(filename, std::ios::out | std::ios::binary);
+
+ // Write number of tensors
+ uint32_t num_tensors = static_cast<uint32_t>(_tensors.size());
+ file.write(reinterpret_cast<const char *>(&num_tensors), sizeof(num_tensors));
+
+ // Write tensor indices
+ for (const auto &t : _tensors)
+ {
+ file.write(reinterpret_cast<const char *>(&t._index), sizeof(int));
+ }
+
+ // Write data
+ for (const auto &t : _tensors)
+ {
+ file.write(t._data.data(), t._data.size());
+ }
+
+ file.close();
+}
+
+} // end of namespace TFLiteRun
diff --git a/tools/tflite_run/src/tensor_dumper.h b/tools/tflite_run/src/tensor_dumper.h
new file mode 100644
index 000000000..2805f1076
--- /dev/null
+++ b/tools/tflite_run/src/tensor_dumper.h
@@ -0,0 +1,38 @@
+#ifndef __TFLITE_RUN_TENSOR_DUMPER_H__
+#define __TFLITE_RUN_TENSOR_DUMPER_H__
+
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace tflite
+{
+class Interpreter;
+}
+
+namespace TFLiteRun
+{
+
+class TensorDumper
+{
+private:
+ struct Tensor
+ {
+ int _index;
+ std::vector<char> _data;
+
+ Tensor(int index, std::vector<char> &&data) : _index(index), _data(std::move(data)) {}
+ };
+
+public:
+ TensorDumper();
+ void addTensors(tflite::Interpreter &interpreter, const std::vector<int> &indices);
+ void dump(const std::string &filename) const;
+
+private:
+ std::vector<Tensor> _tensors;
+};
+
+} // end of namespace TFLiteRun
+
+#endif // __TFLITE_RUN_TENSOR_DUMPER_H__
diff --git a/tools/tflite_run/src/tensor_loader.cc b/tools/tflite_run/src/tensor_loader.cc
new file mode 100644
index 000000000..678ff083e
--- /dev/null
+++ b/tools/tflite_run/src/tensor_loader.cc
@@ -0,0 +1,67 @@
+#include "tensor_loader.h"
+
+#include <assert.h>
+
+#include <fstream>
+
+#include "util/tensor/Shape.h"
+
+namespace TFLiteRun
+{
+
+TensorLoader::TensorLoader(tflite::Interpreter &interpreter)
+ : _interpreter(interpreter), _raw_data(nullptr)
+{
+}
+
+void TensorLoader::load(const std::string &filename)
+{
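+  // Expected layout (written by TensorDumper::dump):
+  //   [uint32 num_tensors][int32 index x num_tensors][float data, concatenated]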
+ // TODO Handle file open/read error
+ std::ifstream file(filename, std::ios::ate | std::ios::binary);
+ size_t file_size = file.tellg();
+ file.seekg(0, std::ios::beg);
+
+ uint32_t num_tensors = 0;
+ file.read(reinterpret_cast<char *>(&num_tensors), sizeof(num_tensors));
+
+  std::vector<int> tensor_indices(num_tensors);
+  file.read(reinterpret_cast<char *>(tensor_indices.data()), num_tensors * sizeof(int));
+
+  // NOTE Allocating `file_size` floats over-allocates (the payload is smaller), but keeps this simple
+  _raw_data = std::unique_ptr<float[]>(new float[file_size]);
+  file.read(reinterpret_cast<char *>(_raw_data.get()), file_size);
+
+ size_t offset = 0;
+ for (const auto &o : tensor_indices)
+ {
+ const TfLiteTensor *tensor = _interpreter.tensor(o);
+
+ // Convert tensor shape to `Shape` from `tensor->dims`
+ nnfw::util::tensor::Shape shape(static_cast<size_t>(tensor->dims->size));
+ for (int d = 0; d < tensor->dims->size; d++)
+ {
+ shape.dim(d) = tensor->dims->data[d];
+ }
+
+ float *base = _raw_data.get() + offset;
+
+ assert(tensor->bytes % sizeof(float) == 0);
+ offset += (tensor->bytes / sizeof(float));
+
+ _tensor_map.insert(std::make_pair(o, nnfw::support::tflite::TensorView<float>(shape, base)));
+ }
+
+  // The file size and the total tensor size must match
+  assert(file_size == sizeof(num_tensors) + num_tensors * sizeof(int) + offset * sizeof(float));
+
+ file.close();
+}
+
+const nnfw::support::tflite::TensorView<float> &TensorLoader::get(int tensor_idx) const
+{
+ auto found = _tensor_map.find(tensor_idx);
+ assert(found != _tensor_map.end());
+ return found->second;
+}
+
+} // end of namespace TFLiteRun
diff --git a/tools/tflite_run/src/tensor_loader.h b/tools/tflite_run/src/tensor_loader.h
new file mode 100644
index 000000000..f2a699185
--- /dev/null
+++ b/tools/tflite_run/src/tensor_loader.h
@@ -0,0 +1,35 @@
+#ifndef __TFLITE_RUN_TENSOR_LOADER_H__
+#define __TFLITE_RUN_TENSOR_LOADER_H__
+
+#include <sys/mman.h>
+
+#include <memory>
+#include <string>
+#include <unordered_map>
+
+#include "support/tflite/TensorView.h"
+
+namespace tflite
+{
+class Interpreter;
+}
+
+namespace TFLiteRun
+{
+
+class TensorLoader
+{
+public:
+ TensorLoader(tflite::Interpreter &interpreter);
+ void load(const std::string &filename);
+ const nnfw::support::tflite::TensorView<float> &get(int tensor_idx) const;
+ size_t getNums() const { return _tensor_map.size(); }
+
+private:
+ tflite::Interpreter &_interpreter;
+  std::unique_ptr<float[]> _raw_data;
+ std::unordered_map<int, nnfw::support::tflite::TensorView<float>> _tensor_map;
+};
+
+} // end of namespace TFLiteRun
+
+#endif // __TFLITE_RUN_TENSOR_LOADER_H__
diff --git a/tools/tflite_run/src/tflite_run.cc b/tools/tflite_run/src/tflite_run.cc
new file mode 100644
index 000000000..23a23809b
--- /dev/null
+++ b/tools/tflite_run/src/tflite_run.cc
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "support/tflite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+
+#include "bin_image.h"
+#include "args.h"
+#include "tensor_dumper.h"
+#include "tensor_loader.h"
+#include "util/benchmark.h"
+#include "util/environment.h"
+#include "util/fp32.h"
+#include "support/tflite/Diff.h"
+#include "support/tflite/Assert.h"
+#include "support/tflite/Session.h"
+#include "support/tflite/InterpreterSession.h"
+#include "support/tflite/NNAPISession.h"
+#include "util/tensor/IndexIterator.h"
+
+#include <iostream>
+#include <chrono>
+#include <algorithm>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+void print_max_idx(float *f, int size)
+{
+ float *p = std::max_element(f, f + size);
+ std::cout << "max:" << p - f;
+}
+
+int main(const int argc, char **argv)
+{
+ bool use_nnapi = false;
+
+ if (std::getenv("USE_NNAPI") != nullptr)
+ {
+ use_nnapi = true;
+ }
+
+ StderrReporter error_reporter;
+
+ TFLiteRun::Args args(argc, argv);
+
+ auto model = FlatBufferModel::BuildFromFile(args.getTFLiteFilename().c_str(), &error_reporter);
+ std::unique_ptr<Interpreter> interpreter;
+
+ std::chrono::milliseconds t_prepare(0);
+ std::chrono::milliseconds t_invoke(0);
+
+ nnfw::util::benchmark::measure(t_prepare) << [&](void) {
+ BuiltinOpResolver resolver;
+
+ InterpreterBuilder builder(*model, resolver);
+
+ TFLITE_ENSURE(builder(&interpreter))
+
+ interpreter->SetNumThreads(1);
+ };
+
+ std::shared_ptr<nnfw::support::tflite::Session> sess;
+
+ if (use_nnapi)
+ {
+ sess = std::make_shared<nnfw::support::tflite::NNAPISession>(interpreter.get());
+ }
+ else
+ {
+ sess = std::make_shared<nnfw::support::tflite::InterpreterSession>(interpreter.get());
+ }
+
+ sess->prepare();
+
+ TFLiteRun::TensorLoader tensor_loader(*interpreter);
+
+  // Load input from an image or a dumped tensor file. The two options are
+  // exclusive; this is enforced in Args.
+ if (args.getInputFilename().size() > 0)
+ {
+ BinImage image(299, 299, 3);
+ image.loadImage(args.getInputFilename());
+
+ for (const auto &o : interpreter->inputs())
+ {
+ image.AssignTensor(interpreter->tensor(o));
+ }
+ }
+ else if (!args.getCompareFilename().empty())
+ {
+ tensor_loader.load(args.getCompareFilename());
+
+ for (const auto &o : interpreter->inputs())
+ {
+ const auto &tensor_view = tensor_loader.get(o);
+ TfLiteTensor *tensor = interpreter->tensor(o);
+
+ memcpy(reinterpret_cast<void *>(tensor->data.f),
+ reinterpret_cast<const void *>(tensor_view._base), tensor->bytes);
+ }
+ }
+ else
+ {
+ // No input specified. So we fill the input tensors with random values.
+ for (const auto &o : interpreter->inputs())
+ {
+ TfLiteTensor *tensor = interpreter->tensor(o);
+ if (tensor->type == kTfLiteInt32)
+ {
+        // Generate signed 32-bit integer (s32) input
+ auto tensor_view = nnfw::support::tflite::TensorView<int32_t>::make(*interpreter, o);
+
+ int32_t value = 0;
+
+ nnfw::util::tensor::iterate(tensor_view.shape())
+ << [&](const nnfw::util::tensor::Index &ind) {
+ // TODO Generate random values
+ // Gather operation: index should be within input coverage.
+ tensor_view.at(ind) = value;
+ value++;
+ };
+ }
+ else if (tensor->type == kTfLiteUInt8)
+ {
+ // Generate unsigned 8-bit integer input
+ auto tensor_view = nnfw::support::tflite::TensorView<uint8_t>::make(*interpreter, o);
+
+ uint8_t value = 0;
+
+ nnfw::util::tensor::iterate(tensor_view.shape())
+ << [&](const nnfw::util::tensor::Index &ind) {
+ // TODO Generate random values
+ tensor_view.at(ind) = value;
+ value = (value + 1) & 0xFF;
+ };
+ }
+ else
+ {
+ assert(tensor->type == kTfLiteFloat32);
+
+ const int seed = 1; /* TODO Add an option for seed value */
+ RandomGenerator randgen{seed, 0.0f, 0.2f};
+ const float *end = reinterpret_cast<const float *>(tensor->data.raw_const + tensor->bytes);
+ for (float *ptr = tensor->data.f; ptr < end; ptr++)
+ {
+ *ptr = randgen.generate<float>();
+ }
+ }
+ }
+ }
+
+ TFLiteRun::TensorDumper tensor_dumper;
+ // Must be called before `interpreter->Invoke()`
+ tensor_dumper.addTensors(*interpreter, interpreter->inputs());
+
+ std::cout << "input tensor indices = [";
+ for (const auto &o : interpreter->inputs())
+ {
+ std::cout << o << ",";
+ }
+ std::cout << "]" << std::endl;
+
+ nnfw::util::benchmark::measure(t_invoke) << [&sess](void) {
+ if (!sess->run())
+ {
+ assert(0 && "run failed!");
+ }
+ };
+
+ sess->teardown();
+
+ // Must be called after `interpreter->Invoke()`
+ tensor_dumper.addTensors(*interpreter, interpreter->outputs());
+
+ std::cout << "output tensor indices = [";
+ for (const auto &o : interpreter->outputs())
+ {
+ std::cout << o << "(";
+
+ print_max_idx(interpreter->tensor(o)->data.f, interpreter->tensor(o)->bytes / sizeof(float));
+
+ std::cout << "),";
+ }
+ std::cout << "]" << std::endl;
+
+ std::cout << "Prepare takes " << t_prepare.count() / 1000.0 << " seconds" << std::endl;
+ std::cout << "Invoke takes " << t_invoke.count() / 1000.0 << " seconds" << std::endl;
+
+ if (!args.getDumpFilename().empty())
+ {
+ const std::string &dump_filename = args.getDumpFilename();
+ tensor_dumper.dump(dump_filename);
+ std::cout << "Input/output tensors have been dumped to file \"" << dump_filename << "\"."
+ << std::endl;
+ }
+
+ if (!args.getCompareFilename().empty())
+ {
+ const std::string &compare_filename = args.getCompareFilename();
+ std::cout << "========================================" << std::endl;
+ std::cout << "Comparing the results with \"" << compare_filename << "\"." << std::endl;
+ std::cout << "========================================" << std::endl;
+
+ // TODO Code duplication (copied from RandomTestRunner)
+
+ int tolerance = 1;
+ nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
+
+ auto equals = [tolerance](float lhs, float rhs) {
+ // NOTE Hybrid approach
+ // TODO Allow users to set tolerance for absolute_epsilon_equal
+ if (nnfw::util::fp32::absolute_epsilon_equal(lhs, rhs))
+ {
+ return true;
+ }
+
+ return nnfw::util::fp32::epsilon_equal(lhs, rhs, tolerance);
+ };
+
+ nnfw::util::tensor::Comparator comparator(equals);
+ TfLiteInterpMatchApp app(comparator);
+ bool res = true;
+
+ for (const auto &o : interpreter->outputs())
+ {
+ auto expected = tensor_loader.get(o);
+ auto obtained = nnfw::support::tflite::TensorView<float>::make(*interpreter, o);
+
+ res = res && app.compareSingleTensorView(expected, obtained, o);
+ }
+
+ if (!res)
+ {
+ return 255;
+ }
+ }
+
+ return 0;
+}
diff --git a/tools/tflite_run/src/tflite_test.cc b/tools/tflite_run/src/tflite_test.cc
new file mode 100644
index 000000000..d0d36c229
--- /dev/null
+++ b/tools/tflite_run/src/tflite_test.cc
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+
+TEST(TFLite_test_case, simple_test) { EXPECT_EQ(1, 1); }
diff --git a/tools/tflitefile_tool/README.md b/tools/tflitefile_tool/README.md
new file mode 100644
index 000000000..e88669843
--- /dev/null
+++ b/tools/tflitefile_tool/README.md
@@ -0,0 +1,81 @@
+## Model parser
+
+### Purpose
+
+This tool prints operator, tensor, and buffer information from a tflite model file (`.tflite`).
+
+### How to use
+
+```
+./model_parser.py <model file>
+```
+
+### Example
+
+```
+$ ./tools/tflitefile_tool/model_parser.py /home/nnfw/convolution_test.tflite
+
+[Main model]
+
+Main model input tensors: [0]
+Main model output tensors: [1]
+Operators list
+
+Operator 0: CONV_2D
+ Input Tensors[0 3 2]
+ Output Tensors[1]
+
+
+Tensor-Buffer mapping & shape
+
+Tensor 0 : buffer 3 | Empty | FLOAT32 | Shape [1, 299, 299, 3] (Mul)
+Tensor 1 : buffer 4 | Empty | FLOAT32 | Shape [1, 149, 149, 32] (conv)
+Tensor 2 : buffer 1 | Filled | FLOAT32 | Shape [32] (conv/Conv2D_bias)
+Tensor 3 : buffer 2 | Filled | FLOAT32 | Shape [32, 3, 3, 3] (conv/conv2d_params)
+
+$
+```
+
+## Model generator from other model file
+
+### Purpose
+
+This tool makes a small model file from a base model file (such as Inception v3).
+
+### How to use
+
+```
+./select_operator.py <base model file> <opcode list txt file> <output file name>
+```
+
+### Example
+
+```
+$ cat /home/nnfw/opcodelist.txt
+107 108 109 110 111 112 113 114 115 116 117 118 119 120
+
+$ ./tools/tflitefile_tool/select_operator.py /home/nnfw/inceptionv3_non_slim_2015.tflite \
+/home/nnfw/opcodelist.txt /home/nnfw/test.tflite
+
+Input tensor(s): [29]
+Output tensor(s): [31]
+
+$ Product/out/bin/tflite_run /home/nnfw/test.tflite
+nnapi error: unable to open library libneuralnetworks.so
+input tensor indices = [29,]
+Input image size is smaller than the size required by the model. Input will not be set.
+output tensor indices = [31(max:567),]
+Prepare takes 0.000516954 seconds
+Invoke takes 0.719677 seconds
+
+$
+```
+
+You can use a range such as `107-120` in `opcodelist.txt` instead of listing each operator index.
+
+## Using the model parser and the model generator together
+
+1. Get information about the base model using the model parser
+2. Select the operators you want in the test model
+3. Make a text file containing the selected operator indices
+4. Generate the test model file using the model generator (an example session is shown below)
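+
+For example, putting the steps together might look like this (paths are illustrative):
+
+```
+$ ./tools/tflitefile_tool/model_parser.py inceptionv3.tflite          # steps 1-2
+$ echo "107-120" > opcodelist.txt                                     # step 3
+$ ./tools/tflitefile_tool/select_operator.py inceptionv3.tflite \
+opcodelist.txt test.tflite                                            # step 4
+$ Product/out/bin/tflite_run test.tflite                              # run the result
+```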
diff --git a/tools/tflitefile_tool/model_parser.py b/tools/tflitefile_tool/model_parser.py
new file mode 100755
index 000000000..b8967d33f
--- /dev/null
+++ b/tools/tflitefile_tool/model_parser.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+import os
+import sys
+import numpy
+
+sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tflite'))
+flatbuffersPath = '../../externals/flatbuffers'
+sys.path.append(
+ os.path.join(os.path.dirname(os.path.abspath(__file__)), flatbuffersPath + '/python'))
+
+import flatbuffers
+import tflite.Model
+import tflite.SubGraph
+import argparse
+from operator_parser import OperatorParser
+from perf_predictor import PerfPredictor
+
+
+class TFLiteModelFileParser(object):
+ def __init__(self, args):
+ # Read flatbuffer file descriptor using argument
+ self.tflite_file = args.input_file
+
+ # Set print level (0 ~ 2)
+ # TODO: print information based on level
+ self.print_level = args.verbose
+ if (args.verbose > 2):
+ self.print_level = 2
+ if (args.verbose < 0):
+ self.print_level = 0
+
+ # Set tensor index list to print information
+ # TODO:
+ # Print tensors in list only
+ # Print all tensors if argument used and not specified index number
+ if (args.tensor != None):
+ if (len(args.tensor) == 0):
+ self.print_all_tensor = True
+ else:
+ self.print_all_tensor = False
+ self.print_tensor_index = []
+
+ for tensor_index in args.tensor:
+ self.print_tensor_index.append(int(tensor_index))
+
+ # Set operator index list to print information
+ # TODO:
+ # Print operators in list only
+ # Print all operators if argument used and not specified index number
+ if (args.operator != None):
+ if (len(args.operator) == 0):
+                self.print_all_operator = True
+            else:
+                self.print_all_operator = False
+ self.print_operator_index = []
+
+ for operator_index in args.operator:
+ self.print_operator_index.append(int(operator_index))
+
+ def main(self):
+ # Generate Model: top structure of tflite model file
+ buf = self.tflite_file.read()
+ buf = bytearray(buf)
+ tf_model = tflite.Model.Model.GetRootAsModel(buf, 0)
+
+ # Model file can have many models
+ # 1st subgraph is main model
+ model_name = "Main model"
+ for subgraph_index in range(tf_model.SubgraphsLength()):
+ tf_subgraph = tf_model.Subgraphs(subgraph_index)
+ if (subgraph_index != 0):
+ model_name = "Model #" + str(subgraph_index)
+
+ print("[" + model_name + "]\n")
+
+ # Model inputs & outputs
+ model_inputs = tf_subgraph.InputsAsNumpy()
+ model_outputs = tf_subgraph.OutputsAsNumpy()
+
+ print(model_name + " input tensors: " + str(model_inputs))
+ print(model_name + " output tensors: " + str(model_outputs))
+
+ # Parse Operators and print all of operators
+ op_parser = OperatorParser(tf_model, tf_subgraph, PerfPredictor())
+ op_parser.Parse()
+ op_parser.PrintAll()
+
+
+if __name__ == '__main__':
+ # Define argument and read
+ arg_parser = argparse.ArgumentParser()
+ arg_parser.add_argument(
+ "input_file", type=argparse.FileType('rb'), help="tflite file to read")
+ arg_parser.add_argument(
+ '-v',
+ '--verbose',
+ action='count',
+ default=0,
+ help="set print level (0~2, default: 0)")
+ arg_parser.add_argument(
+ '-t', '--tensor', nargs='*', help="tensor ID to print information (default: all)")
+ arg_parser.add_argument(
+ '-o',
+ '--operator',
+ nargs='*',
+ help="operator ID to print information (default: all)")
+ args = arg_parser.parse_args()
+
+ # Call main function
+ TFLiteModelFileParser(args).main()
diff --git a/tools/tflitefile_tool/operation.py b/tools/tflitefile_tool/operation.py
new file mode 100755
index 000000000..77fc5db9a
--- /dev/null
+++ b/tools/tflitefile_tool/operation.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+
+import tflite.Conv2DOptions
+import tflite.Pool2DOptions
+import tflite.BuiltinOptions
+import tflite.Tensor
+from tensor_wrapping import Tensor
+import math
+'''
+NOTICE
+- an internal class. do not import outside this file.
+- REF: https://stackoverflow.com/questions/551038/private-implementation-class-in-python
+'''
+
+
+class _OperationComputeMethod(object):
+ '''
+ NOTE: How to count operations of convolution(and also pooling)?
+
+ If we know operations of output's one element, we can calculate total output's operations.
+ For example, consider output Shape[3,3]
+ [ e11 e12 e13 ]
+ [ e21 e22 e23 ]
+ [ e31 e32 e33 ]
+    If we know the operation count for e11, we can get the total for the whole output (e11, e12, ... e33)
+    by multiplying it by 9 (the total number of elements).
+
+ So we only need to know how to calculate operations of e11.
+    For this, just consider how the conv operation produces one output element.
+    If input_channel is 1, only the kernel size (kernel_w and kernel_h) matters.
+ For example, consider input Shape[3,3] and kernel Shape[2,2]
+ [ i11 i12 i13 ] [ k11 k12 ] [ o11 o12 o13 ]
+ [ i21 i22 i23 ] * [ k21 k22 ] = [ o21 o22 o23 ]
+ [ i31 i32 i33 ] [ o31 o32 o33 ]
+
+ Conv operation: for o11, i11 * k11 + i21 * k21 + i12 * k12 + i22 * k22 = o11
+    In the conv operation above, mul operations are done 4 times (== kernel_w * kernel_h)
+    and add operations are done 3 times (== kernel_w * kernel_h - 1);
+    the bias is added as well and is counted among the add operations.
+
+    Anyway, we can calculate the total operations this way. The same applies to pooling.
+ '''
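+
+    # Worked example with the sample CONV_2D shapes from the model parser README
+    # (weight [32,3,3,3], input [1,299,299,3], output [1,149,149,32]),
+    # per the formulas in ComputeOperationForConv2D below:
+    #   kernel_ops = 3 * 3 * 3 = 27
+    #   total_ops  = 1 * 32 * 149 * 149 = 710,432
+    #   mul_instr_num = 710,432 * 27       = 19,181,664
+    #   add_instr_num = 710,432 * (27 + 1) = 19,892,096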
+
+ def ComputeOperationForConv2D(tf_operator, inputs, outputs):
+ assert (
+ tf_operator.BuiltinOptionsType() == tflite.BuiltinOptions.BuiltinOptions()
+ .Conv2DOptions)
+
+ # NOTE: Assume that conv2d operator always take 3 tensors as inputs
+ # and both width and height are the same.
+ # operator_inputs[]: [input_tensor, weight_tensor, bias_tensor]
+ # operator_outputs[]: [output_tensor]
+ # tflite's tensor shape: [N,H,W,C]
+ input_tensor = inputs[0].tf_tensor
+ weight_tensor = inputs[1].tf_tensor
+ output_tensor = outputs[0].tf_tensor
+
+ # kernel_ops = (kernel_w * kernel_h * input_channel * 2(multiply and add))
+ kernel_ops = (
+ weight_tensor.Shape(2) * weight_tensor.Shape(1) * input_tensor.Shape(3))
+
+ # total ops
+ # = batch_size * output_channel * output_width * output_height * kernel_ops
+ total_ops = (output_tensor.Shape(0) * output_tensor.Shape(3) *
+ output_tensor.Shape(2) * output_tensor.Shape(1))
+
+ add_instr_num = (total_ops * (kernel_ops + 1)) # bias
+ mul_instr_num = (total_ops * (kernel_ops))
+ nonlinear_instr_num = 0
+ return (add_instr_num, mul_instr_num, nonlinear_instr_num)
+
+ '''
+    NOTE: See the 'NOTE' comment in ComputeOperationForConv2D
+ '''
+
+ def ComputeOperationForPooling(tf_operator, inputs, outputs):
+ assert (
+ tf_operator.BuiltinOptionsType() == tflite.BuiltinOptions.BuiltinOptions()
+ .Pool2DOptions)
+
+ input_tensor = inputs[0].tf_tensor
+ output_tensor = outputs[0].tf_tensor
+
+ pool2d_options = tflite.Pool2DOptions.Pool2DOptions()
+ pool2d_options.Init(tf_operator.BuiltinOptions().Bytes,
+ tf_operator.BuiltinOptions().Pos)
+
+ # kernel_ops = kernel_w * kernel_h
+ kernel_ops = (pool2d_options.FilterWidth() * pool2d_options.FilterHeight())
+
+ # total ops
+ # = batch_size * output_channel * output_width * output_height *
+ # kernel_ops(kernel_w * kernel_h)
+ total_ops = (output_tensor.Shape(0) * output_tensor.Shape(3) *
+ output_tensor.Shape(2) * output_tensor.Shape(1))
+
+        add_instr_num = (total_ops * (kernel_ops - 1))
+ mul_instr_num = (total_ops * kernel_ops)
+ nonlinear_instr_num = 0
+ return (add_instr_num, mul_instr_num, nonlinear_instr_num)
+
+ def ComputeOperationForSoftmax(tf_operator, inputs, outputs):
+ assert (
+ tf_operator.BuiltinOptionsType() == tflite.BuiltinOptions.BuiltinOptions()
+ .SoftmaxOptions)
+
+ input_tensor = inputs[0].tf_tensor
+
+ batch_size = input_tensor.Shape(0)
+ input_dim = input_tensor.Shape(1)
+
+ # Softmax(x_i) = exp(x_i) / sum of exp(x)
+ add_instr_num = input_dim - 1 # sum of exp(x)
+ mul_instr_num = input_dim # /
+ nonlinear_instr_num = input_dim + input_dim # sum of exp(x) and exp(x_i)
+ return (add_instr_num, mul_instr_num, nonlinear_instr_num)
+
+ def ComputeOperationForFullyConnected(tf_operator, inputs, outputs):
+ assert (
+ tf_operator.BuiltinOptionsType() == tflite.BuiltinOptions.BuiltinOptions()
+ .FullyConnectedOptions)
+
+ # NOTE: Assume that fully_connected operator always take 3 tensors as inputs
+ # and its X tensor's shape is [1, 1, 1, input_dim] with
+ # its output Y [1, output_dim]
+ input_tensor = inputs[0].tf_tensor
+ output_tensor = outputs[0].tf_tensor
+
+ # ops_per_element
+ # = input_dim(multiplication) + input_dim-1(addition) + 1(bias)
+ # total_ops
+ # = ops_per_elem * output_dim
+
+ add_instr_num = (input_tensor.Shape(3) * output_tensor.Shape(1))
+ mul_instr_num = (input_tensor.Shape(3) * output_tensor.Shape(1))
+ nonlinear_instr_num = 0
+ return (add_instr_num, mul_instr_num, nonlinear_instr_num)
+
+ def ComputeOperationForNothing(tf_operator, inputs, outputs):
+ add_instr_num = 0
+ mul_instr_num = 0
+ nonlinear_instr_num = 0
+ return (add_instr_num, mul_instr_num, nonlinear_instr_num)
+
+ def NYI_ComputeOperation(tf_operator, inputs, outputs):
+ pass
+
+ operation_to_method_map = {
+ # Inceptionv3
+ "CONV_2D": ComputeOperationForConv2D,
+ "AVERAGE_POOL_2D": ComputeOperationForPooling,
+ "MAX_POOL_2D": ComputeOperationForPooling,
+ "SOFTMAX": ComputeOperationForSoftmax,
+ "FULLY_CONNECTED": ComputeOperationForFullyConnected,
+ "CONCATENATION": ComputeOperationForNothing,
+ # ADAS
+ "TOPK_V2": NYI_ComputeOperation,
+ "SUB": NYI_ComputeOperation,
+ "STRIDED_SLICE": NYI_ComputeOperation,
+ "RESHAPE": NYI_ComputeOperation,
+ "GATHER": NYI_ComputeOperation,
+ "RESIZE_BILINEAR": NYI_ComputeOperation,
+ "CAST": NYI_ComputeOperation,
+ "ADD": NYI_ComputeOperation,
+ "MUL": NYI_ComputeOperation,
+ "DIV": NYI_ComputeOperation,
+ "CUSTOM(TensorFlowMax)": NYI_ComputeOperation,
+ "CUSTOM": NYI_ComputeOperation,
+ }
+
+
+class Operation(object):
+ def __init__(self, tf_operator, operator_str, inputs, outputs):
+ self.tf_operator = tf_operator
+ self.operator_str = operator_str
+ self.inputs = inputs
+ self.outputs = outputs
+ self.add_instr_num = 0
+ self.mul_instr_num = 0
+ self.nonlinear_instr_num = 0
+ self.can_compute = True
+ self.Compute()
+
+ def Compute(self):
+ comp_map = _OperationComputeMethod().operation_to_method_map
+ if not self.operator_str in comp_map.keys():
+ self.can_compute = False
+ return
+
+ method = comp_map[self.operator_str]
+ if method.__name__ == _OperationComputeMethod().NYI_ComputeOperation.__name__:
+ self.can_compute = False
+ return
+
+ self.add_instr_num, self.mul_instr_num, self.nonlinear_instr_num = method(
+ self.tf_operator, self.inputs, self.outputs)
+
+ def TotalInstrNum(self):
+ return (self.add_instr_num + self.mul_instr_num + self.nonlinear_instr_num)
diff --git a/tools/tflitefile_tool/operator_parser.py b/tools/tflitefile_tool/operator_parser.py
new file mode 100755
index 000000000..9728d53b7
--- /dev/null
+++ b/tools/tflitefile_tool/operator_parser.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+
+import tflite.Model
+import tflite.SubGraph
+import tflite.Operator
+import tflite.OperatorCode
+import tflite.BuiltinOperator
+from operator_wrapping import Operator, EnumStrMaps
+from tensor_wrapping import Tensor, SetTensorTypeStr
+from operation import Operation
+
+
+class OperatorParser(object):
+ def __init__(self, tf_model, tf_subgraph, perf_predictor=None):
+ self.tf_model = tf_model
+ self.tf_subgraph = tf_subgraph
+ self.perf_predictor = perf_predictor
+ self.operators_in_list = list()
+ self.operators_per_type = dict()
+ # Tensor type string table
+ SetTensorTypeStr()
+
+ def Parse(self):
+ for operator_idx in range(self.tf_subgraph.OperatorsLength()):
+ tf_operator = self.tf_subgraph.Operators(operator_idx)
+ opcode_str = self.GetOpcodeStr(tf_operator)
+ input_tensors = self.GetInputTensors(tf_operator)
+ output_tensors = self.GetOutputTensors(tf_operator)
+
+ op = Operator(operator_idx, tf_operator, input_tensors, output_tensors,
+ opcode_str)
+ self.AppendOperator(op)
+
+ def GetOpcodeStr(self, tf_operator):
+ opcode_list_idx = tf_operator.OpcodeIndex()
+ opcode_id = self.tf_model.OperatorCodes(opcode_list_idx).BuiltinCode()
+ opcode_str = EnumStrMaps.BuiltinOpcode[opcode_id]
+ if opcode_id == 32:
+ # Custom operator
+ custom_operator = self.tf_model.OperatorCodes(tf_operator.OpcodeIndex())
+ custom_op_name = custom_operator.CustomCode().decode('utf-8')
+ opcode_str = opcode_str + "(" + custom_op_name + ")"
+ return opcode_str
+
+ def GetInputTensors(self, tf_operator):
+ operator_inputs = tf_operator.InputsAsNumpy()
+ return self.GetTensors(operator_inputs)
+
+ def GetOutputTensors(self, tf_operator):
+ operator_outputs = tf_operator.OutputsAsNumpy()
+ return self.GetTensors(operator_outputs)
+
+ def GetTensors(self, tf_tensors_index):
+ return_list = list()
+ for tensor_idx in tf_tensors_index:
+ if (tensor_idx < 0):
+ return_list.append(Tensor(tensor_idx, 0, 0))
+ continue
+ tf_tensor = self.tf_subgraph.Tensors(tensor_idx)
+ buffer_idx = tf_tensor.Buffer()
+ tf_buffer = self.tf_model.Buffers(buffer_idx)
+ return_list.append(Tensor(tensor_idx, tf_tensor, tf_buffer))
+ return return_list
+
+ def AppendOperator(self, operator):
+ self.operators_in_list.append(operator)
+
+ opcode_str = operator.opcode_str
+ if opcode_str not in self.operators_per_type:
+ self.operators_per_type[opcode_str] = list()
+ self.operators_per_type[opcode_str].append(operator)
+
+ def PrintAll(self):
+ print('')
+ self.PrintAllOperatorsInList()
+ print('')
+ self.PrintAllTypesInfo()
+ print('')
+
+ def PrintAllOperatorsInList(self):
+ for operator in self.operators_in_list:
+ operator.PrintInfo(self.perf_predictor)
+ print('')
+
+ def PrintAllTypesInfo(self):
+ print("Number of all operator types: {0}".format(len(self.operators_per_type)))
+
+ # number of instructions of all operator types
+ total_instrs = 0
+
+ # (a string of the operator type, a list of operators which are the same operator type)
+ for type_str, oper_list in self.operators_per_type.items():
+ # this operator type can be computed?
+ can_compute = oper_list[0].operation.can_compute
+
+ # number of occurrence of this operator type
+ occur = len(oper_list)
+
+ # total number of instructions of the same operator types
+ if can_compute:
+ instrs = sum(operator.operation.TotalInstrNum() for operator in oper_list)
+ total_instrs = total_instrs + instrs
+ instrs = "{:,}".format(instrs)
+ else:
+ instrs = "???"
+
+ print("\t{type_str:38}: {occur:4} \t (instrs: {instrs})".format(
+ type_str=type_str, occur=occur, instrs=instrs))
+
+ total_instrs = "{:,}".format(total_instrs)
+ print("{0:46}: {1:4} \t (total instrs: {2})".format("Number of all operators",
+ len(self.operators_in_list),
+ total_instrs))
diff --git a/tools/tflitefile_tool/operator_wrapping.py b/tools/tflitefile_tool/operator_wrapping.py
new file mode 100755
index 000000000..1b7f55a4c
--- /dev/null
+++ b/tools/tflitefile_tool/operator_wrapping.py
@@ -0,0 +1,120 @@
+#!/usr/bin/python
+
+import tflite.Operator
+import tflite.OperatorCode
+import tflite.BuiltinOperator
+import tflite.BuiltinOptions
+import tflite.ActivationFunctionType
+from tensor_wrapping import Tensor
+from operation import Operation
+from perf_predictor import PerfPredictor
+
+
+# Match enum value integer to name string
+# Assumption 1: enum values are defined in the old style (usable on Python 2)
+# Assumption 2: a class defining enum values contains only constants, no methods
+# Assumption 3: only integer values are set by the constant definitions
+def BuildEnumClassStrMap(obj):
+ ret = {}
+ for fieldName in dir(obj):
+ if (not fieldName.startswith('_')):
+ fieldValue = getattr(obj, fieldName)
+ if (isinstance(fieldValue, (int))):
+ ret[fieldValue] = fieldName
+ return ret
+
+
+class EnumStrMaps():
+ BuiltinOpcode = BuildEnumClassStrMap(tflite.BuiltinOperator.BuiltinOperator())
+ ActivationFunctionType = BuildEnumClassStrMap(
+ tflite.ActivationFunctionType.ActivationFunctionType())
+ BuiltinOptions = BuildEnumClassStrMap(tflite.BuiltinOptions.BuiltinOptions())
+
+
+def GetStrTensorIndex(tensors):
+ return_string = "["
+ for idx in range(len(tensors)):
+ if idx != 0:
+ return_string += ", "
+ return_string += str(tensors[idx].tensor_idx)
+ return_string += "]"
+ return return_string
+
+
+def GetAttribute(o, *args):
+ import functools
+ return functools.reduce(getattr, args, o)
+
+
+def BuildBuiltinOptionGen():
+ bo_gen = {}
+ for val_enum in EnumStrMaps.BuiltinOptions:
+ val_str = EnumStrMaps.BuiltinOptions[val_enum]
+ try:
+ # Dynamically import Builtin Option classes
+            # 0 (NONE) is the only exception that has no corresponding flatbuffers-generated class
+ module = __import__("tflite." + val_str)
+ bo_gen[val_enum] = GetAttribute(module, val_str, val_str)
+ except ImportError as e:
+ assert val_enum == 0 and val_str == "NONE"
+ return bo_gen
+
+
+class OptionLoader:
+ builtinOptionGen = BuildBuiltinOptionGen()
+
+ @staticmethod
+ def GetBuiltinOptions(options_type, options_table):
+ options = OptionLoader.builtinOptionGen[options_type]()
+ options.Init(options_table.Bytes, options_table.Pos)
+ return options
+
+
+class Operator(object):
+ def __init__(self, operator_idx, tf_operator, input_tensors, output_tensors,
+ opcode_str):
+ self.operator_idx = operator_idx
+ self.tf_operator = tf_operator
+ self.inputs = input_tensors
+ self.outputs = output_tensors
+ self.opcode_str = opcode_str
+ self.operation = Operation(self.tf_operator, self.opcode_str, self.inputs,
+ self.outputs)
+
+ def PrintInfo(self, perf_predictor=None):
+ # total instruction num
+ instrs = "{:,}".format(
+ self.operation.TotalInstrNum()) if self.operation.can_compute else "???"
+
+ # total operation cycles
+ cycles = "{:,}".format(
+ (perf_predictor.PredictCycles(self.operation)
+ )) if self.operation.can_compute and perf_predictor != None else "???"
+
+ print("Operator {0}: {1} (instrs: {2}, cycls: {3})".format(
+ self.operator_idx, self.opcode_str, instrs, cycles))
+
+ self.PrintOptionInfo()
+
+ print("\tInput Tensors" + GetStrTensorIndex(self.inputs))
+ for tensor in self.inputs:
+ tensor.PrintInfo("\t\t")
+ print("\tOutput Tensors" + GetStrTensorIndex(self.outputs))
+ for tensor in self.outputs:
+ tensor.PrintInfo("\t\t")
+
+ def PrintOptionInfo(self):
+ # FIXME: workaround for ops such as custom
+ try:
+ options = OptionLoader.GetBuiltinOptions(
+ self.tf_operator.BuiltinOptionsType(), self.tf_operator.BuiltinOptions())
+ except KeyError:
+ return
+
+ # fused activation function
+ try:
+ activation_code = options.FusedActivationFunction()
+ fused_activation = EnumStrMaps.ActivationFunctionType[activation_code]
+ print("\tFused Activation: " + fused_activation)
+ except AttributeError:
+ # This operator does not support FusedActivationFunction
+ pass
diff --git a/tools/tflitefile_tool/perf_predictor.py b/tools/tflitefile_tool/perf_predictor.py
new file mode 100755
index 000000000..8880c8e71
--- /dev/null
+++ b/tools/tflitefile_tool/perf_predictor.py
@@ -0,0 +1,15 @@
+#!/usr/bin/python
+
+from operation import Operation
+
+
+class PerfPredictor(object):
+ def __init__(self, add_cycle=1, mul_cycle=1, nonlinear_cycle=1):
+ self.add_cycle = add_cycle
+ self.mul_cycle = mul_cycle
+ self.nonlinear_cycle = nonlinear_cycle
+
+ def PredictCycles(self, operation):
+ return (operation.add_instr_num * self.add_cycle +
+ operation.mul_instr_num * self.mul_cycle +
+ operation.nonlinear_instr_num * self.nonlinear_cycle)
diff --git a/tools/tflitefile_tool/select_operator.py b/tools/tflitefile_tool/select_operator.py
new file mode 100755
index 000000000..55ca1acd9
--- /dev/null
+++ b/tools/tflitefile_tool/select_operator.py
@@ -0,0 +1,825 @@
+#!/usr/bin/python
+import os
+import sys
+import numpy
+
+sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tflite'))
+sys.path.append(
+ os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), '../../externals/flatbuffers/python'))
+
+import flatbuffers
+import tflite.Model
+import tflite.SubGraph
+import tflite.BuiltinOptions
+import argparse
+
+
+# Assume we use only main model in model file
+# Get selected operators from file, and return operator index list
+def GetOperatorList(oplist_file):
+ lines = oplist_file.readlines()
+ opcode_list = []
+
+ for line in lines:
+ words = line.split()
+ for word in words:
+ if word.isdigit():
+ opcode_list.append(int(word))
+ else:
+ opcode_range = word.split('-')
+ if ((len(opcode_range) == 2) and opcode_range[0].isdigit()
+ and opcode_range[1].isdigit()):
+ start = int(opcode_range[0])
+ end = int(opcode_range[1])
+ for num in range(start, end + 1):
+ opcode_list.append(int(num))
+ else:
+ print("Error: Cannot get operator list")
+ print(
+ "Please pass operators as operator index or range list split by space and/or line"
+ )
+ exit(1)
+
+ if len(opcode_list) == 0:
+ print("No selected operator")
+ exit(1)
+
+ return opcode_list
+
+
+def GenerateOperatorCodes(new_builder, sample_model, used_operators_dic):
+ operator_code_num = sample_model.OperatorCodesLength()
+ new_operator_code_list = []
+ new_operator_code_string_list = {}
+
+ if operator_code_num == 0:
+ return 0
+
+ # Create operator_code string
+ for operator_code_idx in range(operator_code_num):
+ if operator_code_idx in used_operators_dic:
+ operator_code = sample_model.OperatorCodes(operator_code_idx)
+ operator_code_string = operator_code.CustomCode()
+ if (operator_code_string !=
+ "") and (not operator_code_string in new_operator_code_string_list):
+ new_operator_code_string_list[
+ operator_code_string] = new_builder.CreateString(operator_code_string)
+
+ # Create tables of operator_code
+ for operator_code_idx in range(operator_code_num):
+ if operator_code_idx in used_operators_dic:
+ operator_code = sample_model.OperatorCodes(operator_code_idx)
+
+ # Create operator_code table
+ tflite.OperatorCode.OperatorCodeStart(new_builder)
+ tflite.OperatorCode.OperatorCodeAddBuiltinCode(new_builder,
+ operator_code.BuiltinCode())
+
+ new_operator_code_string = operator_code.CustomCode()
+ if new_operator_code_string in new_operator_code_string_list:
+ tflite.OperatorCode.OperatorCodeAddCustomCode(
+ new_builder, new_operator_code_string_list[new_operator_code_string])
+ new_operator_code = tflite.OperatorCode.OperatorCodeEnd(new_builder)
+ new_operator_code_list.append(new_operator_code)
+
+ # Create operator_code vector
+ new_operator_code_num = len(new_operator_code_list)
+ tflite.Model.ModelStartOperatorCodesVector(new_builder, new_operator_code_num)
+ for operator_code_idx in reversed(range(new_operator_code_num)):
+ new_builder.PrependUOffsetTRelative(new_operator_code_list[operator_code_idx])
+
+ return new_builder.EndVector(new_operator_code_num)
+
+
+def GenerateQuantization(new_builder, selected_quantization):
+ # Create min vector
+ min_num = selected_quantization.MinLength()
+ if min_num != 0:
+ tflite.QuantizationParameters.QuantizationParametersStartMinVector(
+ new_builder, min_num)
+ for min_idx in reversed(range(min_num)):
+ new_builder.PrependFloat32(selected_quantization.Min(min_idx))
+ new_min = new_builder.EndVector(min_num)
+
+ # Create max vector
+ max_num = selected_quantization.MaxLength()
+ if max_num != 0:
+ tflite.QuantizationParameters.QuantizationParametersStartMaxVector(
+ new_builder, max_num)
+ for max_idx in reversed(range(max_num)):
+ new_builder.PrependFloat32(selected_quantization.Max(max_idx))
+ new_max = new_builder.EndVector(max_num)
+
+ # Create scale vector
+ scale_num = selected_quantization.ScaleLength()
+ if scale_num != 0:
+ tflite.QuantizationParameters.QuantizationParametersStartScaleVector(
+ new_builder, scale_num)
+ for scale_idx in reversed(range(scale_num)):
+ new_builder.PrependFloat32(selected_quantization.Scale(scale_idx))
+ new_scale = new_builder.EndVector(scale_num)
+
+ # Create zero_point vector
+ zeropoint_num = selected_quantization.ZeroPointLength()
+ if zeropoint_num != 0:
+        tflite.QuantizationParameters.QuantizationParametersStartZeroPointVector(
+            new_builder, zeropoint_num)
+        for zeropoint_idx in reversed(range(zeropoint_num)):
+            new_builder.PrependInt64(selected_quantization.ZeroPoint(zeropoint_idx))
+ new_zeropoint = new_builder.EndVector(zeropoint_num)
+
+ # Create quantization
+ tflite.QuantizationParameters.QuantizationParametersStart(new_builder)
+ if min_num != 0:
+ tflite.QuantizationParameters.QuantizationParametersAddMin(new_builder, new_min)
+ if max_num != 0:
+ tflite.QuantizationParameters.QuantizationParametersAddMax(new_builder, new_max)
+ if scale_num != 0:
+ tflite.QuantizationParameters.QuantizationParametersAddScale(
+ new_builder, new_scale)
+ if zeropoint_num != 0:
+ tflite.QuantizationParameters.QuantizationParametersAddZeroPoint(
+ new_builder, new_zeropoint)
+
+ return tflite.QuantizationParameters.QuantizationParametersEnd(new_builder)
+
+
+def GenerateTensor(new_builder, selected_tensor, used_buffers_dic):
+
+ # Create shape vector for tensor
+ shape_num = selected_tensor.ShapeLength()
+ tflite.Tensor.TensorStartShapeVector(new_builder, shape_num)
+ if shape_num != 0:
+ for shape_idx in reversed(range(shape_num)):
+ new_builder.PrependInt32(selected_tensor.Shape(shape_idx))
+ new_shape = new_builder.EndVector(shape_num)
+
+ # Create tensor_type
+ tensor_type = selected_tensor.Type()
+
+ # Create input vector for tensor
+ buffer_idx = selected_tensor.Buffer()
+ new_buffer_idx = used_buffers_dic[buffer_idx]
+
+ # Create name string
+ name_string = selected_tensor.Name()
+ if name_string != "":
+ new_name = new_builder.CreateString(name_string)
+
+ # Create quantization
+ quantization = selected_tensor.Quantization()
+ if quantization != 0:
+ new_quantization = GenerateQuantization(new_builder, quantization)
+
+ # Create tensor
+ tflite.Tensor.TensorStart(new_builder)
+ tflite.Tensor.TensorAddShape(new_builder, new_shape)
+ tflite.Tensor.TensorAddType(new_builder, tensor_type)
+ tflite.Tensor.TensorAddBuffer(new_builder, new_buffer_idx)
+ if name_string != "":
+ tflite.Tensor.TensorAddName(new_builder, new_name)
+ if quantization != 0:
+ tflite.Tensor.TensorAddQuantization(new_builder, new_quantization)
+
+ return tflite.Tensor.TensorEnd(new_builder)
+
+
+def GenerateTensors(new_builder, selected_subgraph, used_tensors_dic, used_buffers_dic):
+ tensor_num = selected_subgraph.TensorsLength()
+ new_tensor_list = []
+
+ if tensor_num == 0:
+ return 0
+
+ for tensor_idx in range(tensor_num):
+ if tensor_idx in used_tensors_dic:
+ selected_tensor = selected_subgraph.Tensors(tensor_idx)
+ new_tensor = GenerateTensor(new_builder, selected_tensor, used_buffers_dic)
+ new_tensor_list.append(new_tensor)
+
+ new_tensor_num = len(new_tensor_list)
+ if new_tensor_num == 0:
+ return 0
+
+ tflite.SubGraph.SubGraphStartTensorsVector(new_builder, new_tensor_num)
+ for new_tensor in reversed(new_tensor_list):
+ new_builder.PrependUOffsetTRelative(new_tensor)
+
+ return new_builder.EndVector(new_tensor_num)
+
+
+import tflite.Conv2DOptions
+import tflite.DepthwiseConv2DOptions
+import tflite.Pool2DOptions
+import tflite.FullyConnectedOptions
+import tflite.SoftmaxOptions
+import tflite.ConcatenationOptions
+import tflite.ReshapeOptions
+import tflite.AddOptions
+import tflite.SubOptions
+import tflite.MulOptions
+import tflite.DivOptions
+import tflite.ResizeBilinearOptions
+import tflite.StridedSliceOptions
+import tflite.CastOptions
+import tflite.TopKV2Options
+import tflite.GatherOptions
+
+
+def GenerateBuiltinOption(new_builder, selected_builtin_option, builtin_option_type):
+
+ if builtin_option_type == tflite.BuiltinOptions.BuiltinOptions().Conv2DOptions:
+
+ conv2d_options = tflite.Conv2DOptions.Conv2DOptions()
+ conv2d_options.Init(selected_builtin_option.Bytes, selected_builtin_option.Pos)
+
+ tflite.Conv2DOptions.Conv2DOptionsStart(new_builder)
+ tflite.Conv2DOptions.Conv2DOptionsAddPadding(new_builder,
+ conv2d_options.Padding())
+ tflite.Conv2DOptions.Conv2DOptionsAddStrideW(new_builder,
+ conv2d_options.StrideW())
+ tflite.Conv2DOptions.Conv2DOptionsAddStrideH(new_builder,
+ conv2d_options.StrideH())
+ tflite.Conv2DOptions.Conv2DOptionsAddFusedActivationFunction(
+ new_builder, conv2d_options.FusedActivationFunction())
+ return tflite.Conv2DOptions.Conv2DOptionsEnd(new_builder)
+
+ if builtin_option_type == tflite.BuiltinOptions.BuiltinOptions(
+ ).DepthwiseConv2DOptions:
+
+ depthconv2d_option = tflite.DepthwiseConv2DOptions.DepthwiseConv2DOptions()
+ depthconv2d_option.Init(selected_builtin_option.Bytes,
+ selected_builtin_option.Pos)
+
+ tflite.DepthwiseConv2DOptions.DepthwiseConv2DOptionsStart(new_builder)
+ tflite.DepthwiseConv2DOptions.DepthwiseConv2DOptionsAddPadding(
+ new_builder, depthconv2d_option.Padding())
+ tflite.DepthwiseConv2DOptions.DepthwiseConv2DOptionsAddStrideW(
+ new_builder, depthconv2d_option.StrideW())
+ tflite.DepthwiseConv2DOptions.DepthwiseConv2DOptionsAddStrideH(
+ new_builder, depthconv2d_option.StrideH())
+ tflite.DepthwiseConv2DOptions.DepthwiseConv2DOptionsAddDepthMultiplier(
+ new_builder, depthconv2d_option.DepthMultiplier())
+ tflite.DepthwiseConv2DOptions.DepthwiseConv2DOptionsAddFusedActivationFunction(
+ new_builder, depthconv2d_option.FusedActivationFunction())
+ return tflite.DepthwiseConv2DOptions.DepthwiseConv2DOptionsEnd(new_builder)
+
+ if builtin_option_type == tflite.BuiltinOptions.BuiltinOptions().Pool2DOptions:
+
+ pool2d_option = tflite.Pool2DOptions.Pool2DOptions()
+ pool2d_option.Init(selected_builtin_option.Bytes, selected_builtin_option.Pos)
+
+ tflite.Pool2DOptions.Pool2DOptionsStart(new_builder)
+ tflite.Pool2DOptions.Pool2DOptionsAddPadding(new_builder, pool2d_option.Padding())
+ tflite.Pool2DOptions.Pool2DOptionsAddStrideW(new_builder, pool2d_option.StrideW())
+ tflite.Pool2DOptions.Pool2DOptionsAddStrideH(new_builder, pool2d_option.StrideH())
+ tflite.Pool2DOptions.Pool2DOptionsAddFilterWidth(new_builder,
+ pool2d_option.FilterWidth())
+ tflite.Pool2DOptions.Pool2DOptionsAddFilterHeight(new_builder,
+ pool2d_option.FilterHeight())
+ tflite.Pool2DOptions.Pool2DOptionsAddFusedActivationFunction(
+ new_builder, pool2d_option.FusedActivationFunction())
+ return tflite.Pool2DOptions.Pool2DOptionsEnd(new_builder)
+
+ if builtin_option_type == tflite.BuiltinOptions.BuiltinOptions(
+ ).FullyConnectedOptions:
+
+ fc_option = tflite.FullyConnectedOptions.FullyConnectedOptions()
+ fc_option.Init(selected_builtin_option.Bytes, selected_builtin_option.Pos)
+
+ tflite.FullyConnectedOptions.FullyConnectedOptionsStart(new_builder)
+ tflite.FullyConnectedOptions.FullyConnectedOptionsAddFusedActivationFunction(
+ new_builder, fc_option.FusedActivationFunction())
+ return tflite.FullyConnectedOptions.FullyConnectedOptionsEnd(new_builder)
+
+ if builtin_option_type == tflite.BuiltinOptions.BuiltinOptions().SoftmaxOptions:
+
+ softmax_option = tflite.SoftmaxOptions.SoftmaxOptions()
+ softmax_option.Init(selected_builtin_option.Bytes, selected_builtin_option.Pos)
+
+ tflite.SoftmaxOptions.SoftmaxOptionsStart(new_builder)
+ tflite.SoftmaxOptions.SoftmaxOptionsAddBeta(new_builder, softmax_option.Beta())
+ return tflite.SoftmaxOptions.SoftmaxOptionsEnd(new_builder)
+
+ if builtin_option_type == tflite.BuiltinOptions.BuiltinOptions().ConcatenationOptions:
+
+ concat_option = tflite.ConcatenationOptions.ConcatenationOptions()
+ concat_option.Init(selected_builtin_option.Bytes, selected_builtin_option.Pos)
+
+ tflite.ConcatenationOptions.ConcatenationOptionsStart(new_builder)
+ tflite.ConcatenationOptions.ConcatenationOptionsAddAxis(
+ new_builder, concat_option.Axis())
+ tflite.ConcatenationOptions.ConcatenationOptionsAddFusedActivationFunction(
+ new_builder, concat_option.FusedActivationFunction())
+ return tflite.ConcatenationOptions.ConcatenationOptionsEnd(new_builder)
+
+ if builtin_option_type == tflite.BuiltinOptions.BuiltinOptions().ReshapeOptions:
+
+ reshape_option = tflite.ReshapeOptions.ReshapeOptions()
+ reshape_option.Init(selected_builtin_option.Bytes, selected_builtin_option.Pos)
+
+ shape_num = reshape_option.NewShapeLength()
+ if shape_num != 0:
+ tflite.ReshapeOptions.ReshapeOptionsStartNewShapeVector(
+ new_builder, shape_num)
+ for new_shape_idx in reversed(range(shape_num)):
+ new_shape_val = reshape_option.NewShape(new_shape_idx)
+ new_builder.PrependInt32(new_shape_val)
+ new_shape = new_builder.EndVector(shape_num)
+
+ tflite.ReshapeOptions.ReshapeOptionsStart(new_builder)
+ if shape_num != 0:
+ tflite.ReshapeOptions.ReshapeOptionsAddNewShape(new_builder, new_shape)
+ return tflite.ReshapeOptions.ReshapeOptionsEnd(new_builder)
+
+ if builtin_option_type == tflite.BuiltinOptions.BuiltinOptions().AddOptions:
+
+ add_option = tflite.AddOptions.AddOptions()
+ add_option.Init(selected_builtin_option.Bytes, selected_builtin_option.Pos)
+
+ tflite.AddOptions.AddOptionsStart(new_builder)
+ tflite.AddOptions.AddOptionsAddFusedActivationFunction(
+ new_builder, add_option.FusedActivationFunction())
+ return tflite.AddOptions.AddOptionsEnd(new_builder)
+
+ if builtin_option_type == tflite.BuiltinOptions.BuiltinOptions().SubOptions:
+
+ sub_option = tflite.SubOptions.SubOptions()
+ sub_option.Init(selected_builtin_option.Bytes, selected_builtin_option.Pos)
+
+ tflite.SubOptions.SubOptionsStart(new_builder)
+ tflite.SubOptions.SubOptionsAddFusedActivationFunction(
+ new_builder, sub_option.FusedActivationFunction())
+ return tflite.SubOptions.SubOptionsEnd(new_builder)
+
+ if builtin_option_type == tflite.BuiltinOptions.BuiltinOptions().MulOptions:
+
+ mul_option = tflite.MulOptions.MulOptions()
+ mul_option.Init(selected_builtin_option.Bytes, selected_builtin_option.Pos)
+
+ tflite.MulOptions.MulOptionsStart(new_builder)
+ tflite.MulOptions.MulOptionsAddFusedActivationFunction(
+ new_builder, mul_option.FusedActivationFunction())
+ return tflite.MulOptions.MulOptionsEnd(new_builder)
+
+ if builtin_option_type == tflite.BuiltinOptions.BuiltinOptions().DivOptions:
+
+ div_option = tflite.DivOptions.DivOptions()
+ div_option.Init(selected_builtin_option.Bytes, selected_builtin_option.Pos)
+
+ tflite.DivOptions.DivOptionsStart(new_builder)
+ tflite.DivOptions.DivOptionsAddFusedActivationFunction(
+ new_builder, div_option.FusedActivationFunction())
+ return tflite.DivOptions.DivOptionsEnd(new_builder)
+
+ if builtin_option_type == tflite.BuiltinOptions.BuiltinOptions(
+ ).ResizeBilinearOptions:
+
+ resize_bilinear_option = tflite.ResizeBilinearOptions.ResizeBilinearOptions()
+ resize_bilinear_option.Init(selected_builtin_option.Bytes,
+ selected_builtin_option.Pos)
+
+ tflite.ResizeBilinearOptions.ResizeBilinearOptionsStart(new_builder)
+ tflite.ResizeBilinearOptions.ResizeBilinearOptionsAddAlignCorners(
+ new_builder, resize_bilinear_option.AlignCorners())
+ return tflite.ResizeBilinearOptions.ResizeBilinearOptionsEnd(new_builder)
+
+ if builtin_option_type == tflite.BuiltinOptions.BuiltinOptions().StridedSliceOptions:
+
+ stride_slice_option = tflite.StridedSliceOptions.StridedSliceOptions()
+ stride_slice_option.Init(selected_builtin_option.Bytes,
+ selected_builtin_option.Pos)
+
+ tflite.StridedSliceOptions.StridedSliceOptionsStart(new_builder)
+ tflite.StridedSliceOptions.StridedSliceOptionsAddBeginMask(
+ new_builder, stride_slice_option.BeginMask())
+ tflite.StridedSliceOptions.StridedSliceOptionsAddEndMask(
+ new_builder, stride_slice_option.EndMask())
+ tflite.StridedSliceOptions.StridedSliceOptionsAddEllipsisMask(
+ new_builder, stride_slice_option.EllipsisMask())
+ tflite.StridedSliceOptions.StridedSliceOptionsAddNewAxisMask(
+ new_builder, stride_slice_option.NewAxisMask())
+ tflite.StridedSliceOptions.StridedSliceOptionsAddShrinkAxisMask(
+ new_builder, stride_slice_option.ShrinkAxisMask())
+
+ return tflite.StridedSliceOptions.StridedSliceOptionsEnd(new_builder)
+
+ if builtin_option_type == tflite.BuiltinOptions.BuiltinOptions().CastOptions:
+
+ cast_option = tflite.CastOptions.CastOptions()
+ cast_option.Init(selected_builtin_option.Bytes, selected_builtin_option.Pos)
+
+ tflite.CastOptions.CastOptionsStart(new_builder)
+ return tflite.CastOptions.CastOptionsEnd(new_builder)
+
+ if builtin_option_type == tflite.BuiltinOptions.BuiltinOptions().TopKV2Options:
+
+ topkv2_option = tflite.TopKV2Options.TopKV2Options()
+ topkv2_option.Init(selected_builtin_option.Bytes, selected_builtin_option.Pos)
+
+ tflite.TopKV2Options.TopKV2OptionsStart(new_builder)
+ return tflite.TopKV2Options.TopKV2OptionsEnd(new_builder)
+
+ if builtin_option_type == tflite.BuiltinOptions.BuiltinOptions().GatherOptions:
+
+ gather_option = tflite.GatherOptions.GatherOptions()
+ gather_option.Init(selected_builtin_option.Bytes, selected_builtin_option.Pos)
+
+ tflite.GatherOptions.GatherOptionsStart(new_builder)
+ tflite.GatherOptions.GatherOptionsAddAxis(new_builder, gather_option.Axis())
+ return tflite.GatherOptions.GatherOptionsEnd(new_builder)
+
+ # Cannot handle builtin option type yet
+ return 0
+
+
+def GenerateOperator(new_builder, selected_operator, used_tensors_dic,
+ used_operators_dic):
+
+    # Remap the opcode index to its new index
+ opcode_index = selected_operator.OpcodeIndex()
+ new_opcode_index = used_operators_dic[opcode_index]
+
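+    # NOTE: FlatBuffers builds vectors back to front, so the loops below prepend
+    # elements in reverse order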
+    # Create input vector
+    input_num = selected_operator.InputsLength()
+    if input_num != 0:
+        tflite.Operator.OperatorStartInputsVector(new_builder, input_num)
+        for input_idx in reversed(range(input_num)):
+            input_tensor_idx = selected_operator.Inputs(input_idx)
+            new_input_tensor_idx = used_tensors_dic[input_tensor_idx]
+            new_builder.PrependInt32(new_input_tensor_idx)
+        new_input = new_builder.EndVector(input_num)
+
+    # Create output vector
+ output_num = selected_operator.OutputsLength()
+ if output_num != 0:
+ tflite.Operator.OperatorStartOutputsVector(new_builder, output_num)
+ for output_idx in reversed(range(output_num)):
+ output_tensor_idx = selected_operator.Outputs(output_idx)
+ new_output_tensor_idx = used_tensors_dic[output_tensor_idx]
+ new_builder.PrependInt32(new_output_tensor_idx)
+ new_output = new_builder.EndVector(output_num)
+
+ # Create builtin_option
+ builtin_option_type = selected_operator.BuiltinOptionsType()
+ if builtin_option_type != 0:
+ selected_builtin_option = selected_operator.BuiltinOptions()
+ new_builtin_option = GenerateBuiltinOption(new_builder, selected_builtin_option,
+ builtin_option_type)
+
+    # Create custom option vector
+ custom_option_num = selected_operator.CustomOptionsLength()
+ if custom_option_num != 0:
+ tflite.Operator.OperatorStartCustomOptionsVector(new_builder, custom_option_num)
+ for custom_option_idx in reversed(range(custom_option_num)):
+ new_builder.PrependUint8(selected_operator.CustomOptions(custom_option_idx))
+ new_custom_option = new_builder.EndVector(custom_option_num)
+
+    # Create custom option type
+ custom_option_type = selected_operator.CustomOptionsFormat()
+
+ # Create operator
+ tflite.Operator.OperatorStart(new_builder)
+ tflite.Operator.OperatorAddOpcodeIndex(new_builder, new_opcode_index)
+ if input_num != 0:
+ tflite.Operator.OperatorAddInputs(new_builder, new_input)
+ if output_num != 0:
+ tflite.Operator.OperatorAddOutputs(new_builder, new_output)
+ tflite.Operator.OperatorAddBuiltinOptionsType(new_builder, builtin_option_type)
+ if builtin_option_type != 0:
+ tflite.Operator.OperatorAddBuiltinOptions(new_builder, new_builtin_option)
+ if custom_option_num != 0:
+ tflite.Operator.OperatorAddCustomOptions(new_builder, new_custom_option)
+ tflite.Operator.OperatorAddCustomOptionsFormat(new_builder, custom_option_type)
+ return tflite.Operator.OperatorEnd(new_builder)
+
+
+def GenerateOperators(new_builder, selected_subgraph, opcode_list, used_tensors_dic,
+ used_operators_dic):
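+    # NOTE: despite the name, opcode_list holds operator indices within the subgraph,
+    # as parsed from the user-given operator list file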
+ operator_num = selected_subgraph.OperatorsLength()
+ new_operator_list = []
+
+ if operator_num == 0:
+ return 0
+
+ for operator_idx in range(operator_num):
+ if operator_idx in opcode_list:
+ selected_operator = selected_subgraph.Operators(operator_idx)
+ new_operator = GenerateOperator(new_builder, selected_operator,
+ used_tensors_dic, used_operators_dic)
+ new_operator_list.append(new_operator)
+
+ new_operator_num = len(new_operator_list)
+ if new_operator_num == 0:
+ return 0
+
+ tflite.SubGraph.SubGraphStartOperatorsVector(new_builder, new_operator_num)
+ for new_operator in reversed(new_operator_list):
+ new_builder.PrependUOffsetTRelative(new_operator)
+
+ return new_builder.EndVector(new_operator_num)
+
+
+def GenerateSubgraph(new_builder, selected_subgraph, opcode_list, new_input_tensor,
+ new_output_tensor, used_tensors_dic, used_buffers_dic,
+ used_operators_dic):
+
+ # Tensors
+ tensors = GenerateTensors(new_builder, selected_subgraph, used_tensors_dic,
+ used_buffers_dic)
+
+ # Create input vector for subgraph table
+ new_input_tensor_num = len(new_input_tensor)
+ if new_input_tensor_num != 0:
+ tflite.SubGraph.SubGraphStartInputsVector(new_builder, new_input_tensor_num)
+ for input_tensor_idx in reversed(new_input_tensor):
+ new_input_tensor_idx = used_tensors_dic[input_tensor_idx]
+ new_builder.PrependInt32(new_input_tensor_idx)
+ new_inputs = new_builder.EndVector(new_input_tensor_num)
+
+ # Create output vector for subgraph table
+ new_output_tensor_num = len(new_output_tensor)
+ if new_output_tensor_num != 0:
+        tflite.SubGraph.SubGraphStartOutputsVector(new_builder, new_output_tensor_num)
+ for output_tensor_idx in reversed(new_output_tensor):
+ new_output_tensor_idx = used_tensors_dic[output_tensor_idx]
+ new_builder.PrependInt32(new_output_tensor_idx)
+ new_outputs = new_builder.EndVector(new_output_tensor_num)
+
+ # Operators
+ operators = GenerateOperators(new_builder, selected_subgraph, opcode_list,
+ used_tensors_dic, used_operators_dic)
+
+ # Name
+ subgraph_name = selected_subgraph.Name()
+ have_name = False
+    if subgraph_name:  # Name() returns None when the subgraph has no name
+ have_name = True
+ new_subgraph_name = new_builder.CreateString(subgraph_name)
+
+ tflite.SubGraph.SubGraphStart(new_builder)
+ tflite.SubGraph.SubGraphAddTensors(new_builder, tensors)
+ if new_input_tensor_num != 0:
+ tflite.SubGraph.SubGraphAddInputs(new_builder, new_inputs)
+ if new_output_tensor_num != 0:
+ tflite.SubGraph.SubGraphAddOutputs(new_builder, new_outputs)
+ tflite.SubGraph.SubGraphAddOperators(new_builder, operators)
+ if have_name:
+ tflite.SubGraph.SubGraphAddName(new_builder, new_subgraph_name)
+
+ return tflite.SubGraph.SubGraphEnd(new_builder)
+
+
+def GenerateSubgraphs(new_builder, sample_model, opcode_list, new_input_tensor,
+ new_output_tensor, used_tensors_dic, used_buffers_dic,
+ used_operators_dic):
+ new_subgraph_list = []
+
+    # We handle only the main (first) subgraph
+ selected_subgraph = sample_model.Subgraphs(0)
+ new_subgraph = GenerateSubgraph(new_builder, selected_subgraph, opcode_list,
+ new_input_tensor, new_output_tensor, used_tensors_dic,
+ used_buffers_dic, used_operators_dic)
+ new_subgraph_list.append(new_subgraph)
+
+ new_subgraph_num = 1
+ tflite.Model.ModelStartSubgraphsVector(new_builder, new_subgraph_num)
+ for subgraph_idx in reversed(range(new_subgraph_num)):
+ new_builder.PrependUOffsetTRelative(new_subgraph_list[subgraph_idx])
+
+ return new_builder.EndVector(new_subgraph_num)
+
+
+def GenerateBuffers(new_builder, sample_model, used_buffers_dic):
+ buffer_num = sample_model.BuffersLength()
+ new_buffer_data_list = {}
+ new_buffer_list = []
+
+ if buffer_num == 0:
+ return 0
+
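+    # Two passes: FlatBuffers forbids starting a table while a vector is open,
+    # so all data vectors are created before any buffer table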
+ # Create data vector for buffer table
+ for buffer_idx in range(buffer_num):
+ buffer = sample_model.Buffers(buffer_idx)
+ buffer_length = buffer.DataLength()
+
+ if (buffer_length != 0) and (buffer_idx in used_buffers_dic):
+ tflite.Buffer.BufferStartDataVector(new_builder, buffer_length)
+ for buffer_data_idx in reversed(range(buffer_length)):
+ new_builder.PrependUint8(buffer.Data(buffer_data_idx))
+ new_buffer = new_builder.EndVector(buffer_length)
+ new_buffer_data_list[buffer_idx] = new_buffer
+
+ # Create tables of buffer
+ for buffer_idx in range(buffer_num):
+ buffer = sample_model.Buffers(buffer_idx)
+
+ if buffer_idx in used_buffers_dic:
+ # Create buffer table
+ tflite.Buffer.BufferStart(new_builder)
+ if buffer.DataLength() != 0:
+ tflite.Buffer.BufferAddData(new_builder, new_buffer_data_list[buffer_idx])
+ new_buffer = tflite.Buffer.BufferEnd(new_builder)
+ new_buffer_list.append(new_buffer)
+
+ # Create buffer vector
+ new_buffer_num = len(new_buffer_list)
+ if new_buffer_num == 0:
+ return 0
+
+ tflite.Model.ModelStartBuffersVector(new_builder, new_buffer_num)
+ for new_buffer_idx in reversed(range(new_buffer_num)):
+ new_builder.PrependUOffsetTRelative(new_buffer_list[new_buffer_idx])
+
+ return new_builder.EndVector(new_buffer_num)
+
+
+def GenerateModel(new_builder, sample_model, opcode_list, new_input_tensors,
+ new_output_tensors, used_tensors_dic, used_buffers_dic,
+ used_operators_dic):
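+    # All child objects (operator codes, subgraphs, description string, buffers)
+    # must be created before ModelStart opens the root table
+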
+    # Model version (uint)
+ version = sample_model.Version()
+
+ # pointer of operator code 'table' vector
+ operator_codes = GenerateOperatorCodes(new_builder, sample_model, used_operators_dic)
+
+ # subgraphs
+ subgraphs = GenerateSubgraphs(new_builder, sample_model, opcode_list,
+ new_input_tensors, new_output_tensors, used_tensors_dic,
+ used_buffers_dic, used_operators_dic)
+
+ # description
+ description_string = new_builder.CreateString(sample_model.Description())
+
+ # buffers
+ buffers = GenerateBuffers(new_builder, sample_model, used_buffers_dic)
+
+ # Generate model
+ tflite.Model.ModelStart(new_builder)
+ tflite.Model.ModelAddVersion(new_builder, version)
+ tflite.Model.ModelAddOperatorCodes(new_builder, operator_codes)
+ tflite.Model.ModelAddSubgraphs(new_builder, subgraphs)
+ tflite.Model.ModelAddDescription(new_builder, description_string)
+ tflite.Model.ModelAddBuffers(new_builder, buffers)
+
+ return tflite.Model.ModelEnd(new_builder)
+
+
+def Finish(new_builder, new_model):
+    # Custom implementation: file identifier
+    # The FlatBuffers Python API does not support passing a file identifier yet
+    # Reference: Finish(self, rootTable) in builder.py, Finish(uoffset_t root, const char *file_identifier, bool size_prefix) in flatbuffers.h
+ new_builder.Prep(new_builder.minalign,
+ flatbuffers.number_types.UOffsetTFlags.bytewidth)
+
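+    # Prepend the "TFL3" file identifier; bytes are prepended in reverse order ('3', 'L', 'F', 'T')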
+ new_builder.PrependByte(0x33)
+ new_builder.PrependByte(0x4c)
+ new_builder.PrependByte(0x46)
+ new_builder.PrependByte(0x54)
+
+ new_builder.PrependUOffsetTRelative(new_model)
+ new_builder.finished = True
+ return new_builder.Head()
+
+
+def main(args):
+ input_model_file = args.input_model
+ oplist_file = args.opcode_list
+ output_model_file = args.output_model
+
+ # Parse operator list file
+ opcode_list = GetOperatorList(oplist_file)
+
+ # Get sample model and subgraph
+    # We use only the 1st subgraph
+ sample_buf = input_model_file.read()
+ sample_buf = bytearray(sample_buf)
+ sample_model = tflite.Model.Model.GetRootAsModel(sample_buf, 0)
+ sample_subgraph = sample_model.Subgraphs(0)
+
+ # Collect used tensor & used operator
+ used_tensors = []
+ used_operators = []
+
+ for opcode_idx in opcode_list:
+ opcode = sample_subgraph.Operators(opcode_idx)
+ for input_idx in range(opcode.InputsLength()):
+ input_tensor_idx = opcode.Inputs(input_idx)
+            if input_tensor_idx not in used_tensors:
+ # default: same as input sample
+ used_tensors.append(input_tensor_idx)
+
+ for output_idx in range(opcode.OutputsLength()):
+ output_tensor_idx = opcode.Outputs(output_idx)
+            if output_tensor_idx not in used_tensors:
+ # default: same as input sample
+ used_tensors.append(output_tensor_idx)
+
+ opcode_idx = opcode.OpcodeIndex()
+        if opcode_idx not in used_operators:
+ used_operators.append(opcode_idx)
+
+ used_tensors.sort()
+ used_operators.sort()
+
+    # Collect used buffers
+    # buffer[0] must stay empty, so index 0 is reserved up front
+    used_buffers = [0]
+
+    for used_tensor in used_tensors:
+        buf_idx = sample_subgraph.Tensors(used_tensor).Buffer()
+        # Skip duplicates: several tensors may reference the same buffer (e.g. buffer 0)
+        if buf_idx not in used_buffers:
+            used_buffers.append(buf_idx)
+    used_buffers.sort()
+
+ # Assign new index for operator
+ used_operators_dic = {}
+
+ for new_operator_idx in range(len(used_operators)):
+ sample_operator_idx = used_operators[new_operator_idx]
+ used_operators_dic[sample_operator_idx] = new_operator_idx
+
+ # Assign new index for tensor
+ used_tensors_dic = {}
+
+ for new_tensor_idx in range(len(used_tensors)):
+ sample_tensor_idx = used_tensors[new_tensor_idx]
+ used_tensors_dic[sample_tensor_idx] = new_tensor_idx
+
+ # Assign new index for buffer
+ used_buffers_dic = {}
+
+ for new_buffer_idx in range(len(used_buffers)):
+ sample_buffer_idx = used_buffers[new_buffer_idx]
+ used_buffers_dic[sample_buffer_idx] = new_buffer_idx
+
+ # Find input & output tensor in new model
+ new_input_tensors = used_tensors[:]
+ new_output_tensors = used_tensors[:]
+
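+    # A tensor is a model input if it is consumed but never produced (and has no
+    # constant data), and a model output if it is produced but never consumed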
+ for opcode_idx in opcode_list:
+ opcode = sample_subgraph.Operators(opcode_idx)
+ for input_idx in range(opcode.InputsLength()):
+ input_tensor_idx = opcode.Inputs(input_idx)
+ if input_tensor_idx in new_output_tensors:
+ new_output_tensors.remove(input_tensor_idx)
+ if input_tensor_idx in new_input_tensors:
+ matched_buffer_idx = sample_subgraph.Tensors(input_tensor_idx).Buffer()
+ matched_buffer = sample_model.Buffers(matched_buffer_idx)
+ if matched_buffer.DataLength() != 0:
+ new_input_tensors.remove(input_tensor_idx)
+
+ for output_idx in range(opcode.OutputsLength()):
+ output_tensor_idx = opcode.Outputs(output_idx)
+ if output_tensor_idx in new_input_tensors:
+ new_input_tensors.remove(output_tensor_idx)
+ if output_tensor_idx in new_output_tensors:
+ matched_buffer_idx = sample_subgraph.Tensors(output_tensor_idx).Buffer()
+ matched_buffer = sample_model.Buffers(matched_buffer_idx)
+ if matched_buffer.DataLength() != 0:
+                    new_output_tensors.remove(output_tensor_idx)
+
+ new_input_tensors_newidx = []
+ new_output_tensors_newidx = []
+
+ for input_tensor_idx in new_input_tensors:
+ new_input_tensors_newidx.append(used_tensors_dic[input_tensor_idx])
+ for output_tensor_idx in new_output_tensors:
+ new_output_tensors_newidx.append(used_tensors_dic[output_tensor_idx])
+
+ print("Input tensor(s): " + str(new_input_tensors_newidx))
+ print("Output tensor(s): " + str(new_output_tensors_newidx))
+
+ # Create new model file
+ new_builder = flatbuffers.Builder(1024)
+
+ new_model = GenerateModel(new_builder, sample_model, opcode_list, new_input_tensors,
+ new_output_tensors, used_tensors_dic, used_buffers_dic,
+ used_operators_dic)
+
+ Finish(new_builder, new_model)
+ new_buf = new_builder.Output()
+
+ output_model_file.write(new_buf)
+
+
+if __name__ == '__main__':
+ # Define argument and read
+ arg_parser = argparse.ArgumentParser()
+ arg_parser.add_argument(
+ "input_model",
+ type=argparse.FileType('rb'),
+ help="input tflite model file to read")
+ arg_parser.add_argument(
+ "opcode_list",
+ type=argparse.FileType('r'),
+ help="text file including selected operator list")
+ arg_parser.add_argument(
+ "output_model", type=argparse.FileType('wb'), help="output tflite model file")
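+    # Example invocation (file names are hypothetical):
+    #   python <this script> sample.tflite opcode_list.txt out.tflite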
+ args = arg_parser.parse_args()
+
+ # Call main function
+ main(args)
diff --git a/tools/tflitefile_tool/tensor_wrapping.py b/tools/tflitefile_tool/tensor_wrapping.py
new file mode 100755
index 000000000..b1fba57d2
--- /dev/null
+++ b/tools/tflitefile_tool/tensor_wrapping.py
@@ -0,0 +1,54 @@
+#!/usr/bin/python
+
+import tflite.Tensor
+import tflite.TensorType
+
+TensorTypeList = {}
+
+
+def SetTensorTypeStr():
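+    # Build a reverse map (enum value -> name) from the generated TensorType class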
+ tensorTypeObj = tflite.TensorType.TensorType()
+
+    for fieldName in dir(tensorTypeObj):
+        if not fieldName.startswith('_'):
+            fieldValue = getattr(tensorTypeObj, fieldName)
+            if isinstance(fieldValue, int):
+                TensorTypeList[fieldValue] = fieldName
+
+
+class Tensor(object):
+ def __init__(self, tensor_idx, tf_tensor, tf_buffer):
+ self.tensor_idx = tensor_idx
+ self.tf_tensor = tf_tensor
+ self.tf_buffer = tf_buffer
+
+ def PrintInfo(self, depth_str=""):
+ print_str = ""
+ if self.tensor_idx < 0:
+ print_str = "Tensor {0:4}".format(self.tensor_idx)
+ else:
+ buffer_idx = self.tf_tensor.Buffer()
+ isEmpty = "Filled"
+            if self.tf_buffer.DataLength() == 0:
+ isEmpty = " Empty"
+ shape_str = self.GetShapeString()
+ type_name = TensorTypeList[self.tf_tensor.Type()]
+
+            tensor_name = ""
+            if self.tf_tensor.Name():  # Name() returns None when the tensor has no name
+                tensor_name = self.tf_tensor.Name()
+
+            print_str = "Tensor {0:4} : buffer {1:4} | {2} | {3:7} | Shape {4} ({5})".format(
+                self.tensor_idx, buffer_idx, isEmpty, type_name, shape_str, tensor_name)
+ print(depth_str + print_str)
+
+ def GetShapeString(self):
+ if self.tf_tensor.ShapeLength() == 0:
+ return "Scalar"
+ return_string = "["
+ for shape_idx in range(self.tf_tensor.ShapeLength()):
+            if shape_idx != 0:
+ return_string += ", "
+ return_string += str(self.tf_tensor.Shape(shape_idx))
+ return_string += "]"
+ return return_string
diff --git a/tools/tflitefile_tool/tflite/ActivationFunctionType.py b/tools/tflitefile_tool/tflite/ActivationFunctionType.py
new file mode 100644
index 000000000..a32353964
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/ActivationFunctionType.py
@@ -0,0 +1,12 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+
+class ActivationFunctionType(object):
+ NONE = 0
+ RELU = 1
+ RELU_N1_TO_1 = 2
+ RELU6 = 3
+ TANH = 4
+ SIGN_BIT = 5
diff --git a/tools/tflitefile_tool/tflite/AddOptions.py b/tools/tflitefile_tool/tflite/AddOptions.py
new file mode 100644
index 000000000..c9f3387ca
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/AddOptions.py
@@ -0,0 +1,39 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class AddOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsAddOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = AddOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # AddOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # AddOptions
+ def FusedActivationFunction(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+
+def AddOptionsStart(builder):
+ builder.StartObject(1)
+
+
+def AddOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+ builder.PrependInt8Slot(0, fusedActivationFunction, 0)
+
+
+def AddOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/ArgMaxOptions.py b/tools/tflitefile_tool/tflite/ArgMaxOptions.py
new file mode 100644
index 000000000..23cbfd731
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/ArgMaxOptions.py
@@ -0,0 +1,39 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class ArgMaxOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsArgMaxOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = ArgMaxOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # ArgMaxOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # ArgMaxOptions
+ def OutputType(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+
+def ArgMaxOptionsStart(builder):
+ builder.StartObject(1)
+
+
+def ArgMaxOptionsAddOutputType(builder, outputType):
+ builder.PrependInt8Slot(0, outputType, 0)
+
+
+def ArgMaxOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/ArgMinOptions.py b/tools/tflitefile_tool/tflite/ArgMinOptions.py
new file mode 100644
index 000000000..6a2dcdfe1
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/ArgMinOptions.py
@@ -0,0 +1,39 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class ArgMinOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsArgMinOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = ArgMinOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # ArgMinOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # ArgMinOptions
+ def OutputType(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+
+def ArgMinOptionsStart(builder):
+ builder.StartObject(1)
+
+
+def ArgMinOptionsAddOutputType(builder, outputType):
+ builder.PrependInt8Slot(0, outputType, 0)
+
+
+def ArgMinOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/BatchToSpaceNDOptions.py b/tools/tflitefile_tool/tflite/BatchToSpaceNDOptions.py
new file mode 100644
index 000000000..48a7d4c23
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/BatchToSpaceNDOptions.py
@@ -0,0 +1,28 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class BatchToSpaceNDOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsBatchToSpaceNDOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = BatchToSpaceNDOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # BatchToSpaceNDOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+
+def BatchToSpaceNDOptionsStart(builder):
+ builder.StartObject(0)
+
+
+def BatchToSpaceNDOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/BidirectionalSequenceRNNOptions.py b/tools/tflitefile_tool/tflite/BidirectionalSequenceRNNOptions.py
new file mode 100644
index 000000000..5c057b6bf
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/BidirectionalSequenceRNNOptions.py
@@ -0,0 +1,51 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class BidirectionalSequenceRNNOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsBidirectionalSequenceRNNOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = BidirectionalSequenceRNNOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # BidirectionalSequenceRNNOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # BidirectionalSequenceRNNOptions
+ def TimeMajor(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)
+ return 0
+
+ # BidirectionalSequenceRNNOptions
+ def FusedActivationFunction(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+
+def BidirectionalSequenceRNNOptionsStart(builder):
+ builder.StartObject(2)
+
+
+def BidirectionalSequenceRNNOptionsAddTimeMajor(builder, timeMajor):
+ builder.PrependBoolSlot(0, timeMajor, 0)
+
+
+def BidirectionalSequenceRNNOptionsAddFusedActivationFunction(builder,
+ fusedActivationFunction):
+ builder.PrependInt8Slot(1, fusedActivationFunction, 0)
+
+
+def BidirectionalSequenceRNNOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/Buffer.py b/tools/tflitefile_tool/tflite/Buffer.py
new file mode 100644
index 000000000..d3fa8e9de
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/Buffer.py
@@ -0,0 +1,61 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class Buffer(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsBuffer(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = Buffer()
+ x.Init(buf, n + offset)
+ return x
+
+ # Buffer
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # Buffer
+ def Data(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ a = self._tab.Vector(o)
+ return self._tab.Get(
+ flatbuffers.number_types.Uint8Flags,
+ a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
+ return 0
+
+ # Buffer
+ def DataAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
+ return 0
+
+ # Buffer
+ def DataLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+
+def BufferStart(builder):
+ builder.StartObject(1)
+
+
+def BufferAddData(builder, data):
+ builder.PrependUOffsetTRelativeSlot(
+ 0, flatbuffers.number_types.UOffsetTFlags.py_type(data), 0)
+
+
+def BufferStartDataVector(builder, numElems):
+ return builder.StartVector(1, numElems, 1)
+
+
+def BufferEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/BuiltinOperator.py b/tools/tflitefile_tool/tflite/BuiltinOperator.py
new file mode 100644
index 000000000..2beda098e
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/BuiltinOperator.py
@@ -0,0 +1,86 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+
+class BuiltinOperator(object):
+ ADD = 0
+ AVERAGE_POOL_2D = 1
+ CONCATENATION = 2
+ CONV_2D = 3
+ DEPTHWISE_CONV_2D = 4
+ DEQUANTIZE = 6
+ EMBEDDING_LOOKUP = 7
+ FLOOR = 8
+ FULLY_CONNECTED = 9
+ HASHTABLE_LOOKUP = 10
+ L2_NORMALIZATION = 11
+ L2_POOL_2D = 12
+ LOCAL_RESPONSE_NORMALIZATION = 13
+ LOGISTIC = 14
+ LSH_PROJECTION = 15
+ LSTM = 16
+ MAX_POOL_2D = 17
+ MUL = 18
+ RELU = 19
+ RELU_N1_TO_1 = 20
+ RELU6 = 21
+ RESHAPE = 22
+ RESIZE_BILINEAR = 23
+ RNN = 24
+ SOFTMAX = 25
+ SPACE_TO_DEPTH = 26
+ SVDF = 27
+ TANH = 28
+ CONCAT_EMBEDDINGS = 29
+ SKIP_GRAM = 30
+ CALL = 31
+ CUSTOM = 32
+ EMBEDDING_LOOKUP_SPARSE = 33
+ PAD = 34
+ UNIDIRECTIONAL_SEQUENCE_RNN = 35
+ GATHER = 36
+ BATCH_TO_SPACE_ND = 37
+ SPACE_TO_BATCH_ND = 38
+ TRANSPOSE = 39
+ MEAN = 40
+ SUB = 41
+ DIV = 42
+ SQUEEZE = 43
+ UNIDIRECTIONAL_SEQUENCE_LSTM = 44
+ STRIDED_SLICE = 45
+ BIDIRECTIONAL_SEQUENCE_RNN = 46
+ EXP = 47
+ TOPK_V2 = 48
+ SPLIT = 49
+ LOG_SOFTMAX = 50
+ DELEGATE = 51
+ BIDIRECTIONAL_SEQUENCE_LSTM = 52
+ CAST = 53
+ PRELU = 54
+ MAXIMUM = 55
+ ARG_MAX = 56
+ MINIMUM = 57
+ LESS = 58
+ NEG = 59
+ PADV2 = 60
+ GREATER = 61
+ GREATER_EQUAL = 62
+ LESS_EQUAL = 63
+ SELECT = 64
+ SLICE = 65
+ SIN = 66
+ TRANSPOSE_CONV = 67
+ SPARSE_TO_DENSE = 68
+ TILE = 69
+ EXPAND_DIMS = 70
+ EQUAL = 71
+ NOT_EQUAL = 72
+ LOG = 73
+ SUM = 74
+ SQRT = 75
+ RSQRT = 76
+ SHAPE = 77
+ POW = 78
+ ARG_MIN = 79
+ FAKE_QUANT = 80
diff --git a/tools/tflitefile_tool/tflite/BuiltinOptions.py b/tools/tflitefile_tool/tflite/BuiltinOptions.py
new file mode 100644
index 000000000..5d3040839
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/BuiltinOptions.py
@@ -0,0 +1,65 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+
+class BuiltinOptions(object):
+ NONE = 0
+ Conv2DOptions = 1
+ DepthwiseConv2DOptions = 2
+ ConcatEmbeddingsOptions = 3
+ LSHProjectionOptions = 4
+ Pool2DOptions = 5
+ SVDFOptions = 6
+ RNNOptions = 7
+ FullyConnectedOptions = 8
+ SoftmaxOptions = 9
+ ConcatenationOptions = 10
+ AddOptions = 11
+ L2NormOptions = 12
+ LocalResponseNormalizationOptions = 13
+ LSTMOptions = 14
+ ResizeBilinearOptions = 15
+ CallOptions = 16
+ ReshapeOptions = 17
+ SkipGramOptions = 18
+ SpaceToDepthOptions = 19
+ EmbeddingLookupSparseOptions = 20
+ MulOptions = 21
+ PadOptions = 22
+ GatherOptions = 23
+ BatchToSpaceNDOptions = 24
+ SpaceToBatchNDOptions = 25
+ TransposeOptions = 26
+ ReducerOptions = 27
+ SubOptions = 28
+ DivOptions = 29
+ SqueezeOptions = 30
+ SequenceRNNOptions = 31
+ StridedSliceOptions = 32
+ ExpOptions = 33
+ TopKV2Options = 34
+ SplitOptions = 35
+ LogSoftmaxOptions = 36
+ CastOptions = 37
+ DequantizeOptions = 38
+ MaximumMinimumOptions = 39
+ ArgMaxOptions = 40
+ LessOptions = 41
+ NegOptions = 42
+ PadV2Options = 43
+ GreaterOptions = 44
+ GreaterEqualOptions = 45
+ LessEqualOptions = 46
+ SelectOptions = 47
+ SliceOptions = 48
+ TransposeConvOptions = 49
+ SparseToDenseOptions = 50
+ TileOptions = 51
+ ExpandDimsOptions = 52
+ EqualOptions = 53
+ NotEqualOptions = 54
+ ShapeOptions = 55
+ PowOptions = 56
+ ArgMinOptions = 57
+ FakeQuantOptions = 58
diff --git a/tools/tflitefile_tool/tflite/CallOptions.py b/tools/tflitefile_tool/tflite/CallOptions.py
new file mode 100644
index 000000000..a82f001fa
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/CallOptions.py
@@ -0,0 +1,39 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class CallOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsCallOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = CallOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # CallOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # CallOptions
+ def Subgraph(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
+ return 0
+
+
+def CallOptionsStart(builder):
+ builder.StartObject(1)
+
+
+def CallOptionsAddSubgraph(builder, subgraph):
+ builder.PrependUint32Slot(0, subgraph, 0)
+
+
+def CallOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/CastOptions.py b/tools/tflitefile_tool/tflite/CastOptions.py
new file mode 100644
index 000000000..7f7a1dde3
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/CastOptions.py
@@ -0,0 +1,50 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class CastOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsCastOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = CastOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # CastOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # CastOptions
+ def InDataType(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+ # CastOptions
+ def OutDataType(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+
+def CastOptionsStart(builder):
+ builder.StartObject(2)
+
+
+def CastOptionsAddInDataType(builder, inDataType):
+ builder.PrependInt8Slot(0, inDataType, 0)
+
+
+def CastOptionsAddOutDataType(builder, outDataType):
+ builder.PrependInt8Slot(1, outDataType, 0)
+
+
+def CastOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/CombinerType.py b/tools/tflitefile_tool/tflite/CombinerType.py
new file mode 100644
index 000000000..dfe8afb9f
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/CombinerType.py
@@ -0,0 +1,9 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+
+class CombinerType(object):
+ SUM = 0
+ MEAN = 1
+ SQRTN = 2
diff --git a/tools/tflitefile_tool/tflite/ConcatEmbeddingsOptions.py b/tools/tflitefile_tool/tflite/ConcatEmbeddingsOptions.py
new file mode 100644
index 000000000..6ca04a51f
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/ConcatEmbeddingsOptions.py
@@ -0,0 +1,105 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class ConcatEmbeddingsOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsConcatEmbeddingsOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = ConcatEmbeddingsOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # ConcatEmbeddingsOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # ConcatEmbeddingsOptions
+ def NumChannels(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+ # ConcatEmbeddingsOptions
+ def NumColumnsPerChannel(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ a = self._tab.Vector(o)
+ return self._tab.Get(
+ flatbuffers.number_types.Int32Flags,
+ a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+ return 0
+
+ # ConcatEmbeddingsOptions
+ def NumColumnsPerChannelAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
+ return 0
+
+ # ConcatEmbeddingsOptions
+ def NumColumnsPerChannelLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # ConcatEmbeddingsOptions
+ def EmbeddingDimPerChannel(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ a = self._tab.Vector(o)
+ return self._tab.Get(
+ flatbuffers.number_types.Int32Flags,
+ a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+ return 0
+
+ # ConcatEmbeddingsOptions
+ def EmbeddingDimPerChannelAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
+ return 0
+
+ # ConcatEmbeddingsOptions
+ def EmbeddingDimPerChannelLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+
+def ConcatEmbeddingsOptionsStart(builder):
+ builder.StartObject(3)
+
+
+def ConcatEmbeddingsOptionsAddNumChannels(builder, numChannels):
+ builder.PrependInt32Slot(0, numChannels, 0)
+
+
+def ConcatEmbeddingsOptionsAddNumColumnsPerChannel(builder, numColumnsPerChannel):
+ builder.PrependUOffsetTRelativeSlot(
+ 1, flatbuffers.number_types.UOffsetTFlags.py_type(numColumnsPerChannel), 0)
+
+
+def ConcatEmbeddingsOptionsStartNumColumnsPerChannelVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+
+def ConcatEmbeddingsOptionsAddEmbeddingDimPerChannel(builder, embeddingDimPerChannel):
+ builder.PrependUOffsetTRelativeSlot(
+ 2, flatbuffers.number_types.UOffsetTFlags.py_type(embeddingDimPerChannel), 0)
+
+
+def ConcatEmbeddingsOptionsStartEmbeddingDimPerChannelVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+
+def ConcatEmbeddingsOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/ConcatenationOptions.py b/tools/tflitefile_tool/tflite/ConcatenationOptions.py
new file mode 100644
index 000000000..ea089ac56
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/ConcatenationOptions.py
@@ -0,0 +1,50 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class ConcatenationOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsConcatenationOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = ConcatenationOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # ConcatenationOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # ConcatenationOptions
+ def Axis(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+ # ConcatenationOptions
+ def FusedActivationFunction(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+
+def ConcatenationOptionsStart(builder):
+ builder.StartObject(2)
+
+
+def ConcatenationOptionsAddAxis(builder, axis):
+ builder.PrependInt32Slot(0, axis, 0)
+
+
+def ConcatenationOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+ builder.PrependInt8Slot(1, fusedActivationFunction, 0)
+
+
+def ConcatenationOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/Conv2DOptions.py b/tools/tflitefile_tool/tflite/Conv2DOptions.py
new file mode 100644
index 000000000..913729522
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/Conv2DOptions.py
@@ -0,0 +1,94 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class Conv2DOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsConv2DOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = Conv2DOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # Conv2DOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # Conv2DOptions
+ def Padding(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+ # Conv2DOptions
+ def StrideW(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+ # Conv2DOptions
+ def StrideH(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+ # Conv2DOptions
+ def FusedActivationFunction(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+ # Conv2DOptions
+ def DilationWFactor(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 1
+
+ # Conv2DOptions
+ def DilationHFactor(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 1
+
+
+def Conv2DOptionsStart(builder):
+ builder.StartObject(6)
+
+
+def Conv2DOptionsAddPadding(builder, padding):
+ builder.PrependInt8Slot(0, padding, 0)
+
+
+def Conv2DOptionsAddStrideW(builder, strideW):
+ builder.PrependInt32Slot(1, strideW, 0)
+
+
+def Conv2DOptionsAddStrideH(builder, strideH):
+ builder.PrependInt32Slot(2, strideH, 0)
+
+
+def Conv2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+ builder.PrependInt8Slot(3, fusedActivationFunction, 0)
+
+
+def Conv2DOptionsAddDilationWFactor(builder, dilationWFactor):
+ builder.PrependInt32Slot(4, dilationWFactor, 1)
+
+
+def Conv2DOptionsAddDilationHFactor(builder, dilationHFactor):
+ builder.PrependInt32Slot(5, dilationHFactor, 1)
+
+
+def Conv2DOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/CustomOptionsFormat.py b/tools/tflitefile_tool/tflite/CustomOptionsFormat.py
new file mode 100644
index 000000000..18bc07d02
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/CustomOptionsFormat.py
@@ -0,0 +1,7 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+
+class CustomOptionsFormat(object):
+ FLEXBUFFERS = 0
diff --git a/tools/tflitefile_tool/tflite/DepthwiseConv2DOptions.py b/tools/tflitefile_tool/tflite/DepthwiseConv2DOptions.py
new file mode 100644
index 000000000..9f0b3388f
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/DepthwiseConv2DOptions.py
@@ -0,0 +1,83 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class DepthwiseConv2DOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsDepthwiseConv2DOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = DepthwiseConv2DOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # DepthwiseConv2DOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # DepthwiseConv2DOptions
+ def Padding(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+ # DepthwiseConv2DOptions
+ def StrideW(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+ # DepthwiseConv2DOptions
+ def StrideH(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+ # DepthwiseConv2DOptions
+ def DepthMultiplier(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+ # DepthwiseConv2DOptions
+ def FusedActivationFunction(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+
+def DepthwiseConv2DOptionsStart(builder):
+ builder.StartObject(5)
+
+
+def DepthwiseConv2DOptionsAddPadding(builder, padding):
+ builder.PrependInt8Slot(0, padding, 0)
+
+
+def DepthwiseConv2DOptionsAddStrideW(builder, strideW):
+ builder.PrependInt32Slot(1, strideW, 0)
+
+
+def DepthwiseConv2DOptionsAddStrideH(builder, strideH):
+ builder.PrependInt32Slot(2, strideH, 0)
+
+
+def DepthwiseConv2DOptionsAddDepthMultiplier(builder, depthMultiplier):
+ builder.PrependInt32Slot(3, depthMultiplier, 0)
+
+
+def DepthwiseConv2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+ builder.PrependInt8Slot(4, fusedActivationFunction, 0)
+
+
+def DepthwiseConv2DOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/DequantizeOptions.py b/tools/tflitefile_tool/tflite/DequantizeOptions.py
new file mode 100644
index 000000000..fe4cc9a06
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/DequantizeOptions.py
@@ -0,0 +1,28 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class DequantizeOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsDequantizeOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = DequantizeOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # DequantizeOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+
+def DequantizeOptionsStart(builder):
+ builder.StartObject(0)
+
+
+def DequantizeOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/DivOptions.py b/tools/tflitefile_tool/tflite/DivOptions.py
new file mode 100644
index 000000000..53bbae542
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/DivOptions.py
@@ -0,0 +1,39 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class DivOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsDivOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = DivOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # DivOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # DivOptions
+ def FusedActivationFunction(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+
+def DivOptionsStart(builder):
+ builder.StartObject(1)
+
+
+def DivOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+ builder.PrependInt8Slot(0, fusedActivationFunction, 0)
+
+
+def DivOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/EmbeddingLookupSparseOptions.py b/tools/tflitefile_tool/tflite/EmbeddingLookupSparseOptions.py
new file mode 100644
index 000000000..12531d3d5
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/EmbeddingLookupSparseOptions.py
@@ -0,0 +1,39 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class EmbeddingLookupSparseOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsEmbeddingLookupSparseOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = EmbeddingLookupSparseOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # EmbeddingLookupSparseOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # EmbeddingLookupSparseOptions
+ def Combiner(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+
+def EmbeddingLookupSparseOptionsStart(builder):
+ builder.StartObject(1)
+
+
+def EmbeddingLookupSparseOptionsAddCombiner(builder, combiner):
+ builder.PrependInt8Slot(0, combiner, 0)
+
+
+def EmbeddingLookupSparseOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/EqualOptions.py b/tools/tflitefile_tool/tflite/EqualOptions.py
new file mode 100644
index 000000000..968712ff8
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/EqualOptions.py
@@ -0,0 +1,28 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class EqualOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsEqualOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = EqualOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # EqualOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+
+def EqualOptionsStart(builder):
+ builder.StartObject(0)
+
+
+def EqualOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/ExpOptions.py b/tools/tflitefile_tool/tflite/ExpOptions.py
new file mode 100644
index 000000000..f8c7bd867
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/ExpOptions.py
@@ -0,0 +1,28 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class ExpOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsExpOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = ExpOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # ExpOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+
+def ExpOptionsStart(builder):
+ builder.StartObject(0)
+
+
+def ExpOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/ExpandDimsOptions.py b/tools/tflitefile_tool/tflite/ExpandDimsOptions.py
new file mode 100644
index 000000000..2dd8d506c
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/ExpandDimsOptions.py
@@ -0,0 +1,28 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class ExpandDimsOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsExpandDimsOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = ExpandDimsOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # ExpandDimsOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+
+def ExpandDimsOptionsStart(builder):
+ builder.StartObject(0)
+
+
+def ExpandDimsOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/FakeQuantOptions.py b/tools/tflitefile_tool/tflite/FakeQuantOptions.py
new file mode 100644
index 000000000..fc8023e60
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/FakeQuantOptions.py
@@ -0,0 +1,72 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class FakeQuantOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsFakeQuantOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = FakeQuantOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # FakeQuantOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # FakeQuantOptions
+ def Min(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
+ return 0.0
+
+ # FakeQuantOptions
+ def Max(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
+ return 0.0
+
+ # FakeQuantOptions
+ def NumBits(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+ # FakeQuantOptions
+ def NarrowRange(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)
+ return 0
+
+
+def FakeQuantOptionsStart(builder):
+ builder.StartObject(4)
+
+
+def FakeQuantOptionsAddMin(builder, min):
+ builder.PrependFloat32Slot(0, min, 0.0)
+
+
+def FakeQuantOptionsAddMax(builder, max):
+ builder.PrependFloat32Slot(1, max, 0.0)
+
+
+def FakeQuantOptionsAddNumBits(builder, numBits):
+ builder.PrependInt32Slot(2, numBits, 0)
+
+
+def FakeQuantOptionsAddNarrowRange(builder, narrowRange):
+ builder.PrependBoolSlot(3, narrowRange, 0)
+
+
+def FakeQuantOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/FullyConnectedOptions.py b/tools/tflitefile_tool/tflite/FullyConnectedOptions.py
new file mode 100644
index 000000000..59c2a367a
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/FullyConnectedOptions.py
@@ -0,0 +1,50 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class FullyConnectedOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsFullyConnectedOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = FullyConnectedOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # FullyConnectedOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # FullyConnectedOptions
+ def FusedActivationFunction(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+ # FullyConnectedOptions
+ def WeightsFormat(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+
+def FullyConnectedOptionsStart(builder):
+ builder.StartObject(2)
+
+
+def FullyConnectedOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+ builder.PrependInt8Slot(0, fusedActivationFunction, 0)
+
+
+def FullyConnectedOptionsAddWeightsFormat(builder, weightsFormat):
+ builder.PrependInt8Slot(1, weightsFormat, 0)
+
+
+def FullyConnectedOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/FullyConnectedOptionsWeightsFormat.py b/tools/tflitefile_tool/tflite/FullyConnectedOptionsWeightsFormat.py
new file mode 100644
index 000000000..143fc5122
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/FullyConnectedOptionsWeightsFormat.py
@@ -0,0 +1,8 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+
+class FullyConnectedOptionsWeightsFormat(object):
+ DEFAULT = 0
+ SHUFFLED4x16INT8 = 1
diff --git a/tools/tflitefile_tool/tflite/GatherOptions.py b/tools/tflitefile_tool/tflite/GatherOptions.py
new file mode 100644
index 000000000..cfb54496b
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/GatherOptions.py
@@ -0,0 +1,39 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class GatherOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsGatherOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = GatherOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # GatherOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # GatherOptions
+ def Axis(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+
+def GatherOptionsStart(builder):
+ builder.StartObject(1)
+
+
+def GatherOptionsAddAxis(builder, axis):
+ builder.PrependInt32Slot(0, axis, 0)
+
+
+def GatherOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/GreaterEqualOptions.py b/tools/tflitefile_tool/tflite/GreaterEqualOptions.py
new file mode 100644
index 000000000..12df3c88c
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/GreaterEqualOptions.py
@@ -0,0 +1,28 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class GreaterEqualOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsGreaterEqualOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = GreaterEqualOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # GreaterEqualOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+
+def GreaterEqualOptionsStart(builder):
+ builder.StartObject(0)
+
+
+def GreaterEqualOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/GreaterOptions.py b/tools/tflitefile_tool/tflite/GreaterOptions.py
new file mode 100644
index 000000000..614cdb290
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/GreaterOptions.py
@@ -0,0 +1,28 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class GreaterOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsGreaterOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = GreaterOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # GreaterOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+
+def GreaterOptionsStart(builder):
+ builder.StartObject(0)
+
+
+def GreaterOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/L2NormOptions.py b/tools/tflitefile_tool/tflite/L2NormOptions.py
new file mode 100644
index 000000000..1d3ab1ec8
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/L2NormOptions.py
@@ -0,0 +1,39 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class L2NormOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsL2NormOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = L2NormOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # L2NormOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # L2NormOptions
+ def FusedActivationFunction(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+
+def L2NormOptionsStart(builder):
+ builder.StartObject(1)
+
+
+def L2NormOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+ builder.PrependInt8Slot(0, fusedActivationFunction, 0)
+
+
+def L2NormOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/LSHProjectionOptions.py b/tools/tflitefile_tool/tflite/LSHProjectionOptions.py
new file mode 100644
index 000000000..055eb75ff
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/LSHProjectionOptions.py
@@ -0,0 +1,39 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class LSHProjectionOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsLSHProjectionOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = LSHProjectionOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # LSHProjectionOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # LSHProjectionOptions
+ def Type(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+
+def LSHProjectionOptionsStart(builder):
+ builder.StartObject(1)
+
+
+def LSHProjectionOptionsAddType(builder, type):
+ builder.PrependInt8Slot(0, type, 0)
+
+
+def LSHProjectionOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/LSHProjectionType.py b/tools/tflitefile_tool/tflite/LSHProjectionType.py
new file mode 100644
index 000000000..328179114
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/LSHProjectionType.py
@@ -0,0 +1,9 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+
+class LSHProjectionType(object):
+ UNKNOWN = 0
+ SPARSE = 1
+ DENSE = 2
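
Printing tools often need the symbolic name back from the stored int8. A hedged helper, relying only on the fact that these generated enum classes hold nothing but integer constants:

    from tflite.LSHProjectionType import LSHProjectionType

    def lsh_type_name(value):
        # Reverse-map an enum int to its name; dunder attributes are filtered out.
        for name, v in vars(LSHProjectionType).items():
            if not name.startswith('_') and v == value:
                return name
        return str(value)
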
diff --git a/tools/tflitefile_tool/tflite/LSTMKernelType.py b/tools/tflitefile_tool/tflite/LSTMKernelType.py
new file mode 100644
index 000000000..f0e96f3fc
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/LSTMKernelType.py
@@ -0,0 +1,8 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+
+class LSTMKernelType(object):
+ FULL = 0
+ BASIC = 1
diff --git a/tools/tflitefile_tool/tflite/LSTMOptions.py b/tools/tflitefile_tool/tflite/LSTMOptions.py
new file mode 100644
index 000000000..97c5bd8a4
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/LSTMOptions.py
@@ -0,0 +1,72 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class LSTMOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsLSTMOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = LSTMOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # LSTMOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # LSTMOptions
+ def FusedActivationFunction(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+ # LSTMOptions
+ def CellClip(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
+ return 0.0
+
+ # LSTMOptions
+ def ProjClip(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
+ return 0.0
+
+ # LSTMOptions
+ def KernelType(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+
+def LSTMOptionsStart(builder):
+ builder.StartObject(4)
+
+
+def LSTMOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+ builder.PrependInt8Slot(0, fusedActivationFunction, 0)
+
+
+def LSTMOptionsAddCellClip(builder, cellClip):
+ builder.PrependFloat32Slot(1, cellClip, 0.0)
+
+
+def LSTMOptionsAddProjClip(builder, projClip):
+ builder.PrependFloat32Slot(2, projClip, 0.0)
+
+
+def LSTMOptionsAddKernelType(builder, kernelType):
+ builder.PrependInt8Slot(3, kernelType, 0)
+
+
+def LSTMOptionsEnd(builder):
+ return builder.EndObject()
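
Reading the LSTM fields combines the kernel-type enum above with this options table; a sketch under the same `table` assumption as the earlier ones:

    from tflite.LSTMOptions import LSTMOptions
    from tflite.LSTMKernelType import LSTMKernelType  # defined just above

    opts = LSTMOptions()
    opts.Init(table.Bytes, table.Pos)  # table: union payload (assumed)
    kind = 'BASIC' if opts.KernelType() == LSTMKernelType.BASIC else 'FULL'
    print(kind, 'cell clip:', opts.CellClip(), 'proj clip:', opts.ProjClip())
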
diff --git a/tools/tflitefile_tool/tflite/LessEqualOptions.py b/tools/tflitefile_tool/tflite/LessEqualOptions.py
new file mode 100644
index 000000000..ef93bcc9e
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/LessEqualOptions.py
@@ -0,0 +1,28 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class LessEqualOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsLessEqualOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = LessEqualOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # LessEqualOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+
+def LessEqualOptionsStart(builder):
+ builder.StartObject(0)
+
+
+def LessEqualOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/LessOptions.py b/tools/tflitefile_tool/tflite/LessOptions.py
new file mode 100644
index 000000000..a94b37f17
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/LessOptions.py
@@ -0,0 +1,28 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class LessOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsLessOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = LessOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # LessOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+
+def LessOptionsStart(builder):
+ builder.StartObject(0)
+
+
+def LessOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/LocalResponseNormalizationOptions.py b/tools/tflitefile_tool/tflite/LocalResponseNormalizationOptions.py
new file mode 100644
index 000000000..fd9117ac5
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/LocalResponseNormalizationOptions.py
@@ -0,0 +1,72 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class LocalResponseNormalizationOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsLocalResponseNormalizationOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = LocalResponseNormalizationOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # LocalResponseNormalizationOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # LocalResponseNormalizationOptions
+ def Radius(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+ # LocalResponseNormalizationOptions
+ def Bias(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
+ return 0.0
+
+ # LocalResponseNormalizationOptions
+ def Alpha(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
+ return 0.0
+
+ # LocalResponseNormalizationOptions
+ def Beta(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
+ return 0.0
+
+
+def LocalResponseNormalizationOptionsStart(builder):
+ builder.StartObject(4)
+
+
+def LocalResponseNormalizationOptionsAddRadius(builder, radius):
+ builder.PrependInt32Slot(0, radius, 0)
+
+
+def LocalResponseNormalizationOptionsAddBias(builder, bias):
+ builder.PrependFloat32Slot(1, bias, 0.0)
+
+
+def LocalResponseNormalizationOptionsAddAlpha(builder, alpha):
+ builder.PrependFloat32Slot(2, alpha, 0.0)
+
+
+def LocalResponseNormalizationOptionsAddBeta(builder, beta):
+ builder.PrependFloat32Slot(3, beta, 0.0)
+
+
+def LocalResponseNormalizationOptionsEnd(builder):
+ return builder.EndObject()
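
The module-level functions are the write path: start an object, prepend slots in any order, end it. A construction sketch; the parameter values are illustrative only:

    import flatbuffers
    from tflite import LocalResponseNormalizationOptions as lrn

    builder = flatbuffers.Builder(0)
    lrn.LocalResponseNormalizationOptionsStart(builder)
    lrn.LocalResponseNormalizationOptionsAddRadius(builder, 5)    # illustrative values
    lrn.LocalResponseNormalizationOptionsAddBias(builder, 1.0)
    lrn.LocalResponseNormalizationOptionsAddAlpha(builder, 1e-4)
    lrn.LocalResponseNormalizationOptionsAddBeta(builder, 0.75)
    opts = lrn.LocalResponseNormalizationOptionsEnd(builder)      # offset, usable as a slot value
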
diff --git a/tools/tflitefile_tool/tflite/LogSoftmaxOptions.py b/tools/tflitefile_tool/tflite/LogSoftmaxOptions.py
new file mode 100644
index 000000000..1b059d22f
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/LogSoftmaxOptions.py
@@ -0,0 +1,28 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class LogSoftmaxOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsLogSoftmaxOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = LogSoftmaxOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # LogSoftmaxOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+
+def LogSoftmaxOptionsStart(builder):
+ builder.StartObject(0)
+
+
+def LogSoftmaxOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/MaximumMinimumOptions.py b/tools/tflitefile_tool/tflite/MaximumMinimumOptions.py
new file mode 100644
index 000000000..c99494be3
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/MaximumMinimumOptions.py
@@ -0,0 +1,28 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class MaximumMinimumOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsMaximumMinimumOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = MaximumMinimumOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # MaximumMinimumOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+
+def MaximumMinimumOptionsStart(builder):
+ builder.StartObject(0)
+
+
+def MaximumMinimumOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/MeanOptions.py b/tools/tflitefile_tool/tflite/MeanOptions.py
new file mode 100644
index 000000000..9d49119ac
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/MeanOptions.py
@@ -0,0 +1,39 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class MeanOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsMeanOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = MeanOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # MeanOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # MeanOptions
+ def KeepDims(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)
+ return 0
+
+
+def MeanOptionsStart(builder):
+ builder.StartObject(1)
+
+
+def MeanOptionsAddKeepDims(builder, keepDims):
+ builder.PrependBoolSlot(0, keepDims, 0)
+
+
+def MeanOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/Model.py b/tools/tflitefile_tool/tflite/Model.py
new file mode 100644
index 000000000..4d1e01f44
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/Model.py
@@ -0,0 +1,171 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class Model(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsModel(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = Model()
+ x.Init(buf, n + offset)
+ return x
+
+ # Model
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # Model
+ def Version(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
+ return 0
+
+ # Model
+ def OperatorCodes(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ x = self._tab.Vector(o)
+ x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
+ x = self._tab.Indirect(x)
+ from .OperatorCode import OperatorCode
+ obj = OperatorCode()
+ obj.Init(self._tab.Bytes, x)
+ return obj
+ return None
+
+ # Model
+ def OperatorCodesLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # Model
+ def Subgraphs(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ x = self._tab.Vector(o)
+ x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
+ x = self._tab.Indirect(x)
+ from .SubGraph import SubGraph
+ obj = SubGraph()
+ obj.Init(self._tab.Bytes, x)
+ return obj
+ return None
+
+ # Model
+ def SubgraphsLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # Model
+ def Description(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+ if o != 0:
+ return self._tab.String(o + self._tab.Pos)
+ return ""
+
+ # Model
+ def Buffers(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+ if o != 0:
+ x = self._tab.Vector(o)
+ x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
+ x = self._tab.Indirect(x)
+ from .Buffer import Buffer
+ obj = Buffer()
+ obj.Init(self._tab.Bytes, x)
+ return obj
+ return None
+
+ # Model
+ def BuffersLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # Model
+ def MetadataBuffer(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+ if o != 0:
+ a = self._tab.Vector(o)
+ return self._tab.Get(
+ flatbuffers.number_types.Int32Flags,
+ a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+ return 0
+
+ # Model
+ def MetadataBufferAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+ if o != 0:
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
+ return 0
+
+ # Model
+ def MetadataBufferLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+
+def ModelStart(builder):
+ builder.StartObject(6)
+
+
+def ModelAddVersion(builder, version):
+ builder.PrependUint32Slot(0, version, 0)
+
+
+def ModelAddOperatorCodes(builder, operatorCodes):
+ builder.PrependUOffsetTRelativeSlot(
+ 1, flatbuffers.number_types.UOffsetTFlags.py_type(operatorCodes), 0)
+
+
+def ModelStartOperatorCodesVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+
+def ModelAddSubgraphs(builder, subgraphs):
+ builder.PrependUOffsetTRelativeSlot(
+ 2, flatbuffers.number_types.UOffsetTFlags.py_type(subgraphs), 0)
+
+
+def ModelStartSubgraphsVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+
+def ModelAddDescription(builder, description):
+ builder.PrependUOffsetTRelativeSlot(
+ 3, flatbuffers.number_types.UOffsetTFlags.py_type(description), 0)
+
+
+def ModelAddBuffers(builder, buffers):
+ builder.PrependUOffsetTRelativeSlot(
+ 4, flatbuffers.number_types.UOffsetTFlags.py_type(buffers), 0)
+
+
+def ModelStartBuffersVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+
+def ModelAddMetadataBuffer(builder, metadataBuffer):
+ builder.PrependUOffsetTRelativeSlot(
+ 5, flatbuffers.number_types.UOffsetTFlags.py_type(metadataBuffer), 0)
+
+
+def ModelStartMetadataBufferVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+
+def ModelEnd(builder):
+ return builder.EndObject()
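
Model is the root table, so this is where parsing a .tflite file starts. A minimal sketch; the file path is a placeholder and SubGraph's accessors are assumed from its generated module elsewhere in this diff:

    from tflite.Model import Model

    with open('model.tflite', 'rb') as f:   # placeholder path
        buf = bytearray(f.read())

    model = Model.GetRootAsModel(buf, 0)
    print('schema version:', model.Version())
    print('description:', model.Description())
    for i in range(model.SubgraphsLength()):
        graph = model.Subgraphs(i)           # tflite.SubGraph.SubGraph (assumed)
        print('subgraph', i, 'operators:', graph.OperatorsLength())
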
diff --git a/tools/tflitefile_tool/tflite/MulOptions.py b/tools/tflitefile_tool/tflite/MulOptions.py
new file mode 100644
index 000000000..e15c4d606
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/MulOptions.py
@@ -0,0 +1,39 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class MulOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsMulOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = MulOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # MulOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # MulOptions
+ def FusedActivationFunction(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+
+def MulOptionsStart(builder):
+ builder.StartObject(1)
+
+
+def MulOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+ builder.PrependInt8Slot(0, fusedActivationFunction, 0)
+
+
+def MulOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/NegOptions.py b/tools/tflitefile_tool/tflite/NegOptions.py
new file mode 100644
index 000000000..f3d98e782
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/NegOptions.py
@@ -0,0 +1,28 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class NegOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsNegOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = NegOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # NegOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+
+def NegOptionsStart(builder):
+ builder.StartObject(0)
+
+
+def NegOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/NotEqualOptions.py b/tools/tflitefile_tool/tflite/NotEqualOptions.py
new file mode 100644
index 000000000..25419ce53
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/NotEqualOptions.py
@@ -0,0 +1,28 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class NotEqualOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsNotEqualOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = NotEqualOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # NotEqualOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+
+def NotEqualOptionsStart(builder):
+ builder.StartObject(0)
+
+
+def NotEqualOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/Operator.py b/tools/tflitefile_tool/tflite/Operator.py
new file mode 100644
index 000000000..67cc8f0b5
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/Operator.py
@@ -0,0 +1,208 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class Operator(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsOperator(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = Operator()
+ x.Init(buf, n + offset)
+ return x
+
+ # Operator
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # Operator
+ def OpcodeIndex(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
+ return 0
+
+ # Operator
+ def Inputs(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ a = self._tab.Vector(o)
+ return self._tab.Get(
+ flatbuffers.number_types.Int32Flags,
+ a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+ return 0
+
+ # Operator
+ def InputsAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
+ return 0
+
+ # Operator
+ def InputsLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # Operator
+ def Outputs(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ a = self._tab.Vector(o)
+ return self._tab.Get(
+ flatbuffers.number_types.Int32Flags,
+ a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+ return 0
+
+ # Operator
+ def OutputsAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
+ return 0
+
+ # Operator
+ def OutputsLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # Operator
+ def BuiltinOptionsType(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
+ return 0
+
+ # Operator
+ def BuiltinOptions(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+ if o != 0:
+ from flatbuffers.table import Table
+ obj = Table(bytearray(), 0)
+ self._tab.Union(obj, o)
+ return obj
+ return None
+
+ # Operator
+ def CustomOptions(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+ if o != 0:
+ a = self._tab.Vector(o)
+ return self._tab.Get(
+ flatbuffers.number_types.Uint8Flags,
+ a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
+ return 0
+
+ # Operator
+ def CustomOptionsAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+ if o != 0:
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
+ return 0
+
+ # Operator
+ def CustomOptionsLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # Operator
+ def CustomOptionsFormat(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+ # Operator
+ def MutatingVariableInputs(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
+ if o != 0:
+ a = self._tab.Vector(o)
+ return self._tab.Get(
+ flatbuffers.number_types.BoolFlags,
+ a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
+ return 0
+
+ # Operator
+ def MutatingVariableInputsAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
+ if o != 0:
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.BoolFlags, o)
+ return 0
+
+ # Operator
+ def MutatingVariableInputsLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+
+def OperatorStart(builder):
+ builder.StartObject(8)
+
+
+def OperatorAddOpcodeIndex(builder, opcodeIndex):
+ builder.PrependUint32Slot(0, opcodeIndex, 0)
+
+
+def OperatorAddInputs(builder, inputs):
+ builder.PrependUOffsetTRelativeSlot(
+ 1, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0)
+
+
+def OperatorStartInputsVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+
+def OperatorAddOutputs(builder, outputs):
+ builder.PrependUOffsetTRelativeSlot(
+ 2, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0)
+
+
+def OperatorStartOutputsVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+
+def OperatorAddBuiltinOptionsType(builder, builtinOptionsType):
+ builder.PrependUint8Slot(3, builtinOptionsType, 0)
+
+
+def OperatorAddBuiltinOptions(builder, builtinOptions):
+ builder.PrependUOffsetTRelativeSlot(
+ 4, flatbuffers.number_types.UOffsetTFlags.py_type(builtinOptions), 0)
+
+
+def OperatorAddCustomOptions(builder, customOptions):
+ builder.PrependUOffsetTRelativeSlot(
+ 5, flatbuffers.number_types.UOffsetTFlags.py_type(customOptions), 0)
+
+
+def OperatorStartCustomOptionsVector(builder, numElems):
+ return builder.StartVector(1, numElems, 1)
+
+
+def OperatorAddCustomOptionsFormat(builder, customOptionsFormat):
+ builder.PrependInt8Slot(6, customOptionsFormat, 0)
+
+
+def OperatorAddMutatingVariableInputs(builder, mutatingVariableInputs):
+ builder.PrependUOffsetTRelativeSlot(
+ 7, flatbuffers.number_types.UOffsetTFlags.py_type(mutatingVariableInputs), 0)
+
+
+def OperatorStartMutatingVariableInputsVector(builder, numElems):
+ return builder.StartVector(1, numElems, 1)
+
+
+def OperatorEnd(builder):
+ return builder.EndObject()
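
BuiltinOptions deliberately returns only a generic flatbuffers.table.Table; the caller checks BuiltinOptionsType and re-initializes the matching options class over the same bytes. A dispatch sketch, assuming the BuiltinOptions enum and Conv2DOptions modules generated elsewhere in this diff:

    from tflite.BuiltinOptions import BuiltinOptions  # assumed: union enum from this diff
    from tflite.Conv2DOptions import Conv2DOptions    # assumed: generated in this diff

    def conv2d_options(op):
        # op: a tflite.Operator.Operator; returns Conv2DOptions or None
        if op.BuiltinOptionsType() != BuiltinOptions.Conv2DOptions:
            return None
        union_table = op.BuiltinOptions()   # generic flatbuffers.table.Table
        opts = Conv2DOptions()
        opts.Init(union_table.Bytes, union_table.Pos)
        return opts
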
diff --git a/tools/tflitefile_tool/tflite/OperatorCode.py b/tools/tflitefile_tool/tflite/OperatorCode.py
new file mode 100644
index 000000000..0f945b901
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/OperatorCode.py
@@ -0,0 +1,62 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class OperatorCode(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsOperatorCode(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = OperatorCode()
+ x.Init(buf, n + offset)
+ return x
+
+ # OperatorCode
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # OperatorCode
+ def BuiltinCode(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+ # OperatorCode
+ def CustomCode(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.String(o + self._tab.Pos)
+ return ""
+
+ # OperatorCode
+ def Version(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 1
+
+
+def OperatorCodeStart(builder):
+ builder.StartObject(3)
+
+
+def OperatorCodeAddBuiltinCode(builder, builtinCode):
+ builder.PrependInt8Slot(0, builtinCode, 0)
+
+
+def OperatorCodeAddCustomCode(builder, customCode):
+ builder.PrependUOffsetTRelativeSlot(
+ 1, flatbuffers.number_types.UOffsetTFlags.py_type(customCode), 0)
+
+
+def OperatorCodeAddVersion(builder, version):
+ builder.PrependInt32Slot(2, version, 1)
+
+
+def OperatorCodeEnd(builder):
+ return builder.EndObject()
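
Each operator's OpcodeIndex points into this table; custom operators carry their name in CustomCode. A sketch reusing `model` and `i` from the Model sketch above, and assuming the BuiltinOperator enum generated elsewhere in this diff:

    from tflite.BuiltinOperator import BuiltinOperator  # assumed: enum from this diff

    code = model.OperatorCodes(i)
    if code.BuiltinCode() == BuiltinOperator.CUSTOM:
        print('custom op:', code.CustomCode(), 'v', code.Version())
    else:
        print('builtin op:', code.BuiltinCode(), 'v', code.Version())
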
diff --git a/tools/tflitefile_tool/tflite/PadOptions.py b/tools/tflitefile_tool/tflite/PadOptions.py
new file mode 100644
index 000000000..46039443c
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/PadOptions.py
@@ -0,0 +1,28 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class PadOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsPadOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = PadOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # PadOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+
+def PadOptionsStart(builder):
+ builder.StartObject(0)
+
+
+def PadOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/PadV2Options.py b/tools/tflitefile_tool/tflite/PadV2Options.py
new file mode 100644
index 000000000..bddea9d46
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/PadV2Options.py
@@ -0,0 +1,28 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class PadV2Options(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsPadV2Options(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = PadV2Options()
+ x.Init(buf, n + offset)
+ return x
+
+ # PadV2Options
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+
+def PadV2OptionsStart(builder):
+ builder.StartObject(0)
+
+
+def PadV2OptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/Padding.py b/tools/tflitefile_tool/tflite/Padding.py
new file mode 100644
index 000000000..b8b908c0c
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/Padding.py
@@ -0,0 +1,8 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+
+class Padding(object):
+ SAME = 0
+ VALID = 1
diff --git a/tools/tflitefile_tool/tflite/Pool2DOptions.py b/tools/tflitefile_tool/tflite/Pool2DOptions.py
new file mode 100644
index 000000000..26e46f243
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/Pool2DOptions.py
@@ -0,0 +1,94 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class Pool2DOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsPool2DOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = Pool2DOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # Pool2DOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # Pool2DOptions
+ def Padding(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+ # Pool2DOptions
+ def StrideW(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+ # Pool2DOptions
+ def StrideH(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+ # Pool2DOptions
+ def FilterWidth(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+ # Pool2DOptions
+ def FilterHeight(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+ # Pool2DOptions
+ def FusedActivationFunction(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+
+def Pool2DOptionsStart(builder):
+ builder.StartObject(6)
+
+
+def Pool2DOptionsAddPadding(builder, padding):
+ builder.PrependInt8Slot(0, padding, 0)
+
+
+def Pool2DOptionsAddStrideW(builder, strideW):
+ builder.PrependInt32Slot(1, strideW, 0)
+
+
+def Pool2DOptionsAddStrideH(builder, strideH):
+ builder.PrependInt32Slot(2, strideH, 0)
+
+
+def Pool2DOptionsAddFilterWidth(builder, filterWidth):
+ builder.PrependInt32Slot(3, filterWidth, 0)
+
+
+def Pool2DOptionsAddFilterHeight(builder, filterHeight):
+ builder.PrependInt32Slot(4, filterHeight, 0)
+
+
+def Pool2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+ builder.PrependInt8Slot(5, fusedActivationFunction, 0)
+
+
+def Pool2DOptionsEnd(builder):
+ return builder.EndObject()
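
Reading pooling parameters combines this table with the Padding enum above; a sketch under the same `table` assumption as the earlier ones:

    from tflite.Pool2DOptions import Pool2DOptions
    from tflite.Padding import Padding  # enum defined just above

    opts = Pool2DOptions()
    opts.Init(table.Bytes, table.Pos)  # table: union payload (assumed)
    pad = 'SAME' if opts.Padding() == Padding.SAME else 'VALID'
    print('%s, filter %dx%d, stride %dx%d' % (
        pad, opts.FilterWidth(), opts.FilterHeight(), opts.StrideW(), opts.StrideH()))
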
diff --git a/tools/tflitefile_tool/tflite/PowOptions.py b/tools/tflitefile_tool/tflite/PowOptions.py
new file mode 100644
index 000000000..8368ac542
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/PowOptions.py
@@ -0,0 +1,28 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class PowOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsPowOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = PowOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # PowOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+
+def PowOptionsStart(builder):
+ builder.StartObject(0)
+
+
+def PowOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/QuantizationParameters.py b/tools/tflitefile_tool/tflite/QuantizationParameters.py
new file mode 100644
index 000000000..7d5e53072
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/QuantizationParameters.py
@@ -0,0 +1,160 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class QuantizationParameters(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsQuantizationParameters(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = QuantizationParameters()
+ x.Init(buf, n + offset)
+ return x
+
+ # QuantizationParameters
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # QuantizationParameters
+ def Min(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ a = self._tab.Vector(o)
+ return self._tab.Get(
+ flatbuffers.number_types.Float32Flags,
+ a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+ return 0
+
+ # QuantizationParameters
+ def MinAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
+ return 0
+
+ # QuantizationParameters
+ def MinLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # QuantizationParameters
+ def Max(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ a = self._tab.Vector(o)
+ return self._tab.Get(
+ flatbuffers.number_types.Float32Flags,
+ a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+ return 0
+
+ # QuantizationParameters
+ def MaxAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
+ return 0
+
+ # QuantizationParameters
+ def MaxLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # QuantizationParameters
+ def Scale(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ a = self._tab.Vector(o)
+ return self._tab.Get(
+ flatbuffers.number_types.Float32Flags,
+ a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+ return 0
+
+ # QuantizationParameters
+ def ScaleAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
+ return 0
+
+ # QuantizationParameters
+ def ScaleLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # QuantizationParameters
+ def ZeroPoint(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+ if o != 0:
+ a = self._tab.Vector(o)
+ return self._tab.Get(
+ flatbuffers.number_types.Int64Flags,
+ a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
+ return 0
+
+ # QuantizationParameters
+ def ZeroPointAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+ if o != 0:
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
+ return 0
+
+ # QuantizationParameters
+ def ZeroPointLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+
+def QuantizationParametersStart(builder):
+ builder.StartObject(4)
+
+
+def QuantizationParametersAddMin(builder, min):
+ builder.PrependUOffsetTRelativeSlot(
+ 0, flatbuffers.number_types.UOffsetTFlags.py_type(min), 0)
+
+
+def QuantizationParametersStartMinVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+
+def QuantizationParametersAddMax(builder, max):
+ builder.PrependUOffsetTRelativeSlot(
+ 1, flatbuffers.number_types.UOffsetTFlags.py_type(max), 0)
+
+
+def QuantizationParametersStartMaxVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+
+def QuantizationParametersAddScale(builder, scale):
+ builder.PrependUOffsetTRelativeSlot(
+ 2, flatbuffers.number_types.UOffsetTFlags.py_type(scale), 0)
+
+
+def QuantizationParametersStartScaleVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+
+def QuantizationParametersAddZeroPoint(builder, zeroPoint):
+ builder.PrependUOffsetTRelativeSlot(
+ 3, flatbuffers.number_types.UOffsetTFlags.py_type(zeroPoint), 0)
+
+
+def QuantizationParametersStartZeroPointVector(builder, numElems):
+ return builder.StartVector(8, numElems, 8)
+
+
+def QuantizationParametersEnd(builder):
+ return builder.EndObject()
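
These vectors drive TFLite's affine quantization, real = scale * (q - zero_point). A dequantization sketch for the per-tensor case; the guards cover models that omit the vectors:

    def dequantize(qp, q):
        # qp: a QuantizationParameters read from a tensor; q: raw quantized value
        scale = qp.Scale(0) if qp.ScaleLength() > 0 else 1.0
        zero_point = qp.ZeroPoint(0) if qp.ZeroPointLength() > 0 else 0
        return scale * (q - zero_point)
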
diff --git a/tools/tflitefile_tool/tflite/RNNOptions.py b/tools/tflitefile_tool/tflite/RNNOptions.py
new file mode 100644
index 000000000..508b9c8c9
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/RNNOptions.py
@@ -0,0 +1,39 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class RNNOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsRNNOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = RNNOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # RNNOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # RNNOptions
+ def FusedActivationFunction(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+
+def RNNOptionsStart(builder):
+ builder.StartObject(1)
+
+
+def RNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+ builder.PrependInt8Slot(0, fusedActivationFunction, 0)
+
+
+def RNNOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/ReducerOptions.py b/tools/tflitefile_tool/tflite/ReducerOptions.py
new file mode 100644
index 000000000..5b6fa1acf
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/ReducerOptions.py
@@ -0,0 +1,39 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class ReducerOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsReducerOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = ReducerOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # ReducerOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # ReducerOptions
+ def KeepDims(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)
+ return 0
+
+
+def ReducerOptionsStart(builder):
+ builder.StartObject(1)
+
+
+def ReducerOptionsAddKeepDims(builder, keepDims):
+ builder.PrependBoolSlot(0, keepDims, 0)
+
+
+def ReducerOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/ReshapeOptions.py b/tools/tflitefile_tool/tflite/ReshapeOptions.py
new file mode 100644
index 000000000..b6b2b3551
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/ReshapeOptions.py
@@ -0,0 +1,61 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class ReshapeOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsReshapeOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = ReshapeOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # ReshapeOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # ReshapeOptions
+ def NewShape(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ a = self._tab.Vector(o)
+ return self._tab.Get(
+ flatbuffers.number_types.Int32Flags,
+ a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+ return 0
+
+ # ReshapeOptions
+ def NewShapeAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
+ return 0
+
+ # ReshapeOptions
+ def NewShapeLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+
+def ReshapeOptionsStart(builder):
+ builder.StartObject(1)
+
+
+def ReshapeOptionsAddNewShape(builder, newShape):
+ builder.PrependUOffsetTRelativeSlot(
+ 0, flatbuffers.number_types.UOffsetTFlags.py_type(newShape), 0)
+
+
+def ReshapeOptionsStartNewShapeVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+
+def ReshapeOptionsEnd(builder):
+ return builder.EndObject()
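
Note that the AsNumpy accessors return the int 0, not an empty array, when a vector field is absent, so callers should guard with the Length accessor first. A sketch under the same `table` assumption as the earlier ones:

    from tflite.ReshapeOptions import ReshapeOptions

    opts = ReshapeOptions()
    opts.Init(table.Bytes, table.Pos)  # table: union payload (assumed)
    if opts.NewShapeLength() > 0:
        new_shape = opts.NewShapeAsNumpy()  # numpy int32 array
    else:
        new_shape = []                      # absent field: AsNumpy would return 0
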
diff --git a/tools/tflitefile_tool/tflite/ResizeBilinearOptions.py b/tools/tflitefile_tool/tflite/ResizeBilinearOptions.py
new file mode 100644
index 000000000..66512bb1e
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/ResizeBilinearOptions.py
@@ -0,0 +1,39 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class ResizeBilinearOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsResizeBilinearOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = ResizeBilinearOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # ResizeBilinearOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # ResizeBilinearOptions
+ def AlignCorners(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)
+ return 0
+
+
+def ResizeBilinearOptionsStart(builder):
+ builder.StartObject(3)
+
+
+def ResizeBilinearOptionsAddAlignCorners(builder, alignCorners):
+ builder.PrependBoolSlot(2, alignCorners, 0)
+
+
+def ResizeBilinearOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/SVDFOptions.py b/tools/tflitefile_tool/tflite/SVDFOptions.py
new file mode 100644
index 000000000..1e65dff4b
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/SVDFOptions.py
@@ -0,0 +1,50 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class SVDFOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsSVDFOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = SVDFOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # SVDFOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # SVDFOptions
+ def Rank(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+ # SVDFOptions
+ def FusedActivationFunction(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+
+def SVDFOptionsStart(builder):
+ builder.StartObject(2)
+
+
+def SVDFOptionsAddRank(builder, rank):
+ builder.PrependInt32Slot(0, rank, 0)
+
+
+def SVDFOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+ builder.PrependInt8Slot(1, fusedActivationFunction, 0)
+
+
+def SVDFOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/SelectOptions.py b/tools/tflitefile_tool/tflite/SelectOptions.py
new file mode 100644
index 000000000..5539a87df
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/SelectOptions.py
@@ -0,0 +1,28 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class SelectOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsSelectOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = SelectOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # SelectOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+
+def SelectOptionsStart(builder):
+ builder.StartObject(0)
+
+
+def SelectOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/SequenceRNNOptions.py b/tools/tflitefile_tool/tflite/SequenceRNNOptions.py
new file mode 100644
index 000000000..bee7a0fc6
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/SequenceRNNOptions.py
@@ -0,0 +1,50 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class SequenceRNNOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsSequenceRNNOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = SequenceRNNOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # SequenceRNNOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # SequenceRNNOptions
+ def TimeMajor(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)
+ return 0
+
+ # SequenceRNNOptions
+ def FusedActivationFunction(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+
+def SequenceRNNOptionsStart(builder):
+ builder.StartObject(2)
+
+
+def SequenceRNNOptionsAddTimeMajor(builder, timeMajor):
+ builder.PrependBoolSlot(0, timeMajor, 0)
+
+
+def SequenceRNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+ builder.PrependInt8Slot(1, fusedActivationFunction, 0)
+
+
+def SequenceRNNOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/ShapeOptions.py b/tools/tflitefile_tool/tflite/ShapeOptions.py
new file mode 100644
index 000000000..939e27b88
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/ShapeOptions.py
@@ -0,0 +1,39 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class ShapeOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsShapeOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = ShapeOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # ShapeOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # ShapeOptions
+ def OutType(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+
+def ShapeOptionsStart(builder):
+ builder.StartObject(1)
+
+
+def ShapeOptionsAddOutType(builder, outType):
+ builder.PrependInt8Slot(0, outType, 0)
+
+
+def ShapeOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/SkipGramOptions.py b/tools/tflitefile_tool/tflite/SkipGramOptions.py
new file mode 100644
index 000000000..50738b924
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/SkipGramOptions.py
@@ -0,0 +1,61 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class SkipGramOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsSkipGramOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = SkipGramOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # SkipGramOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # SkipGramOptions
+ def NgramSize(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+ # SkipGramOptions
+ def MaxSkipSize(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+ # SkipGramOptions
+ def IncludeAllNgrams(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)
+ return 0
+
+
+def SkipGramOptionsStart(builder):
+ builder.StartObject(3)
+
+
+def SkipGramOptionsAddNgramSize(builder, ngramSize):
+ builder.PrependInt32Slot(0, ngramSize, 0)
+
+
+def SkipGramOptionsAddMaxSkipSize(builder, maxSkipSize):
+ builder.PrependInt32Slot(1, maxSkipSize, 0)
+
+
+def SkipGramOptionsAddIncludeAllNgrams(builder, includeAllNgrams):
+ builder.PrependBoolSlot(2, includeAllNgrams, 0)
+
+
+def SkipGramOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/SliceOptions.py b/tools/tflitefile_tool/tflite/SliceOptions.py
new file mode 100644
index 000000000..2cce3a00c
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/SliceOptions.py
@@ -0,0 +1,28 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class SliceOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsSliceOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = SliceOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # SliceOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+
+def SliceOptionsStart(builder):
+ builder.StartObject(0)
+
+
+def SliceOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/SoftmaxOptions.py b/tools/tflitefile_tool/tflite/SoftmaxOptions.py
new file mode 100644
index 000000000..05571f2f5
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/SoftmaxOptions.py
@@ -0,0 +1,39 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class SoftmaxOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsSoftmaxOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = SoftmaxOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # SoftmaxOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # SoftmaxOptions
+ def Beta(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
+ return 0.0
+
+
+def SoftmaxOptionsStart(builder):
+ builder.StartObject(1)
+
+
+def SoftmaxOptionsAddBeta(builder, beta):
+ builder.PrependFloat32Slot(0, beta, 0.0)
+
+
+def SoftmaxOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/SpaceToBatchNDOptions.py b/tools/tflitefile_tool/tflite/SpaceToBatchNDOptions.py
new file mode 100644
index 000000000..ee31e0d5f
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/SpaceToBatchNDOptions.py
@@ -0,0 +1,28 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class SpaceToBatchNDOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsSpaceToBatchNDOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = SpaceToBatchNDOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # SpaceToBatchNDOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+
+def SpaceToBatchNDOptionsStart(builder):
+ builder.StartObject(0)
+
+
+def SpaceToBatchNDOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/SpaceToDepthOptions.py b/tools/tflitefile_tool/tflite/SpaceToDepthOptions.py
new file mode 100644
index 000000000..277fa1aa5
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/SpaceToDepthOptions.py
@@ -0,0 +1,39 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class SpaceToDepthOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsSpaceToDepthOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = SpaceToDepthOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # SpaceToDepthOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # SpaceToDepthOptions
+ def BlockSize(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+
+def SpaceToDepthOptionsStart(builder):
+ builder.StartObject(1)
+
+
+def SpaceToDepthOptionsAddBlockSize(builder, blockSize):
+ builder.PrependInt32Slot(0, blockSize, 0)
+
+
+def SpaceToDepthOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/SparseToDenseOptions.py b/tools/tflitefile_tool/tflite/SparseToDenseOptions.py
new file mode 100644
index 000000000..2782ae573
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/SparseToDenseOptions.py
@@ -0,0 +1,39 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class SparseToDenseOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsSparseToDenseOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = SparseToDenseOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # SparseToDenseOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # SparseToDenseOptions
+ def ValidateIndices(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)
+ return 0
+
+
+def SparseToDenseOptionsStart(builder):
+ builder.StartObject(1)
+
+
+def SparseToDenseOptionsAddValidateIndices(builder, validateIndices):
+ builder.PrependBoolSlot(0, validateIndices, 0)
+
+
+def SparseToDenseOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/SplitOptions.py b/tools/tflitefile_tool/tflite/SplitOptions.py
new file mode 100644
index 000000000..a591e2e1e
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/SplitOptions.py
@@ -0,0 +1,39 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class SplitOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsSplitOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = SplitOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # SplitOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # SplitOptions
+ def NumSplits(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+
+def SplitOptionsStart(builder):
+ builder.StartObject(1)
+
+
+def SplitOptionsAddNumSplits(builder, numSplits):
+ builder.PrependInt32Slot(0, numSplits, 0)
+
+
+def SplitOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/SqueezeOptions.py b/tools/tflitefile_tool/tflite/SqueezeOptions.py
new file mode 100644
index 000000000..6881c114a
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/SqueezeOptions.py
@@ -0,0 +1,61 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class SqueezeOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsSqueezeOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = SqueezeOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # SqueezeOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # SqueezeOptions
+ def SqueezeDims(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ a = self._tab.Vector(o)
+ return self._tab.Get(
+ flatbuffers.number_types.Int32Flags,
+ a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+ return 0
+
+ # SqueezeOptions
+ def SqueezeDimsAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
+ return 0
+
+ # SqueezeOptions
+ def SqueezeDimsLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+
+def SqueezeOptionsStart(builder):
+ builder.StartObject(1)
+
+
+def SqueezeOptionsAddSqueezeDims(builder, squeezeDims):
+ builder.PrependUOffsetTRelativeSlot(
+ 0, flatbuffers.number_types.UOffsetTFlags.py_type(squeezeDims), 0)
+
+
+def SqueezeOptionsStartSqueezeDimsVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+
+def SqueezeOptionsEnd(builder):
+ return builder.EndObject()
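
The squeeze_dims accessors above follow the vector pattern; building one uses the back-to-front vector protocol. A sketch, assuming the flatbuffers 1.x Python API where EndVector takes the element count:

import flatbuffers

from tflite.SqueezeOptions import (SqueezeOptions, SqueezeOptionsStart,
                                   SqueezeOptionsAddSqueezeDims, SqueezeOptionsEnd,
                                   SqueezeOptionsStartSqueezeDimsVector)

builder = flatbuffers.Builder(0)
# FlatBuffers vectors are written back to front: prepend the last element first.
SqueezeOptionsStartSqueezeDimsVector(builder, 2)
builder.PrependInt32(3)
builder.PrependInt32(1)
dims = builder.EndVector(2)  # vector now reads [1, 3]

SqueezeOptionsStart(builder)
SqueezeOptionsAddSqueezeDims(builder, dims)
builder.Finish(SqueezeOptionsEnd(builder))

opts = SqueezeOptions.GetRootAsSqueezeOptions(builder.Output(), 0)
assert opts.SqueezeDimsLength() == 2
assert [opts.SqueezeDims(j) for j in range(opts.SqueezeDimsLength())] == [1, 3]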
diff --git a/tools/tflitefile_tool/tflite/StridedSliceOptions.py b/tools/tflitefile_tool/tflite/StridedSliceOptions.py
new file mode 100644
index 000000000..99db0da68
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/StridedSliceOptions.py
@@ -0,0 +1,83 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class StridedSliceOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsStridedSliceOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = StridedSliceOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # StridedSliceOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # StridedSliceOptions
+ def BeginMask(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+ # StridedSliceOptions
+ def EndMask(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+ # StridedSliceOptions
+ def EllipsisMask(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+ # StridedSliceOptions
+ def NewAxisMask(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+ # StridedSliceOptions
+ def ShrinkAxisMask(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+
+def StridedSliceOptionsStart(builder):
+ builder.StartObject(5)
+
+
+def StridedSliceOptionsAddBeginMask(builder, beginMask):
+ builder.PrependInt32Slot(0, beginMask, 0)
+
+
+def StridedSliceOptionsAddEndMask(builder, endMask):
+ builder.PrependInt32Slot(1, endMask, 0)
+
+
+def StridedSliceOptionsAddEllipsisMask(builder, ellipsisMask):
+ builder.PrependInt32Slot(2, ellipsisMask, 0)
+
+
+def StridedSliceOptionsAddNewAxisMask(builder, newAxisMask):
+ builder.PrependInt32Slot(3, newAxisMask, 0)
+
+
+def StridedSliceOptionsAddShrinkAxisMask(builder, shrinkAxisMask):
+ builder.PrependInt32Slot(4, shrinkAxisMask, 0)
+
+
+def StridedSliceOptionsEnd(builder):
+ return builder.EndObject()
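
Each of the five fields above is a per-dimension bitmask (bit d refers to dimension d of the slice spec). A small, hypothetical decoding helper — not part of the generated API:

def decode_mask(mask, ndims):
    # Return the dimensions whose bit is set in a strided-slice mask.
    return [d for d in range(ndims) if mask & (1 << d)]

# e.g. begin_mask = 0b101 over 3 dims: the begin index is ignored for dims 0 and 2.
assert decode_mask(0b101, 3) == [0, 2]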
diff --git a/tools/tflitefile_tool/tflite/SubGraph.py b/tools/tflitefile_tool/tflite/SubGraph.py
new file mode 100644
index 000000000..c20880a36
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/SubGraph.py
@@ -0,0 +1,164 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class SubGraph(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsSubGraph(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = SubGraph()
+ x.Init(buf, n + offset)
+ return x
+
+ # SubGraph
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # SubGraph
+ def Tensors(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ x = self._tab.Vector(o)
+ x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
+ x = self._tab.Indirect(x)
+ from .Tensor import Tensor
+ obj = Tensor()
+ obj.Init(self._tab.Bytes, x)
+ return obj
+ return None
+
+ # SubGraph
+ def TensorsLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # SubGraph
+ def Inputs(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ a = self._tab.Vector(o)
+ return self._tab.Get(
+ flatbuffers.number_types.Int32Flags,
+ a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+ return 0
+
+ # SubGraph
+ def InputsAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
+ return 0
+
+ # SubGraph
+ def InputsLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # SubGraph
+ def Outputs(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ a = self._tab.Vector(o)
+ return self._tab.Get(
+ flatbuffers.number_types.Int32Flags,
+ a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+ return 0
+
+ # SubGraph
+ def OutputsAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
+ return 0
+
+ # SubGraph
+ def OutputsLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # SubGraph
+ def Operators(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+ if o != 0:
+ x = self._tab.Vector(o)
+ x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
+ x = self._tab.Indirect(x)
+ from .Operator import Operator
+ obj = Operator()
+ obj.Init(self._tab.Bytes, x)
+ return obj
+ return None
+
+ # SubGraph
+ def OperatorsLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # SubGraph
+ def Name(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+ if o != 0:
+ return self._tab.String(o + self._tab.Pos)
+ return ""
+
+
+def SubGraphStart(builder):
+ builder.StartObject(5)
+
+
+def SubGraphAddTensors(builder, tensors):
+ builder.PrependUOffsetTRelativeSlot(
+ 0, flatbuffers.number_types.UOffsetTFlags.py_type(tensors), 0)
+
+
+def SubGraphStartTensorsVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+
+def SubGraphAddInputs(builder, inputs):
+ builder.PrependUOffsetTRelativeSlot(
+ 1, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0)
+
+
+def SubGraphStartInputsVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+
+def SubGraphAddOutputs(builder, outputs):
+ builder.PrependUOffsetTRelativeSlot(
+ 2, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0)
+
+
+def SubGraphStartOutputsVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+
+def SubGraphAddOperators(builder, operators):
+ builder.PrependUOffsetTRelativeSlot(
+ 3, flatbuffers.number_types.UOffsetTFlags.py_type(operators), 0)
+
+
+def SubGraphStartOperatorsVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+
+def SubGraphAddName(builder, name):
+ builder.PrependUOffsetTRelativeSlot(
+ 4, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
+
+
+def SubGraphEnd(builder):
+ return builder.EndObject()
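
On the read side, the table above is normally reached from the model root rather than built by hand. A minimal traversal sketch, assuming a subgraph object obtained via the sibling generated Model.py (not shown in this hunk):

def dump_subgraph(subgraph):
    # `subgraph` is a tflite.SubGraph.SubGraph; String() accessors return bytes.
    print("subgraph:", subgraph.Name() or b"<unnamed>")
    print("  inputs: ", [subgraph.Inputs(j) for j in range(subgraph.InputsLength())])
    print("  outputs:", [subgraph.Outputs(j) for j in range(subgraph.OutputsLength())])
    for i in range(subgraph.TensorsLength()):
        t = subgraph.Tensors(i)  # indirect lookup into the tensors vector
        print("  tensor %d: %s" % (i, t.Name()))
    print("  operator count:", subgraph.OperatorsLength())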
diff --git a/tools/tflitefile_tool/tflite/SubOptions.py b/tools/tflitefile_tool/tflite/SubOptions.py
new file mode 100644
index 000000000..29b3dcbfb
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/SubOptions.py
@@ -0,0 +1,39 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class SubOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsSubOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = SubOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # SubOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # SubOptions
+ def FusedActivationFunction(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+
+def SubOptionsStart(builder):
+ builder.StartObject(1)
+
+
+def SubOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
+ builder.PrependInt8Slot(0, fusedActivationFunction, 0)
+
+
+def SubOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/Tensor.py b/tools/tflitefile_tool/tflite/Tensor.py
new file mode 100644
index 000000000..468b120f4
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/Tensor.py
@@ -0,0 +1,122 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class Tensor(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsTensor(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = Tensor()
+ x.Init(buf, n + offset)
+ return x
+
+ # Tensor
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # Tensor
+ def Shape(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ a = self._tab.Vector(o)
+ return self._tab.Get(
+ flatbuffers.number_types.Int32Flags,
+ a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+ return 0
+
+ # Tensor
+ def ShapeAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
+ return 0
+
+ # Tensor
+ def ShapeLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # Tensor
+ def Type(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+ # Tensor
+ def Buffer(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
+ return 0
+
+ # Tensor
+ def Name(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+ if o != 0:
+ return self._tab.String(o + self._tab.Pos)
+ return ""
+
+ # Tensor
+ def Quantization(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+ if o != 0:
+ x = self._tab.Indirect(o + self._tab.Pos)
+ from .QuantizationParameters import QuantizationParameters
+ obj = QuantizationParameters()
+ obj.Init(self._tab.Bytes, x)
+ return obj
+ return None
+
+ # Tensor
+ def IsVariable(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)
+ return 0
+
+
+def TensorStart(builder):
+ builder.StartObject(6)
+
+
+def TensorAddShape(builder, shape):
+ builder.PrependUOffsetTRelativeSlot(
+ 0, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0)
+
+
+def TensorStartShapeVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+
+def TensorAddType(builder, type):
+ builder.PrependInt8Slot(1, type, 0)
+
+
+def TensorAddBuffer(builder, buffer):
+ builder.PrependUint32Slot(2, buffer, 0)
+
+
+def TensorAddName(builder, name):
+ builder.PrependUOffsetTRelativeSlot(
+ 3, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
+
+
+def TensorAddQuantization(builder, quantization):
+ builder.PrependUOffsetTRelativeSlot(
+ 4, flatbuffers.number_types.UOffsetTFlags.py_type(quantization), 0)
+
+
+def TensorAddIsVariable(builder, isVariable):
+ builder.PrependBoolSlot(5, isVariable, 0)
+
+
+def TensorEnd(builder):
+ return builder.EndObject()
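
A round-trip sketch for the table above, combining a shape vector, a string name, and an enum-typed scalar (TensorType comes from the next file; flatbuffers 1.x API assumed):

import flatbuffers

from tflite.Tensor import (Tensor, TensorStart, TensorStartShapeVector, TensorAddShape,
                           TensorAddType, TensorAddName, TensorEnd)
from tflite.TensorType import TensorType

builder = flatbuffers.Builder(0)
name = builder.CreateString("input0")  # strings must be created before TensorStart

TensorStartShapeVector(builder, 2)     # shape [1, 224], prepended back to front
builder.PrependInt32(224)
builder.PrependInt32(1)
shape = builder.EndVector(2)

TensorStart(builder)
TensorAddShape(builder, shape)
TensorAddType(builder, TensorType.FLOAT32)
TensorAddName(builder, name)
builder.Finish(TensorEnd(builder))

t = Tensor.GetRootAsTensor(builder.Output(), 0)
assert t.ShapeAsNumpy().tolist() == [1, 224]  # ShapeAsNumpy() needs numpy installed
assert t.Type() == TensorType.FLOAT32
assert t.Name() == b"input0"                  # String() returns bytes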
diff --git a/tools/tflitefile_tool/tflite/TensorType.py b/tools/tflitefile_tool/tflite/TensorType.py
new file mode 100644
index 000000000..e375c65ee
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/TensorType.py
@@ -0,0 +1,15 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+
+class TensorType(object):
+ FLOAT32 = 0
+ FLOAT16 = 1
+ INT32 = 2
+ UINT8 = 3
+ INT64 = 4
+ STRING = 5
+ BOOL = 6
+ INT16 = 7
+ COMPLEX64 = 8
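
The generated enum is just a bag of integer class attributes, so a reverse lookup for pretty-printing type codes can be derived directly — a hypothetical helper, not part of the generated API:

from tflite.TensorType import TensorType

# Invert the class attributes (FLOAT32 = 0, ...) into a code -> name map.
_TYPE_NAMES = {v: k for k, v in vars(TensorType).items() if not k.startswith("_")}

def tensor_type_name(code):
    return _TYPE_NAMES.get(code, "UNKNOWN(%d)" % code)

assert tensor_type_name(TensorType.UINT8) == "UINT8"
assert tensor_type_name(42) == "UNKNOWN(42)"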
diff --git a/tools/tflitefile_tool/tflite/TileOptions.py b/tools/tflitefile_tool/tflite/TileOptions.py
new file mode 100644
index 000000000..59543fc31
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/TileOptions.py
@@ -0,0 +1,28 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class TileOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsTileOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = TileOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # TileOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+
+def TileOptionsStart(builder):
+ builder.StartObject(0)
+
+
+def TileOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/TopKV2Options.py b/tools/tflitefile_tool/tflite/TopKV2Options.py
new file mode 100644
index 000000000..004898943
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/TopKV2Options.py
@@ -0,0 +1,28 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class TopKV2Options(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsTopKV2Options(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = TopKV2Options()
+ x.Init(buf, n + offset)
+ return x
+
+ # TopKV2Options
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+
+def TopKV2OptionsStart(builder):
+ builder.StartObject(0)
+
+
+def TopKV2OptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/TransposeConvOptions.py b/tools/tflitefile_tool/tflite/TransposeConvOptions.py
new file mode 100644
index 000000000..d36a8437e
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/TransposeConvOptions.py
@@ -0,0 +1,61 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class TransposeConvOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsTransposeConvOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = TransposeConvOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # TransposeConvOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # TransposeConvOptions
+ def Padding(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+ # TransposeConvOptions
+ def StrideW(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+ # TransposeConvOptions
+ def StrideH(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+
+def TransposeConvOptionsStart(builder):
+ builder.StartObject(3)
+
+
+def TransposeConvOptionsAddPadding(builder, padding):
+ builder.PrependInt8Slot(0, padding, 0)
+
+
+def TransposeConvOptionsAddStrideW(builder, strideW):
+ builder.PrependInt32Slot(1, strideW, 0)
+
+
+def TransposeConvOptionsAddStrideH(builder, strideH):
+ builder.PrependInt32Slot(2, strideH, 0)
+
+
+def TransposeConvOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/TransposeOptions.py b/tools/tflitefile_tool/tflite/TransposeOptions.py
new file mode 100644
index 000000000..b796686dd
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/TransposeOptions.py
@@ -0,0 +1,28 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+import flatbuffers
+
+
+class TransposeOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsTransposeOptions(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = TransposeOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ # TransposeOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+
+def TransposeOptionsStart(builder):
+ builder.StartObject(0)
+
+
+def TransposeOptionsEnd(builder):
+ return builder.EndObject()
diff --git a/tools/tflitefile_tool/tflite/__init__.py b/tools/tflitefile_tool/tflite/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tools/tflitefile_tool/tflite/__init__.py
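
The empty __init__.py (blob e69de29bb is git's canonical empty file) marks tools/tflitefile_tool/tflite as a regular Python package, so each generated module above is importable as, e.g., `from tflite.Tensor import Tensor` once the tool's directory is on sys.path.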