summaryrefslogtreecommitdiff
path: root/infra
diff options
context:
space:
mode:
Diffstat (limited to 'infra')
-rw-r--r--infra/cmake/modules/ExternalBuildTools.cmake25
-rw-r--r--infra/cmake/modules/ExternalSourceTools.cmake60
-rw-r--r--infra/cmake/modules/IdentifyPlatform.cmake20
-rw-r--r--infra/cmake/packages/ARMComputeSourceConfig.cmake4
-rw-r--r--infra/cmake/packages/AbseilConfig.cmake8
-rw-r--r--infra/cmake/packages/AbseilSourceConfig.cmake12
-rw-r--r--infra/cmake/packages/BoostConfig.cmake11
-rw-r--r--infra/cmake/packages/BoostSourceConfig.cmake2
-rw-r--r--infra/cmake/packages/CMSIS-NN-4.0.0/CMSIS-NNConfig.cmake14
-rw-r--r--infra/cmake/packages/CMSIS-NN-4.0.0/CMSIS-NNConfigVersion.cmake (renamed from infra/cmake/packages/TensorFlowProtoText-1.12/TensorFlowProtoTextConfigVersion.cmake)7
-rw-r--r--infra/cmake/packages/CMSIS-NN-4.1.0/CMSIS-NNConfig.cmake14
-rw-r--r--infra/cmake/packages/CMSIS-NN-4.1.0/CMSIS-NNConfigVersion.cmake10
-rw-r--r--infra/cmake/packages/CMSISSource-5.8.0/CMSISSourceConfig.cmake16
-rw-r--r--infra/cmake/packages/CMSISSource-5.8.0/CMSISSourceConfigVersion.cmake10
-rw-r--r--infra/cmake/packages/CaffeSourceConfig.cmake3
-rw-r--r--infra/cmake/packages/CpuInfoSourceConfig.cmake21
-rw-r--r--infra/cmake/packages/Egl_HeadersSourceConfig.cmake21
-rw-r--r--infra/cmake/packages/FarmhashSourceConfig.cmake3
-rw-r--r--infra/cmake/packages/FlatBuffers-2.0/FlatBuffersConfig.cmake (renamed from infra/cmake/packages/FlatBuffersConfig.cmake)47
-rw-r--r--infra/cmake/packages/FlatBuffers-2.0/FlatBuffersConfigVersion.cmake (renamed from infra/cmake/packages/TensorFlowSource-1.12/TensorFlowSourceConfigVersion.cmake)2
-rw-r--r--infra/cmake/packages/FlatBuffersSource-1.11/FlatBuffersSourceConfig.cmake21
-rw-r--r--infra/cmake/packages/FlatBuffersSource-2.0/FlatBuffersSourceConfig.cmake (renamed from infra/cmake/packages/FlatBuffersSource-1.10/FlatBuffersSourceConfig.cmake)9
-rw-r--r--infra/cmake/packages/FlatBuffersSource-2.0/FlatBuffersSourceConfigVersion.cmake (renamed from infra/cmake/packages/FlatBuffersSource-1.11/FlatBuffersSourceConfigVersion.cmake)2
-rw-r--r--infra/cmake/packages/FlatBuffersSourceConfig.cmake28
-rw-r--r--infra/cmake/packages/FlatBuffersSourceConfigVersion.cmake9
-rw-r--r--infra/cmake/packages/Fp16SourceConfig.cmake21
-rw-r--r--infra/cmake/packages/FxdivSourceConfig.cmake21
-rw-r--r--infra/cmake/packages/GEMMLowpSourceConfig.cmake3
-rw-r--r--infra/cmake/packages/GFlagsSourceConfig.cmake3
-rw-r--r--infra/cmake/packages/GTestConfig.cmake15
-rw-r--r--infra/cmake/packages/GTestSourceConfig.cmake3
-rw-r--r--infra/cmake/packages/GoogleDoubleConversionConfig.cmake52
-rw-r--r--infra/cmake/packages/GoogleNSyncConfig.cmake62
-rw-r--r--infra/cmake/packages/H5Tinit.c.linux-armv7l977
-rw-r--r--infra/cmake/packages/HDF5Config.cmake16
-rw-r--r--infra/cmake/packages/HDF5Source.patch195
-rw-r--r--infra/cmake/packages/HDF5SourceConfig.cmake6
-rw-r--r--infra/cmake/packages/JsoncppConfig.cmake34
-rw-r--r--infra/cmake/packages/JsoncppSourceConfig.cmake19
-rw-r--r--infra/cmake/packages/MbedOSSource-6.15/MbedOSSourceConfig.cmake16
-rw-r--r--infra/cmake/packages/MbedOSSource-6.15/MbedOSSourceConfigVersion.cmake (renamed from infra/cmake/packages/FlatBuffersSource-1.10/FlatBuffersSourceConfigVersion.cmake)2
-rw-r--r--infra/cmake/packages/NEON2SSESourceConfig.cmake9
-rw-r--r--infra/cmake/packages/NoniusSourceConfig.cmake2
-rw-r--r--infra/cmake/packages/ONNXSource-1.4.1/ONNXSourceConfig.cmake3
-rw-r--r--infra/cmake/packages/ONNXSource-1.6.0/ONNXSourceConfig.cmake3
-rw-r--r--infra/cmake/packages/OouraFFTSourceConfig.cmake20
-rw-r--r--infra/cmake/packages/Opencl_HeadersConfig.cmake27
-rw-r--r--infra/cmake/packages/Opencl_HeadersSourceConfig.cmake22
-rw-r--r--infra/cmake/packages/Opengl_HeadersSourceConfig.cmake21
-rw-r--r--infra/cmake/packages/ProtobufConfig.cmake21
-rw-r--r--infra/cmake/packages/ProtobufSource.patch18
-rw-r--r--infra/cmake/packages/ProtobufSourceConfig.cmake6
-rw-r--r--infra/cmake/packages/PsimdSourceConfig.cmake21
-rw-r--r--infra/cmake/packages/PthreadpoolSourceConfig.cmake21
-rw-r--r--infra/cmake/packages/Pybind11SourceConfig.cmake3
-rw-r--r--infra/cmake/packages/PytorchSourceConfig.cmake3
-rw-r--r--infra/cmake/packages/RuySourceConfig.cmake21
-rw-r--r--infra/cmake/packages/TensorFlow-1.13/TensorFlowConfig.cmake56
-rw-r--r--infra/cmake/packages/TensorFlow-1.13/TensorFlowConfigVersion.cmake10
-rw-r--r--infra/cmake/packages/TensorFlow-1.13/TensorFlowVersionChecker.c25
-rw-r--r--infra/cmake/packages/TensorFlowEigenSource-2.1.0/TensorFlowEigenSourceConfig.cmake3
-rw-r--r--infra/cmake/packages/TensorFlowEigenSource-2.6.0/TensorFlowEigenSourceConfig.cmake (renamed from infra/cmake/packages/TensorFlowEigenSource-2.3.0-rc0Config.cmake)10
-rw-r--r--infra/cmake/packages/TensorFlowEigenSource-2.6.0/TensorFlowEigenSourceConfigVersion.cmake10
-rw-r--r--infra/cmake/packages/TensorFlowEigenSource-2.8.0/TensorFlowEigenSourceConfig.cmake21
-rw-r--r--infra/cmake/packages/TensorFlowEigenSource-2.8.0/TensorFlowEigenSourceConfigVersion.cmake10
-rw-r--r--infra/cmake/packages/TensorFlowGEMMLowpSource-2.1.0/TensorFlowGEMMLowpSourceConfig.cmake3
-rw-r--r--infra/cmake/packages/TensorFlowGEMMLowpSource-2.3.0/TensorFlowGEMMLowpSourceConfig.cmake3
-rw-r--r--infra/cmake/packages/TensorFlowGEMMLowpSource-2.6.0/TensorFlowGEMMLowpSourceConfig.cmake21
-rw-r--r--infra/cmake/packages/TensorFlowGEMMLowpSource-2.6.0/TensorFlowGEMMLowpSourceConfigVersion.cmake10
-rw-r--r--infra/cmake/packages/TensorFlowGEMMLowpSource-2.8.0/TensorFlowGEMMLowpSourceConfig.cmake21
-rw-r--r--infra/cmake/packages/TensorFlowGEMMLowpSource-2.8.0/TensorFlowGEMMLowpSourceConfigVersion.cmake10
-rw-r--r--infra/cmake/packages/TensorFlowLite-1.12/Lite/CMakeLists.txt41
-rw-r--r--infra/cmake/packages/TensorFlowLite-1.12/TensorFlowLiteConfig.cmake62
-rw-r--r--infra/cmake/packages/TensorFlowLite-1.13.1/Lite/CMakeLists.txt7
-rw-r--r--infra/cmake/packages/TensorFlowLite-1.13.1/TensorFlowLiteConfig.cmake6
-rw-r--r--infra/cmake/packages/TensorFlowProtoText-1.12/TensorFlowProtoTextConfig.cmake104
-rw-r--r--infra/cmake/packages/TensorFlowProtoText-1.12/build/CMakeLists.txt78
-rwxr-xr-xinfra/cmake/packages/TensorFlowProtoText-1.12/make_directories.sh6
-rw-r--r--infra/cmake/packages/TensorFlowProtoText-1.13.1/TensorFlowProtoTextConfig.cmake104
-rw-r--r--infra/cmake/packages/TensorFlowProtoText-1.13.1/TensorFlowProtoTextConfigVersion.cmake9
-rw-r--r--infra/cmake/packages/TensorFlowProtoText-1.13.1/build/CMakeLists.txt78
-rwxr-xr-xinfra/cmake/packages/TensorFlowProtoText-1.13.1/make_directories.sh6
-rw-r--r--infra/cmake/packages/TensorFlowRuySource-2.3.0/TensorFlowRuySourceConfig.cmake3
-rw-r--r--infra/cmake/packages/TensorFlowRuySource-2.6.0/TensorFlowRuySourceConfig.cmake21
-rw-r--r--infra/cmake/packages/TensorFlowRuySource-2.6.0/TensorFlowRuySourceConfigVersion.cmake10
-rw-r--r--infra/cmake/packages/TensorFlowRuySource-2.8.0/TensorFlowRuySourceConfig.cmake21
-rw-r--r--infra/cmake/packages/TensorFlowRuySource-2.8.0/TensorFlowRuySourceConfigVersion.cmake10
-rw-r--r--infra/cmake/packages/TensorFlowSource-1.14/TensorFlowSourceConfig.cmake3
-rw-r--r--infra/cmake/packages/TensorFlowSource-2.1.0/TensorFlowSourceConfig.cmake3
-rw-r--r--infra/cmake/packages/TensorFlowSource-2.12.1/TensorFlowSourceConfig.cmake19
-rw-r--r--infra/cmake/packages/TensorFlowSource-2.12.1/TensorFlowSourceConfigVersion.cmake10
-rw-r--r--infra/cmake/packages/TensorFlowSource-2.2.0/TensorFlowSourceConfig.cmake3
-rw-r--r--infra/cmake/packages/TensorFlowSource-2.3.0-rc0Config.cmake3
-rw-r--r--infra/cmake/packages/TensorFlowSource-2.3.0/TensorFlowSourceConfig.cmake3
-rw-r--r--infra/cmake/packages/TensorFlowSource-2.6.0/TensorFlowSourceConfig.cmake (renamed from infra/cmake/packages/TensorFlowSource-1.12/TensorFlowSourceConfig.cmake)5
-rw-r--r--infra/cmake/packages/TensorFlowSource-2.6.0/TensorFlowSourceConfigVersion.cmake10
-rw-r--r--infra/cmake/packages/TensorFlowSource-2.8.0/TensorFlowSourceConfig.cmake19
-rw-r--r--infra/cmake/packages/TensorFlowSource-2.8.0/TensorFlowSourceConfigVersion.cmake10
-rw-r--r--infra/cmake/packages/VulkanSourceConfig.cmake20
-rw-r--r--infra/cmake/packages/XnnpackSourceConfig.cmake21
-rw-r--r--infra/command/build-docker-image29
-rw-r--r--infra/command/docker-run11
-rw-r--r--infra/command/format116
-rw-r--r--infra/command/gen-coverage-report7
-rw-r--r--infra/config/docker.configuration2
-rw-r--r--infra/debian/compiler/changelog113
-rw-r--r--infra/debian/compiler/compat1
-rw-r--r--infra/debian/compiler/control25
-rw-r--r--infra/debian/compiler/copyright3
-rw-r--r--infra/debian/compiler/docs/one-build.196
-rw-r--r--infra/debian/compiler/docs/one-codegen.139
-rw-r--r--infra/debian/compiler/docs/one-import-bcq.161
-rw-r--r--infra/debian/compiler/docs/one-import-onnx.163
-rw-r--r--infra/debian/compiler/docs/one-import-tf.177
-rw-r--r--infra/debian/compiler/docs/one-import-tflite.144
-rw-r--r--infra/debian/compiler/docs/one-import.135
-rw-r--r--infra/debian/compiler/docs/one-infer.146
-rw-r--r--infra/debian/compiler/docs/one-optimize.1222
-rw-r--r--infra/debian/compiler/docs/one-pack.142
-rw-r--r--infra/debian/compiler/docs/one-partition.156
-rw-r--r--infra/debian/compiler/docs/one-profile.139
-rw-r--r--infra/debian/compiler/docs/one-quantize.183
-rw-r--r--infra/debian/compiler/docs/onecc.1170
-rw-r--r--infra/debian/compiler/one-compiler-dev.install10
-rw-r--r--infra/debian/compiler/one-compiler-dev.links6
-rw-r--r--infra/debian/compiler/one-compiler-test.install5
-rw-r--r--infra/debian/compiler/one-compiler.install61
-rw-r--r--infra/debian/compiler/one-compiler.links17
-rw-r--r--infra/debian/compiler/one-compiler.manpages14
-rw-r--r--infra/debian/compiler/postinst12
-rw-r--r--infra/debian/compiler/postrm18
-rwxr-xr-xinfra/debian/compiler/rules19
-rw-r--r--infra/debian/compiler/source/format1
-rw-r--r--infra/debian/compiler/source/local-options2
-rw-r--r--infra/debian/runtime/changelog38
-rw-r--r--infra/debian/runtime/compat1
-rw-r--r--infra/debian/runtime/control19
-rw-r--r--infra/debian/runtime/copyright3
-rw-r--r--infra/debian/runtime/nnfw-dev.install4
-rw-r--r--infra/debian/runtime/nnfw.install3
-rwxr-xr-xinfra/debian/runtime/rules22
-rw-r--r--infra/debian/runtime/source/format1
-rw-r--r--infra/debian/runtime/source/local-options2
-rw-r--r--infra/docker/Dockerfile66
-rw-r--r--infra/docker/Dockerfile.180441
-rw-r--r--infra/docker/bionic/Dockerfile145
-rw-r--r--infra/docker/bionic/Dockerfile.aarch6492
-rw-r--r--infra/docker/focal/Dockerfile108
-rw-r--r--infra/docker/focal/Dockerfile.aarch6462
-rw-r--r--infra/docker/jammy/Dockerfile60
-rw-r--r--infra/docker/jammy/Dockerfile.aarch6460
-rw-r--r--infra/doxygen/Doxyfile68
-rw-r--r--infra/nncc/CMakeLists.txt60
-rw-r--r--infra/nncc/Makefile.arm32152
-rw-r--r--infra/nncc/cmake/ApplyCompileFlags.cmake35
-rw-r--r--infra/nncc/cmake/CfgOptionFlags.cmake58
-rw-r--r--infra/nncc/cmake/buildtool/config/arm-none-eabi-gcc.cmake66
-rw-r--r--infra/nncc/cmake/buildtool/config/config_aarch64-linux.cmake13
-rw-r--r--infra/nncc/cmake/buildtool/config/config_aarch64-tizen.cmake17
-rw-r--r--infra/nncc/cmake/buildtool/config/config_armv7hl-tizen.cmake29
-rw-r--r--infra/nncc/cmake/buildtool/config/config_armv7l-linux.cmake25
-rw-r--r--infra/nncc/cmake/buildtool/config/config_armv7l-tizen.cmake29
-rw-r--r--infra/nncc/cmake/buildtool/config/config_i686-tizen.cmake17
-rw-r--r--infra/nncc/cmake/buildtool/config/config_linux.cmake11
-rw-r--r--infra/nncc/cmake/buildtool/config/config_x86_64-tizen.cmake17
-rw-r--r--infra/nncc/cmake/buildtool/cross/toolchain_armv7l-linux.cmake38
-rw-r--r--infra/nncc/cmake/options/options_aarch64-darwin.cmake4
-rw-r--r--infra/nncc/cmake/options/options_aarch64-linux.cmake4
-rw-r--r--infra/nncc/cmake/options/options_aarch64-tizen.cmake4
-rw-r--r--infra/nncc/cmake/options/options_armv7em-generic.cmake3
-rw-r--r--infra/nncc/cmake/options/options_armv7hl-tizen.cmake5
-rw-r--r--infra/nncc/cmake/options/options_armv7l-linux.cmake5
-rw-r--r--infra/nncc/cmake/options/options_armv7l-tizen.cmake5
-rw-r--r--infra/nncc/cmake/options/options_i686-tizen.cmake3
-rw-r--r--infra/nncc/cmake/options/options_riscv64-tizen.cmake3
-rw-r--r--infra/nncc/cmake/options/options_x86_64-darwin.cmake4
-rw-r--r--infra/nncc/cmake/options/options_x86_64-linux.cmake3
-rw-r--r--infra/nncc/cmake/options/options_x86_64-tizen.cmake3
-rw-r--r--infra/nncc/command/utcount8
-rw-r--r--infra/nncc/config/docker.configuration4
-rw-r--r--infra/nnfw/CMakeLists.txt18
-rw-r--r--infra/nnfw/cmake/ApplyCompileFlags.cmake10
-rw-r--r--infra/nnfw/cmake/CfgOptionFlags.cmake42
-rw-r--r--infra/nnfw/cmake/buildtool/config/config_aarch64-android.cmake3
-rw-r--r--infra/nnfw/cmake/buildtool/config/config_aarch64-tizen.cmake2
-rw-r--r--infra/nnfw/cmake/buildtool/config/config_armv7hl-tizen.cmake22
-rw-r--r--infra/nnfw/cmake/buildtool/config/config_i686-tizen.cmake17
-rw-r--r--infra/nnfw/cmake/buildtool/config/config_linux.cmake16
-rw-r--r--infra/nnfw/cmake/buildtool/config/config_riscv64-tizen.cmake17
-rw-r--r--infra/nnfw/cmake/buildtool/config/config_x86_64-darwin.cmake3
-rw-r--r--infra/nnfw/cmake/buildtool/config/config_x86_64-tizen.cmake17
-rw-r--r--infra/nnfw/cmake/buildtool/cross/toolchain_aarch64-linux.cmake6
-rw-r--r--infra/nnfw/cmake/buildtool/cross/toolchain_aarch64-tizen.cmake6
-rw-r--r--infra/nnfw/cmake/buildtool/cross/toolchain_armv7l-linux.cmake6
-rw-r--r--infra/nnfw/cmake/buildtool/cross/toolchain_armv7l-tizen.cmake66
-rw-r--r--infra/nnfw/cmake/options/options_aarch64-android.cmake20
-rw-r--r--infra/nnfw/cmake/options/options_aarch64-tizen.cmake9
-rw-r--r--infra/nnfw/cmake/options/options_armv7hl-tizen.cmake27
-rw-r--r--infra/nnfw/cmake/options/options_armv7l-linux.cmake7
-rw-r--r--infra/nnfw/cmake/options/options_armv7l-tizen.cmake17
-rw-r--r--infra/nnfw/cmake/options/options_i686-tizen.cmake21
-rw-r--r--infra/nnfw/cmake/options/options_riscv64-tizen.cmake20
-rw-r--r--infra/nnfw/cmake/options/options_x86_64-darwin.cmake1
-rw-r--r--infra/nnfw/cmake/options/options_x86_64-linux.cmake1
-rw-r--r--infra/nnfw/cmake/options/options_x86_64-tizen.cmake21
-rw-r--r--infra/nnfw/cmake/packages/ARMComputeConfig.cmake118
-rw-r--r--infra/nnfw/cmake/packages/BoostConfig.cmake33
-rw-r--r--infra/nnfw/cmake/packages/CpuInfoConfig.cmake39
-rw-r--r--infra/nnfw/cmake/packages/EigenConfig.cmake2
-rw-r--r--infra/nnfw/cmake/packages/FarmhashSourceConfig.cmake19
-rw-r--r--infra/nnfw/cmake/packages/FlatBuffersConfig.cmake4
-rw-r--r--infra/nnfw/cmake/packages/Fp16Config.cmake30
-rw-r--r--infra/nnfw/cmake/packages/FxdivConfig.cmake29
-rw-r--r--infra/nnfw/cmake/packages/GEMMLowpConfig.cmake2
-rw-r--r--infra/nnfw/cmake/packages/GEMMLowpSourceConfig.cmake19
-rw-r--r--infra/nnfw/cmake/packages/GLib2.0Config.cmake41
-rw-r--r--infra/nnfw/cmake/packages/GObject2.0Config.cmake30
-rw-r--r--infra/nnfw/cmake/packages/GTestConfig.cmake30
-rw-r--r--infra/nnfw/cmake/packages/Gio2.0Config.cmake32
-rw-r--r--infra/nnfw/cmake/packages/Giounix2.0Config.cmake30
-rw-r--r--infra/nnfw/cmake/packages/LuciConfig.cmake43
-rw-r--r--infra/nnfw/cmake/packages/NEON2SSESourceConfig.cmake19
-rw-r--r--infra/nnfw/cmake/packages/PsimdConfig.cmake26
-rw-r--r--infra/nnfw/cmake/packages/PthreadpoolConfig.cmake35
-rw-r--r--infra/nnfw/cmake/packages/Ruy/CMakeLists.txt9
-rw-r--r--infra/nnfw/cmake/packages/RuyConfig.cmake40
-rw-r--r--infra/nnfw/cmake/packages/RuySourceConfig.cmake19
-rw-r--r--infra/nnfw/cmake/packages/TRIXEngineConfig.cmake42
-rw-r--r--infra/nnfw/cmake/packages/TRIXEngineConfigVersion.cmake104
-rw-r--r--infra/nnfw/cmake/packages/TRIXEngineConfigVersion.extra.cpp24
-rw-r--r--infra/nnfw/cmake/packages/TRIXEngineConfigVersion.major.cpp24
-rw-r--r--infra/nnfw/cmake/packages/TRIXEngineConfigVersion.minor.cpp24
-rw-r--r--infra/nnfw/cmake/packages/TensorFlowEigen-1.13.1/TensorFlowEigenConfig.cmake19
-rw-r--r--infra/nnfw/cmake/packages/TensorFlowEigen-1.13.1/TensorFlowEigenConfigVersion.cmake9
-rw-r--r--infra/nnfw/cmake/packages/TensorFlowGpuConfig.cmake51
-rw-r--r--infra/nnfw/cmake/packages/TensorFlowLite-1.13.1/TensorFlowLite/CMakeLists.txt62
-rw-r--r--infra/nnfw/cmake/packages/TensorFlowLite-1.13.1/TensorFlowLiteConfig.cmake73
-rw-r--r--infra/nnfw/cmake/packages/TensorFlowLite-1.13.1/TensorFlowLiteConfigVersion.cmake9
-rw-r--r--infra/nnfw/cmake/packages/TensorFlowLite-2.3.0/CMakeLists.txt123
-rw-r--r--infra/nnfw/cmake/packages/TensorFlowLite-2.3.0Config.cmake100
-rw-r--r--infra/nnfw/cmake/packages/TensorFlowLite-2.8.0/TensorFlowLite/CMakeLists.txt185
-rw-r--r--infra/nnfw/cmake/packages/TensorFlowLite-2.8.0/TensorFlowLiteConfig.cmake96
-rw-r--r--infra/nnfw/cmake/packages/TensorFlowLite-2.8.0/TensorFlowLiteConfigVersion.cmake (renamed from infra/cmake/packages/TensorFlowLite-1.12/TensorFlowLiteConfigVersion.cmake)2
-rw-r--r--infra/nnfw/cmake/packages/TensorFlowLiteGpu/CMakeLists.txt73
-rw-r--r--infra/nnfw/cmake/packages/XnnpackConfig.cmake41
-rw-r--r--infra/nnfw/command/build2
-rw-r--r--infra/nnfw/command/count-unittest6
-rw-r--r--infra/nnfw/command/prepare-model18
-rw-r--r--infra/nnfw/config/docker.configuration2
-rw-r--r--infra/nnfw/config/gbs.conf41
-rw-r--r--infra/onert-micro/CMakeLists.txt61
-rw-r--r--infra/onert-micro/cmake/ApplyCompileFlags.cmake35
-rw-r--r--infra/onert-micro/cmake/CfgOptionFlags.cmake18
-rw-r--r--infra/onert-micro/cmake/buildtool/config/arm-none-eabi-gcc.cmake66
-rw-r--r--infra/onert-micro/cmake/buildtool/config/config_linux.cmake11
-rw-r--r--infra/onert-micro/cmake/buildtool/config/config_x86_64-linux.cmake12
-rw-r--r--infra/onert-micro/cmake/options/options_armv7-r-generic.cmake3
-rw-r--r--infra/onert-micro/cmake/options/options_armv7em-generic.cmake3
-rw-r--r--infra/onert-micro/cmake/options/options_armv8-m-generic.cmake3
-rw-r--r--infra/onert-micro/cmake/options/options_x86_64-linux.cmake3
-rw-r--r--infra/onert-micro/utils.cmake53
-rw-r--r--infra/packaging/build13
-rw-r--r--infra/packaging/preset/202006304
-rw-r--r--infra/packaging/preset/20200731_windows6
-rw-r--r--infra/packaging/preset/2021040655
-rw-r--r--infra/packaging/preset/20210406_windows67
-rw-r--r--infra/packaging/preset/2021070655
-rw-r--r--infra/packaging/preset/20210706_windows67
-rw-r--r--infra/packaging/preset/2021091055
-rw-r--r--infra/packaging/preset/20210910_windows67
-rw-r--r--infra/packaging/preset/2022032364
-rw-r--r--infra/packaging/preset/20220323_windows77
-rw-r--r--infra/packaging/preset/2022112566
-rw-r--r--infra/packaging/preset/20221125_windows80
-rw-r--r--infra/packaging/preset/2023041366
-rw-r--r--infra/packaging/preset/20230413_windows80
-rw-r--r--infra/packaging/preset/2023090766
-rw-r--r--infra/packaging/preset/20230907_windows80
-rw-r--r--infra/packaging/res/tf2nnpkg.2020063033
-rw-r--r--infra/packaging/res/tf2nnpkg.20210406109
-rw-r--r--infra/packaging/res/tf2nnpkg.20210706109
-rw-r--r--infra/packaging/res/tf2nnpkg.20210910109
-rw-r--r--infra/packaging/res/tf2nnpkg.20220323109
-rw-r--r--infra/packaging/res/tf2nnpkg.20221125109
-rw-r--r--infra/packaging/res/tf2nnpkg.20230413109
-rw-r--r--infra/packaging/res/tf2nnpkg.20230907109
-rwxr-xr-xinfra/scripts/build-tcm.sh10
-rwxr-xr-xinfra/scripts/build_android_runtime_release.sh21
-rwxr-xr-xinfra/scripts/common.sh36
-rw-r--r--infra/scripts/compiler_modules.sh25
-rwxr-xr-xinfra/scripts/docker_build_cross_aarch64_runtime.sh48
-rwxr-xr-xinfra/scripts/docker_build_cross_arm_runtime.sh48
-rwxr-xr-xinfra/scripts/docker_build_cross_arm_runtime_release.sh49
-rwxr-xr-xinfra/scripts/docker_build_cross_coverage.sh58
-rwxr-xr-xinfra/scripts/docker_build_nncc.sh34
-rwxr-xr-xinfra/scripts/docker_build_test_x64.sh49
-rwxr-xr-xinfra/scripts/docker_build_tizen_cross.sh50
-rwxr-xr-xinfra/scripts/docker_build_tizen_gbs.sh31
-rwxr-xr-xinfra/scripts/docker_collect_nnpkg_resources.sh15
-rwxr-xr-xinfra/scripts/docker_coverage_report.sh32
-rwxr-xr-xinfra/scripts/test_arm_nnpkg.sh3
-rwxr-xr-xinfra/scripts/test_coverage.sh15
-rwxr-xr-xinfra/scripts/test_ubuntu_npud.sh59
-rwxr-xr-xinfra/scripts/test_ubuntu_runtime.sh50
-rwxr-xr-xinfra/scripts/test_ubuntu_runtime_mixed.sh24
-rwxr-xr-xinfra/scripts/tizen_xu4_test.sh44
-rwxr-xr-xinfra/scripts/unittest_compiler_xml.sh11
307 files changed, 8549 insertions, 2287 deletions
diff --git a/infra/cmake/modules/ExternalBuildTools.cmake b/infra/cmake/modules/ExternalBuildTools.cmake
index 4f2027b4b..557e6f47d 100644
--- a/infra/cmake/modules/ExternalBuildTools.cmake
+++ b/infra/cmake/modules/ExternalBuildTools.cmake
@@ -14,7 +14,6 @@ function(ExternalBuild_CMake)
${ARGN}
)
- set(BUILD_STAMP_PATH "${ARG_BUILD_DIR}/${ARG_PKG_NAME}.stamp")
set(BUILD_LOG_PATH "${ARG_BUILD_DIR}/${ARG_PKG_NAME}.log")
set(INSTALL_STAMP_PATH "${ARG_INSTALL_DIR}/${ARG_PKG_NAME}.stamp")
set(INSTALL_LOG_PATH "${ARG_INSTALL_DIR}/${ARG_PKG_NAME}.log")
@@ -24,14 +23,6 @@ function(ExternalBuild_CMake)
set(PKG_IDENTIFIER "${ARG_IDENTIFIER}")
endif(DEFINED ARG_IDENTIFIER)
- # NOTE Do NOT retry build once it fails
- if(EXISTS ${BUILD_STAMP_PATH})
- file(READ ${BUILD_STAMP_PATH} READ_IDENTIFIER)
- if("${READ_IDENTIFIER}" STREQUAL "${PKG_IDENTIFIER}")
- return()
- endif("${READ_IDENTIFIER}" STREQUAL "${PKG_IDENTIFIER}")
- endif(EXISTS ${BUILD_STAMP_PATH})
-
# NOTE Do NOT build pre-installed exists
if(EXISTS ${INSTALL_STAMP_PATH})
file(READ ${INSTALL_STAMP_PATH} READ_IDENTIFIER)
@@ -42,11 +33,23 @@ function(ExternalBuild_CMake)
message(STATUS "Build ${ARG_PKG_NAME} from ${ARG_CMAKE_DIR}")
+ # if we're doing the cross compilation, external project also needs it
+ if(CMAKE_TOOLCHAIN_FILE)
+ set(TOOLCHAIN_FILE ${CMAKE_TOOLCHAIN_FILE})
+ # NOTE CMAKE_TOOLCHAIN_FILE maybe relative path -> make abs folder
+ if(NOT EXISTS ${TOOLCHAIN_FILE})
+ set(TOOLCHAIN_FILE ${CMAKE_SOURCE_DIR}/${CMAKE_TOOLCHAIN_FILE})
+ if(NOT EXISTS ${TOOLCHAIN_FILE})
+ message(FATAL_ERROR "Failed to find ${CMAKE_TOOLCHAIN_FILE}")
+ endif()
+ endif()
+ message(STATUS "ExternalBuild_CMake TOOLCHAIN_FILE=${TOOLCHAIN_FILE}")
+ list(APPEND ARG_EXTRA_OPTS -DCMAKE_TOOLCHAIN_FILE=${TOOLCHAIN_FILE})
+ endif(CMAKE_TOOLCHAIN_FILE)
+
file(MAKE_DIRECTORY ${ARG_BUILD_DIR})
file(MAKE_DIRECTORY ${ARG_INSTALL_DIR})
- file(WRITE "${BUILD_STAMP_PATH}" "${PKG_IDENTIFIER}")
-
execute_process(COMMAND ${CMAKE_COMMAND}
-G "${CMAKE_GENERATOR}"
-DCMAKE_INSTALL_PREFIX=${ARG_INSTALL_DIR}
diff --git a/infra/cmake/modules/ExternalSourceTools.cmake b/infra/cmake/modules/ExternalSourceTools.cmake
index 87cb15270..5671ae0c8 100644
--- a/infra/cmake/modules/ExternalSourceTools.cmake
+++ b/infra/cmake/modules/ExternalSourceTools.cmake
@@ -5,7 +5,7 @@ function(ExternalSource_Download PREFIX)
include(CMakeParseArguments)
nnas_include(StampTools)
- cmake_parse_arguments(ARG "" "DIRNAME;URL;CHECKSUM" "" ${ARGN})
+ cmake_parse_arguments(ARG "" "DIRNAME;URL;CHECKSUM;PATCH" "" ${ARGN})
# Configure URL
if(ARG_URL)
@@ -47,19 +47,39 @@ function(ExternalSource_Download PREFIX)
file(MAKE_DIRECTORY "${TMP_DIR}")
message(STATUS "Download ${PREFIX} from ${URL}")
- file(DOWNLOAD ${URL} "${DOWNLOAD_PATH}"
- STATUS status
- LOG log)
- list(GET status 0 status_code)
- list(GET status 1 status_string)
+ foreach(retry_count RANGE 5)
+ message(STATUS "(Trial Count : ${retry_count})")
- if(NOT status_code EQUAL 0)
- message(FATAL_ERROR "error: downloading '${URL}' failed
+ # For external mirror server
+ envoption(EXTERNAL_SERVER_USERPWD "")
+ file(DOWNLOAD ${URL} "${DOWNLOAD_PATH}"
+ STATUS status
+ USERPWD "${EXTERNAL_SERVER_USERPWD}"
+ LOG log)
+
+ list(GET status 0 status_code)
+ list(GET status 1 status_string)
+
+ # Download success
+ if(status_code EQUAL 0)
+ break()
+ endif()
+
+ message(WARNING "error: downloading '${URL}' failed
status_code: ${status_code}
status_string: ${status_string}
log: ${log}")
- endif()
+
+ # Retry limit exceed
+ if(retry_count EQUAL 5)
+ message(FATAL_ERROR "Download ${PREFIX} from ${URL} - failed")
+ endif()
+
+ # Retry after 10 seconds when download fails
+ execute_process(COMMAND ${CMAKE_COMMAND} -E sleep 10)
+ endforeach()
+
message(STATUS "Download ${PREFIX} from ${URL} - done")
# Verify checksum
@@ -86,7 +106,14 @@ function(ExternalSource_Download PREFIX)
message(STATUS "Extract ${PREFIX}")
execute_process(COMMAND ${CMAKE_COMMAND} -E tar xfz "${DOWNLOAD_PATH}"
- WORKING_DIRECTORY "${TMP_DIR}")
+ WORKING_DIRECTORY "${TMP_DIR}"
+ RESULT_VARIABLE EXTRACTION_RESULT
+ ERROR_VARIABLE EXTRACTION_ERROR)
+
+ if(EXTRACTION_RESULT AND NOT EXTRACTION_RESULT EQUAL 0)
+ message(FATAL_ERROR "Extract ${PREFIX} - failed: ${EXTRACTION_ERROR}")
+ endif()
+
file(REMOVE "${DOWNLOAD_PATH}")
message(STATUS "Extract ${PREFIX} - done")
@@ -100,6 +127,19 @@ function(ExternalSource_Download PREFIX)
get_filename_component(contents ${contents} ABSOLUTE)
file(RENAME ${contents} "${OUT_DIR}")
+ if(ARG_PATCH)
+ message(STATUS "Patch with ${ARG_PATCH}")
+ execute_process(COMMAND patch -p1 -i ${ARG_PATCH}
+ WORKING_DIRECTORY ${OUT_DIR}
+ RESULT_VARIABLE EXEC_RESULT
+ ERROR_VARIABLE EXEC_ERROR)
+ if(NOT EXEC_RESULT EQUAL 0)
+ message(FATAL_ERROR "${PREFIX} failed patch ${ARG_PATCH}")
+ endif(NOT EXEC_RESULT EQUAL 0)
+
+ message(STATUS "patch ${ARG_PATCH}: ${EXEC_RESULT}, ${EXEC_ERROR}")
+ endif(ARG_PATCH)
+
file(REMOVE_RECURSE "${TMP_DIR}")
file(WRITE "${STAMP_PATH}" "${URL}")
message(STATUS "Cleanup ${PREFIX} - done")
diff --git a/infra/cmake/modules/IdentifyPlatform.cmake b/infra/cmake/modules/IdentifyPlatform.cmake
index 69fe48cad..ebaaaced6 100644
--- a/infra/cmake/modules/IdentifyPlatform.cmake
+++ b/infra/cmake/modules/IdentifyPlatform.cmake
@@ -35,20 +35,40 @@ endif()
if("${HOST_ARCH}" STREQUAL "x86_64")
set(HOST_ARCH_BASE ${HOST_ARCH})
+elseif("${HOST_ARCH}" STREQUAL "armv7em")
+ set(HOST_ARCH_BASE "arm")
elseif("${HOST_ARCH}" STREQUAL "armv7l")
set(HOST_ARCH_BASE "arm")
+elseif("${HOST_ARCH}" STREQUAL "armv7hl")
+ set(HOST_ARCH_BASE "arm")
elseif("${HOST_ARCH}" STREQUAL "aarch64")
set(HOST_ARCH_BASE "aarch64")
+elseif("${HOST_ARCH}" STREQUAL "i686")
+ set(HOST_ARCH_BASE "i686")
+elseif("${HOST_ARCH}" STREQUAL "riscv64")
+ set(HOST_ARCH_BASE "riscv64")
else()
message(FATAL_ERROR "'${HOST_ARCH}' architecture is not supported")
endif()
if("${TARGET_ARCH}" STREQUAL "x86_64")
set(TARGET_ARCH_BASE ${TARGET_ARCH})
+elseif("${TARGET_ARCH}" STREQUAL "armv8-m")
+ set(TARGET_ARCH_BASE "arm")
+elseif("${TARGET_ARCH}" STREQUAL "armv7-r")
+ set(TARGET_ARCH_BASE "arm")
+elseif("${TARGET_ARCH}" STREQUAL "armv7em")
+ set(TARGET_ARCH_BASE "arm")
elseif("${TARGET_ARCH}" STREQUAL "armv7l")
set(TARGET_ARCH_BASE "arm")
+elseif("${TARGET_ARCH}" STREQUAL "armv7hl")
+ set(TARGET_ARCH_BASE "arm")
elseif("${TARGET_ARCH}" STREQUAL "aarch64")
set(TARGET_ARCH_BASE "aarch64")
+elseif("${TARGET_ARCH}" STREQUAL "i686")
+ set(TARGET_ARCH_BASE "i686")
+elseif("${TARGET_ARCH}" STREQUAL "riscv64")
+ set(TARGET_ARCH_BASE "riscv64")
else()
message(FATAL_ERROR "'${TARGET_ARCH}' architecture is not supported")
endif()
diff --git a/infra/cmake/packages/ARMComputeSourceConfig.cmake b/infra/cmake/packages/ARMComputeSourceConfig.cmake
index adec1f91b..16e12bbca 100644
--- a/infra/cmake/packages/ARMComputeSourceConfig.cmake
+++ b/infra/cmake/packages/ARMComputeSourceConfig.cmake
@@ -8,11 +8,11 @@ function(_ARMComputeSource_import)
nnas_include(OptionTools)
envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
- set(ARMCOMPUTE_URL ${EXTERNAL_DOWNLOAD_SERVER}/ARM-software/ComputeLibrary/archive/v20.05.tar.gz)
+ set(ARMCOMPUTE_URL ${EXTERNAL_DOWNLOAD_SERVER}/ARM-software/ComputeLibrary/archive/v21.02.tar.gz)
ExternalSource_Download(ARMCOMPUTE ${ARMCOMPUTE_URL})
set(ARMComputeSource_DIR ${ARMCOMPUTE_SOURCE_DIR} PARENT_SCOPE)
- set(ARMComputeSource_FOUND ${ARMCOMPUTE_SOURCE_GET} PARENT_SCOPE)
+ set(ARMComputeSource_FOUND TRUE PARENT_SCOPE)
endfunction(_ARMComputeSource_import)
_ARMComputeSource_import()
diff --git a/infra/cmake/packages/AbseilConfig.cmake b/infra/cmake/packages/AbseilConfig.cmake
index e16dd94d7..b3cb364e1 100644
--- a/infra/cmake/packages/AbseilConfig.cmake
+++ b/infra/cmake/packages/AbseilConfig.cmake
@@ -12,11 +12,18 @@ function(_Abseil_import)
# NOTE Turn off abseil testing
set(BUILD_TESTING OFF)
+ # Set -fPIC property because Abseil-cpp can be used for shared library
+ set(CMAKE_POSITION_INDEPENDENT_CODE ON)
+ # Abseil-cpp 20211102.0 show warning without below setting
+ set(ABSL_PROPAGATE_CXX_STD ON)
+
add_extdirectory("${AbseilSource_DIR}" ABSEIL)
add_library(abseil INTERFACE)
+
target_link_libraries(abseil INTERFACE
# From "Available Abseil CMake Public Targets" in CMake/README.md
+ # Add absl::status (It is not listed in CMake/README.md)
absl::algorithm
absl::base
absl::debugging
@@ -30,6 +37,7 @@ function(_Abseil_import)
absl::synchronization
absl::time
absl::utility
+ absl::status
)
endif(NOT TARGET abseil)
diff --git a/infra/cmake/packages/AbseilSourceConfig.cmake b/infra/cmake/packages/AbseilSourceConfig.cmake
index 8be732660..8d0c7798f 100644
--- a/infra/cmake/packages/AbseilSourceConfig.cmake
+++ b/infra/cmake/packages/AbseilSourceConfig.cmake
@@ -7,19 +7,13 @@ function(_AbseilSource_import)
nnas_include(ExternalSourceTools)
nnas_include(OptionTools)
- # NOTE TensorFlow 1.12 downloads abseil from the following URL
- # - https://github.com/abseil/abseil-cpp/archive/48cd2c3f351ff188bc85684b84a91b6e6d17d896.tar.gz
- #
- # The last change of "48cd2c3f351" was commited on 2018.09.27
- #
- # Let's use the latest released version (2020-02 release patch 2)
+ # NOTE GCC 13 requires abseil 20230125.3
envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
- envoption(ABSEIL_URL ${EXTERNAL_DOWNLOAD_SERVER}/abseil/abseil-cpp/archive/20200225.2.tar.gz)
-
+ envoption(ABSEIL_URL ${EXTERNAL_DOWNLOAD_SERVER}/abseil/abseil-cpp/archive/20230125.3.tar.gz)
ExternalSource_Download(ABSEIL
DIRNAME ABSEIL
URL ${ABSEIL_URL}
- CHECKSUM MD5=73f2b6e72f1599a9139170c29482ddc4)
+ CHECKSUM MD5=9b6dae642c4bd92f007ab2c148bc0498)
set(AbseilSource_DIR ${ABSEIL_SOURCE_DIR} PARENT_SCOPE)
set(AbseilSource_FOUND TRUE PARENT_SCOPE)
diff --git a/infra/cmake/packages/BoostConfig.cmake b/infra/cmake/packages/BoostConfig.cmake
index c4d7d5857..e72f742f3 100644
--- a/infra/cmake/packages/BoostConfig.cmake
+++ b/infra/cmake/packages/BoostConfig.cmake
@@ -25,6 +25,17 @@ function(_Boost_Build Boost_PREFIX)
list(APPEND Boost_Options --with-system)
list(APPEND Boost_Options --with-filesystem)
+ if(DEFINED EXTERNALS_BUILD_THREADS)
+ set(N ${EXTERNALS_BUILD_THREADS})
+ else(DEFINED EXTERNALS_BUILD_THREADS)
+ include(ProcessorCount)
+ ProcessorCount(N)
+ endif(DEFINED EXTERNALS_BUILD_THREADS)
+
+ if((NOT N EQUAL 0) AND BUILD_EXT_MULTITHREAD)
+ list(APPEND Boost_Options -j${N})
+ endif()
+
set(JAM_FILENAME ${BoostBuild_DIR}/user-config.jam)
if(ANDROID)
diff --git a/infra/cmake/packages/BoostSourceConfig.cmake b/infra/cmake/packages/BoostSourceConfig.cmake
index 52cda7c7d..2477a4857 100644
--- a/infra/cmake/packages/BoostSourceConfig.cmake
+++ b/infra/cmake/packages/BoostSourceConfig.cmake
@@ -13,7 +13,7 @@ function(_BoostSource_import)
ExternalSource_Download(BOOST ${BOOST_URL})
set(BoostSource_DIR ${BOOST_SOURCE_DIR} PARENT_SCOPE)
- set(BoostSource_FOUND ${BOOST_SOURCE_GET} PARENT_SCOPE)
+ set(BoostSource_FOUND TRUE PARENT_SCOPE)
endfunction(_BoostSource_import)
_BoostSource_import()
diff --git a/infra/cmake/packages/CMSIS-NN-4.0.0/CMSIS-NNConfig.cmake b/infra/cmake/packages/CMSIS-NN-4.0.0/CMSIS-NNConfig.cmake
new file mode 100644
index 000000000..4c82af2cb
--- /dev/null
+++ b/infra/cmake/packages/CMSIS-NN-4.0.0/CMSIS-NNConfig.cmake
@@ -0,0 +1,14 @@
+function(_CMSIS_NN_import)
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(CMSIS_NN_4_0_0_URL ${EXTERNAL_DOWNLOAD_SERVER}/ARM-software/CMSIS-NN/archive/refs/tags/v4.0.0.tar.gz)
+
+ ExternalSource_Download(CMSIS_NN DIRNAME CMSIS-NN-4.0.0 ${CMSIS_NN_4_0_0_URL})
+
+ set(CMSIS_NNSource_DIR ${CMSIS_NN_SOURCE_DIR} PARENT_SCOPE)
+ set(CMSIS_NNSource_FOUND TRUE PARENT_SCOPE)
+endfunction(_CMSIS_NN_import)
+
+_CMSIS_NN_import()
diff --git a/infra/cmake/packages/TensorFlowProtoText-1.12/TensorFlowProtoTextConfigVersion.cmake b/infra/cmake/packages/CMSIS-NN-4.0.0/CMSIS-NNConfigVersion.cmake
index 4a57b655b..5fa88e6c5 100644
--- a/infra/cmake/packages/TensorFlowProtoText-1.12/TensorFlowProtoTextConfigVersion.cmake
+++ b/infra/cmake/packages/CMSIS-NN-4.0.0/CMSIS-NNConfigVersion.cmake
@@ -1,9 +1,10 @@
-set(PACKAGE_VERSION "1.12")
+set(PACKAGE_VERSION "4.0.0")
set(PACKAGE_VERSION_EXACT FALSE)
set(PACKAGE_VERSION_COMPATIBLE FALSE)
set(PACKAGE_VERSION_UNSUITABLE TRUE)
if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
- set(PACKAGE_VERSION_EXACT TRUE)
- set(PACKAGE_VERSION_UNSUITABLE FALSE)
+ set(PACKAGE_VERSION_EXACT TRUE)
+ set(PACKAGE_VERSION_COMPATIBLE TRUE)
+ set(PACKAGE_VERSION_UNSUITABLE FALSE)
endif(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
diff --git a/infra/cmake/packages/CMSIS-NN-4.1.0/CMSIS-NNConfig.cmake b/infra/cmake/packages/CMSIS-NN-4.1.0/CMSIS-NNConfig.cmake
new file mode 100644
index 000000000..06106dc60
--- /dev/null
+++ b/infra/cmake/packages/CMSIS-NN-4.1.0/CMSIS-NNConfig.cmake
@@ -0,0 +1,14 @@
+function(_CMSIS_NN_import)
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(CMSIS_NN_4_1_0_URL ${EXTERNAL_DOWNLOAD_SERVER}/ARM-software/CMSIS-NN/archive/refs/tags/v4.1.0.tar.gz)
+
+ ExternalSource_Download(CMSIS_NN DIRNAME CMSIS-NN-4.1.0 ${CMSIS_NN_4_1_0_URL})
+
+ set(CMSIS_NNSource_DIR ${CMSIS_NN_SOURCE_DIR} PARENT_SCOPE)
+ set(CMSIS_NNSource_FOUND TRUE PARENT_SCOPE)
+endfunction(_CMSIS_NN_import)
+
+_CMSIS_NN_import()
diff --git a/infra/cmake/packages/CMSIS-NN-4.1.0/CMSIS-NNConfigVersion.cmake b/infra/cmake/packages/CMSIS-NN-4.1.0/CMSIS-NNConfigVersion.cmake
new file mode 100644
index 000000000..5296e197d
--- /dev/null
+++ b/infra/cmake/packages/CMSIS-NN-4.1.0/CMSIS-NNConfigVersion.cmake
@@ -0,0 +1,10 @@
+set(PACKAGE_VERSION "4.1.0")
+set(PACKAGE_VERSION_EXACT FALSE)
+set(PACKAGE_VERSION_COMPATIBLE FALSE)
+set(PACKAGE_VERSION_UNSUITABLE TRUE)
+
+if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
+ set(PACKAGE_VERSION_EXACT TRUE)
+ set(PACKAGE_VERSION_COMPATIBLE TRUE)
+ set(PACKAGE_VERSION_UNSUITABLE FALSE)
+endif(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
diff --git a/infra/cmake/packages/CMSISSource-5.8.0/CMSISSourceConfig.cmake b/infra/cmake/packages/CMSISSource-5.8.0/CMSISSourceConfig.cmake
new file mode 100644
index 000000000..d1588d3fd
--- /dev/null
+++ b/infra/cmake/packages/CMSISSource-5.8.0/CMSISSourceConfig.cmake
@@ -0,0 +1,16 @@
+function(_CMSISSource_import)
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(CMSIS_5_8_0_URL ${EXTERNAL_DOWNLOAD_SERVER}/ARM-software/CMSIS_5/archive/refs/tags/5.8.0.tar.gz)
+ set(CMSIS_5_8_0_SHA256 fe6b697b8782e7fd6131034b7646a3b65c83018774abf7f9f94901a3bc7c82ad)
+
+ ExternalSource_Download(CMSIS DIRNAME CMSIS-5.8.0 ${CMSIS_5_8_0_URL}
+ CHECKSUM "SHA256=${CMSIS_5_8_0_SHA256}")
+
+ set(CMSISSource_DIR ${CMSIS_SOURCE_DIR} PARENT_SCOPE)
+ set(CMSISSource_FOUND TRUE PARENT_SCOPE)
+endfunction(_CMSISSource_import)
+
+_CMSISSource_import()
diff --git a/infra/cmake/packages/CMSISSource-5.8.0/CMSISSourceConfigVersion.cmake b/infra/cmake/packages/CMSISSource-5.8.0/CMSISSourceConfigVersion.cmake
new file mode 100644
index 000000000..ca6f7826d
--- /dev/null
+++ b/infra/cmake/packages/CMSISSource-5.8.0/CMSISSourceConfigVersion.cmake
@@ -0,0 +1,10 @@
+set(PACKAGE_VERSION "5.8.0")
+set(PACKAGE_VERSION_EXACT FALSE)
+set(PACKAGE_VERSION_COMPATIBLE FALSE)
+set(PACKAGE_VERSION_UNSUITABLE TRUE)
+
+if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
+ set(PACKAGE_VERSION_EXACT TRUE)
+ set(PACKAGE_VERSION_COMPATIBLE TRUE)
+ set(PACKAGE_VERSION_UNSUITABLE FALSE)
+endif(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
diff --git a/infra/cmake/packages/CaffeSourceConfig.cmake b/infra/cmake/packages/CaffeSourceConfig.cmake
index 41cc2c9f7..05eb5b30e 100644
--- a/infra/cmake/packages/CaffeSourceConfig.cmake
+++ b/infra/cmake/packages/CaffeSourceConfig.cmake
@@ -7,7 +7,8 @@ function(_CaffeSource_import)
nnas_include(ExternalSourceTools)
nnas_include(OptionTools)
- envoption(CAFFE_URL https://github.com/BVLC/caffe/archive/1.0.tar.gz)
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(CAFFE_URL ${EXTERNAL_DOWNLOAD_SERVER}/BVLC/caffe/archive/1.0.tar.gz)
ExternalSource_Download(CAFFE ${CAFFE_URL})
diff --git a/infra/cmake/packages/CpuInfoSourceConfig.cmake b/infra/cmake/packages/CpuInfoSourceConfig.cmake
new file mode 100644
index 000000000..b93a6a2e5
--- /dev/null
+++ b/infra/cmake/packages/CpuInfoSourceConfig.cmake
@@ -0,0 +1,21 @@
+function(_CpuInfoSource_import)
+ if(NOT ${DOWNLOAD_CPUINFO})
+ set(CpuInfoSource_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT ${DOWNLOAD_CPUINFO})
+
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ # CPUINFO commit from tflite v2.8
+ envoption(CPUINFO_URL ${EXTERNAL_DOWNLOAD_SERVER}/pytorch/cpuinfo/archive/5916273f79a21551890fd3d56fc5375a78d1598d.tar.gz)
+ ExternalSource_Download(CPUINFO
+ DIRNAME CPUINFO
+ URL ${CPUINFO_URL})
+
+ set(CpuInfoSource_DIR ${CPUINFO_SOURCE_DIR} PARENT_SCOPE)
+ set(CpuInfoSource_FOUND TRUE PARENT_SCOPE)
+endfunction(_CpuInfoSource_import)
+
+_CpuInfoSource_import()
diff --git a/infra/cmake/packages/Egl_HeadersSourceConfig.cmake b/infra/cmake/packages/Egl_HeadersSourceConfig.cmake
new file mode 100644
index 000000000..fae57f6ce
--- /dev/null
+++ b/infra/cmake/packages/Egl_HeadersSourceConfig.cmake
@@ -0,0 +1,21 @@
+function(_Egl_HeadersSource_import)
+ if(NOT DOWNLOAD_EGL_HEADERS)
+ set(Egl_HeadersSource_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT DOWNLOAD_EGL_HEADERS)
+
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(EGL_HEADERS_URL ${EXTERNAL_DOWNLOAD_SERVER}/KhronosGroup/EGL-Registry/archive/649981109e263b737e7735933c90626c29a306f2.zip)
+
+ ExternalSource_Download(EGL_HEADERS
+ DIRNAME EGL_HEADERS
+ URL ${EGL_HEADERS_URL})
+
+ set(Egl_HeadersSource_DIR ${EGL_HEADERS_SOURCE_DIR} PARENT_SCOPE)
+ set(Egl_HeadersSource_FOUND TRUE PARENT_SCOPE)
+endfunction(_Egl_HeadersSource_import)
+
+_Egl_HeadersSource_import()
diff --git a/infra/cmake/packages/FarmhashSourceConfig.cmake b/infra/cmake/packages/FarmhashSourceConfig.cmake
index a19c8b992..fa1867c5c 100644
--- a/infra/cmake/packages/FarmhashSourceConfig.cmake
+++ b/infra/cmake/packages/FarmhashSourceConfig.cmake
@@ -10,7 +10,8 @@ function(_FarmhashSource_import)
# NOTE TensorFlow 1.12 downloads farmhash from the following URL
# TensorFlow 1.13.1 downloads farmhash from the following URL
# TensorFlow 2.3.0 downloads farmhash from the following URL
- envoption(FARMHASH_1_12_URL https://github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz)
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(FARMHASH_1_12_URL ${EXTERNAL_DOWNLOAD_SERVER}/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz)
ExternalSource_Download(FARMHASH ${FARMHASH_1_12_URL})
diff --git a/infra/cmake/packages/FlatBuffersConfig.cmake b/infra/cmake/packages/FlatBuffers-2.0/FlatBuffersConfig.cmake
index da084e7d3..99da30803 100644
--- a/infra/cmake/packages/FlatBuffersConfig.cmake
+++ b/infra/cmake/packages/FlatBuffers-2.0/FlatBuffersConfig.cmake
@@ -1,17 +1,20 @@
+# TODO Remove other Flatbuffers versions
function(_FlatBuffers_import)
- find_package(Flatbuffers QUIET)
+ find_package(Flatbuffers 2.0 QUIET)
set(FlatBuffers_FOUND ${Flatbuffers_FOUND} PARENT_SCOPE)
endfunction(_FlatBuffers_import)
function(_FlatBuffers_build)
if(NOT BUILD_FLATBUFFERS)
+ message(STATUS "FlatBuffersConfig !BUILD_FLATBUFFERS")
return()
endif(NOT BUILD_FLATBUFFERS)
- nnas_find_package(FlatBuffersSource EXACT 1.10 QUIET)
+ nnas_find_package(FlatBuffersSource EXACT 2.0 QUIET)
if(NOT FlatBuffersSource_FOUND)
# Source is not available
+ message(STATUS "FlatBuffersConfig !FlatBuffersSource_FOUND")
return()
endif(NOT FlatBuffersSource_FOUND)
@@ -19,27 +22,43 @@ function(_FlatBuffers_build)
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 8.0)
set(ADDITIONAL_CXX_FLAGS "-Wno-error=class-memaccess")
endif()
+ if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 12.0)
+ set(ADDITIONAL_CXX_FLAGS "-Wno-error=stringop-overflow")
+ endif()
nnas_include(ExternalBuildTools)
ExternalBuild_CMake(CMAKE_DIR ${FlatBuffersSource_DIR}
- BUILD_DIR ${CMAKE_BINARY_DIR}/externals/FLATBUFFERS/build
+ BUILD_DIR ${CMAKE_BINARY_DIR}/externals/FLATBUFFERS-2.0/build
INSTALL_DIR ${EXT_OVERLAY_DIR}
BUILD_FLAGS ${ADDITIONAL_CXX_FLAGS}
- IDENTIFIER "1.10-fix2"
- EXTRA_OPTS "-DFLATBUFFERS_BUILD_TESTS:BOOL=OFF"
- PKG_NAME "FLATBUFFERS")
+ IDENTIFIER "2.0"
+ EXTRA_OPTS "-DFLATBUFFERS_BUILD_TESTS:BOOL=OFF"
+ "-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON"
+ PKG_NAME "FLATBUFFERS-2.0")
endfunction(_FlatBuffers_build)
_FlatBuffers_build()
_FlatBuffers_import()
+# for cross compilation BUILD_HOST_EXEC should be set for host flatc executable
+# flatc should exist as ${BUILD_HOST_EXEC}/overlay/bin/flatc.
+# and then if EXTERNAL_FLATC is set then use ${EXTERNAL_FLATC} file.
+set(FLATC_PATH "$<TARGET_FILE:flatbuffers::flatc>")
+
+if(DEFINED ENV{BUILD_HOST_EXEC})
+ set(FLATC_PATH $ENV{BUILD_HOST_EXEC}/overlay/bin/flatc)
+endif(DEFINED ENV{BUILD_HOST_EXEC})
+if(DEFINED ENV{EXTERNAL_FLATC})
+ set(FLATC_PATH $ENV{EXTERNAL_FLATC})
+endif(DEFINED ENV{EXTERNAL_FLATC})
+
if(FlatBuffers_FOUND)
- if(NOT TARGET flatbuffers)
- add_library(flatbuffers INTERFACE)
- target_link_libraries(flatbuffers INTERFACE flatbuffers::flatbuffers)
- message(STATUS "Found FlatBuffers: TRUE")
- endif(NOT TARGET flatbuffers)
+ if(NOT TARGET flatbuffers-2.0)
+ add_library(flatbuffers-2.0 INTERFACE)
+ target_link_libraries(flatbuffers-2.0 INTERFACE flatbuffers::flatbuffers)
+ message(STATUS "Found flatbuffers-2.0: TRUE")
+ endif(NOT TARGET flatbuffers-2.0)
function(FlatBuffers_Generate PREFIX OUTPUT_DIR SCHEMA_DIR)
get_filename_component(abs_output_dir ${OUTPUT_DIR} ABSOLUTE)
@@ -57,7 +76,7 @@ if(FlatBuffers_FOUND)
add_custom_command(OUTPUT ${OUTPUT_FILES}
COMMAND ${CMAKE_COMMAND} -E make_directory "${abs_output_dir}"
- COMMAND "$<TARGET_FILE:flatbuffers::flatc>" -c --no-includes
+ COMMAND "${FLATC_PATH}" -c --no-includes
--no-union-value-namespacing
--gen-object-api -o "${abs_output_dir}"
${SCHEMA_FILES}
@@ -99,7 +118,7 @@ if(FlatBuffers_FOUND)
# Generate headers
add_custom_command(OUTPUT ${OUTPUT_FILES}
COMMAND ${CMAKE_COMMAND} -E make_directory "${abs_output_dir}"
- COMMAND "$<TARGET_FILE:flatbuffers::flatc>" -c --no-includes
+ COMMAND "${FLATC_PATH}" -c --no-includes
--no-union-value-namespacing
--gen-object-api -o "${abs_output_dir}"
${SCHEMA_FILES}
@@ -111,6 +130,6 @@ if(FlatBuffers_FOUND)
add_library(${TGT} STATIC ${OUTPUT_FILES})
set_target_properties(${TGT} PROPERTIES LINKER_LANGUAGE CXX)
target_include_directories(${TGT} PUBLIC "${ARG_INCLUDE_DIR}")
- target_link_libraries(${TGT} PUBLIC flatbuffers)
+ target_link_libraries(${TGT} PUBLIC flatbuffers-2.0)
endfunction(FlatBuffers_Target)
endif(FlatBuffers_FOUND)
diff --git a/infra/cmake/packages/TensorFlowSource-1.12/TensorFlowSourceConfigVersion.cmake b/infra/cmake/packages/FlatBuffers-2.0/FlatBuffersConfigVersion.cmake
index 8cfdbf8e5..e4a87a7d5 100644
--- a/infra/cmake/packages/TensorFlowSource-1.12/TensorFlowSourceConfigVersion.cmake
+++ b/infra/cmake/packages/FlatBuffers-2.0/FlatBuffersConfigVersion.cmake
@@ -1,4 +1,4 @@
-set(PACKAGE_VERSION "1.12")
+set(PACKAGE_VERSION "2.0")
set(PACKAGE_VERSION_EXACT FALSE)
set(PACKAGE_VERSION_COMPATIBLE FALSE)
set(PACKAGE_VERSION_UNSUITABLE TRUE)
diff --git a/infra/cmake/packages/FlatBuffersSource-1.11/FlatBuffersSourceConfig.cmake b/infra/cmake/packages/FlatBuffersSource-1.11/FlatBuffersSourceConfig.cmake
deleted file mode 100644
index 92efbf97e..000000000
--- a/infra/cmake/packages/FlatBuffersSource-1.11/FlatBuffersSourceConfig.cmake
+++ /dev/null
@@ -1,21 +0,0 @@
-function(_FlatBuffersSource_import)
- if(NOT DOWNLOAD_FLATBUFFERS)
- set(FlatBuffersSource_FOUND FALSE PARENT_SCOPE)
- return()
- endif(NOT DOWNLOAD_FLATBUFFERS)
-
- nnas_include(ExternalSourceTools)
- nnas_include(OptionTools)
-
- envoption(FLATBUFFERS_1_11_URL https://github.com/google/flatbuffers/archive/v1.11.0.tar.gz)
- ExternalSource_Download(FLATBUFFERS
- DIRNAME FLATBUFFERS-1.11
- CHECKSUM MD5=02c64880acb89dbd57eebacfd67200d8
- URL ${FLATBUFFERS_1_11_URL}
- )
-
- set(FlatBuffersSource_DIR ${FLATBUFFERS_SOURCE_DIR} PARENT_SCOPE)
- set(FlatBuffersSource_FOUND TRUE PARENT_SCOPE)
-endfunction(_FlatBuffersSource_import)
-
-_FlatBuffersSource_import()
diff --git a/infra/cmake/packages/FlatBuffersSource-1.10/FlatBuffersSourceConfig.cmake b/infra/cmake/packages/FlatBuffersSource-2.0/FlatBuffersSourceConfig.cmake
index 09a922b67..e094055b7 100644
--- a/infra/cmake/packages/FlatBuffersSource-1.10/FlatBuffersSourceConfig.cmake
+++ b/infra/cmake/packages/FlatBuffersSource-2.0/FlatBuffersSourceConfig.cmake
@@ -7,11 +7,12 @@ function(_FlatBuffersSource_import)
nnas_include(ExternalSourceTools)
nnas_include(OptionTools)
- envoption(FLATBUFFERS_1_10_URL https://github.com/google/flatbuffers/archive/v1.10.0.tar.gz)
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(FLATBUFFERS_2_0_URL ${EXTERNAL_DOWNLOAD_SERVER}/google/flatbuffers/archive/v2.0.0.tar.gz)
ExternalSource_Download(FLATBUFFERS
- DIRNAME FLATBUFFERS-1.10
- CHECKSUM MD5=f7d19a3f021d93422b0bc287d7148cd2
- URL ${FLATBUFFERS_1_10_URL}
+ DIRNAME FLATBUFFERS-2.0
+ CHECKSUM MD5=a27992324c3cbf86dd888268a23d17bd
+ URL ${FLATBUFFERS_2_0_URL}
)
set(FlatBuffersSource_DIR ${FLATBUFFERS_SOURCE_DIR} PARENT_SCOPE)
diff --git a/infra/cmake/packages/FlatBuffersSource-1.11/FlatBuffersSourceConfigVersion.cmake b/infra/cmake/packages/FlatBuffersSource-2.0/FlatBuffersSourceConfigVersion.cmake
index f008e0528..e4a87a7d5 100644
--- a/infra/cmake/packages/FlatBuffersSource-1.11/FlatBuffersSourceConfigVersion.cmake
+++ b/infra/cmake/packages/FlatBuffersSource-2.0/FlatBuffersSourceConfigVersion.cmake
@@ -1,4 +1,4 @@
-set(PACKAGE_VERSION "1.11")
+set(PACKAGE_VERSION "2.0")
set(PACKAGE_VERSION_EXACT FALSE)
set(PACKAGE_VERSION_COMPATIBLE FALSE)
set(PACKAGE_VERSION_UNSUITABLE TRUE)
diff --git a/infra/cmake/packages/FlatBuffersSourceConfig.cmake b/infra/cmake/packages/FlatBuffersSourceConfig.cmake
deleted file mode 100644
index 52bce6de0..000000000
--- a/infra/cmake/packages/FlatBuffersSourceConfig.cmake
+++ /dev/null
@@ -1,28 +0,0 @@
-function(_FlatBuffersSource_import)
- if(NOT DOWNLOAD_FLATBUFFERS)
- set(FlatBuffersSource_FOUND FALSE PARENT_SCOPE)
- return()
- endif(NOT DOWNLOAD_FLATBUFFERS)
-
- nnas_include(ExternalSourceTools)
- nnas_include(OptionTools)
-
- # Each TensorFlow needs a specific version of Flatbuffers
- # - TensorFlow 1.7 downloads it from https://github.com/google/flatbuffers/archive/971a68110e4.tar.gz
- # - TensorFlow 1.12 downloads it from https://github.com/google/flatbuffers/archive/1f5eae5d6a1.tar.gz
- #
- # Let's use 1.10 released in 2018.10 (compatible with 1f5eae5d6a1).
- #
- # TODO Manage multiple versions
- envoption(FLATBUFFERS_URL https://github.com/google/flatbuffers/archive/v1.10.0.tar.gz)
- ExternalSource_Download(FLATBUFFERS
- DIRNAME FLATBUFFERS
- CHECKSUM MD5=f7d19a3f021d93422b0bc287d7148cd2
- URL ${FLATBUFFERS_URL}
- )
-
- set(FlatBuffersSource_DIR ${FLATBUFFERS_SOURCE_DIR} PARENT_SCOPE)
- set(FlatBuffersSource_FOUND TRUE PARENT_SCOPE)
-endfunction(_FlatBuffersSource_import)
-
-_FlatBuffersSource_import()
diff --git a/infra/cmake/packages/FlatBuffersSourceConfigVersion.cmake b/infra/cmake/packages/FlatBuffersSourceConfigVersion.cmake
deleted file mode 100644
index ac9e22e51..000000000
--- a/infra/cmake/packages/FlatBuffersSourceConfigVersion.cmake
+++ /dev/null
@@ -1,9 +0,0 @@
-set(PACKAGE_VERSION_EXACT FALSE)
-set(PACKAGE_VERSION_COMPATIBLE FALSE)
-set(PACKAGE_VERSION_UNSUITABLE TRUE)
-
-if(NOT PACKAGE_FIND_VERSION)
- # This package works only when find_package(...) call has no EXACT option
- set(PACKAGE_VERSION_COMPATIBLE TRUE)
- set(PACKAGE_VERSION_UNSUITABLE FALSE)
-endif(NOT PACKAGE_FIND_VERSION)
diff --git a/infra/cmake/packages/Fp16SourceConfig.cmake b/infra/cmake/packages/Fp16SourceConfig.cmake
new file mode 100644
index 000000000..3df4e4cc5
--- /dev/null
+++ b/infra/cmake/packages/Fp16SourceConfig.cmake
@@ -0,0 +1,21 @@
+function(_Fp16Source_import)
+ if(NOT ${DOWNLOAD_FP16})
+ set(Fp16Source_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT ${DOWNLOAD_FP16})
+
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ # fp16 commit in xnnpack 8b283aa30a31
+ envoption(FP16_URL ${EXTERNAL_DOWNLOAD_SERVER}/Maratyszcza/FP16/archive/4dfe081cf6bcd15db339cf2680b9281b8451eeb3.tar.gz)
+ ExternalSource_Download(FP16
+ DIRNAME FP16
+ URL ${FP16_URL})
+
+ set(Fp16Source_DIR ${FP16_SOURCE_DIR} PARENT_SCOPE)
+ set(Fp16Source_FOUND TRUE PARENT_SCOPE)
+endfunction(_Fp16Source_import)
+
+_Fp16Source_import()
diff --git a/infra/cmake/packages/FxdivSourceConfig.cmake b/infra/cmake/packages/FxdivSourceConfig.cmake
new file mode 100644
index 000000000..4427bf292
--- /dev/null
+++ b/infra/cmake/packages/FxdivSourceConfig.cmake
@@ -0,0 +1,21 @@
+function(_FxdivSource_import)
+ if(NOT ${DOWNLOAD_FXDIV})
+ set(FxdivSource_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT ${DOWNLOAD_FXDIV})
+
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ # fxdiv commit in xnnpack 8b283aa30a31
+ envoption(FXDIV_URL ${EXTERNAL_DOWNLOAD_SERVER}/Maratyszcza/FXdiv/archive/f8c5354679ec2597792bc70a9e06eff50c508b9a.tar.gz)
+ ExternalSource_Download(FXDIV
+ DIRNAME FXDIV
+ URL ${FXDIV_URL})
+
+ set(FxdivSource_DIR ${FXDIV_SOURCE_DIR} PARENT_SCOPE)
+ set(FxdivSource_FOUND TRUE PARENT_SCOPE)
+endfunction(_FxdivSource_import)
+
+_FxdivSource_import()
diff --git a/infra/cmake/packages/GEMMLowpSourceConfig.cmake b/infra/cmake/packages/GEMMLowpSourceConfig.cmake
index 6e1cfa9c9..3b3560359 100644
--- a/infra/cmake/packages/GEMMLowpSourceConfig.cmake
+++ b/infra/cmake/packages/GEMMLowpSourceConfig.cmake
@@ -9,7 +9,8 @@ function(_GEMMLowpSource_import)
# NOTE TensorFlow 1.12 uses the following URL
# TensorFlow 1.13.1 uses the following URL
- envoption(GEMMLOWP_URL https://github.com/google/gemmlowp/archive/38ebac7b059e84692f53e5938f97a9943c120d98.tar.gz)
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(GEMMLOWP_URL ${EXTERNAL_DOWNLOAD_SERVER}/google/gemmlowp/archive/38ebac7b059e84692f53e5938f97a9943c120d98.tar.gz)
ExternalSource_Download(GEMMLOWP ${GEMMLOWP_URL})
diff --git a/infra/cmake/packages/GFlagsSourceConfig.cmake b/infra/cmake/packages/GFlagsSourceConfig.cmake
index 3e70d89fc..2f9b7537f 100644
--- a/infra/cmake/packages/GFlagsSourceConfig.cmake
+++ b/infra/cmake/packages/GFlagsSourceConfig.cmake
@@ -7,7 +7,8 @@ function(_GFlagsSource_import)
nnas_include(ExternalSourceTools)
nnas_include(OptionTools)
- envoption(GFLAGS_URL https://github.com/gflags/gflags/archive/v2.2.1.tar.gz)
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(GFLAGS_URL ${EXTERNAL_DOWNLOAD_SERVER}/gflags/gflags/archive/v2.2.1.tar.gz)
ExternalSource_Download(GFLAGS ${GFLAGS_URL})
diff --git a/infra/cmake/packages/GTestConfig.cmake b/infra/cmake/packages/GTestConfig.cmake
index 62a15e0cc..c844f4c63 100644
--- a/infra/cmake/packages/GTestConfig.cmake
+++ b/infra/cmake/packages/GTestConfig.cmake
@@ -6,6 +6,7 @@ function(_GTest_build)
nnas_find_package(GTestSource QUIET)
if(NOT GTestSource_FOUND)
+ message(STATUS "GTest_build skip: NOT GTestSource_FOUND")
return()
endif(NOT GTestSource_FOUND)
@@ -13,9 +14,14 @@ function(_GTest_build)
ExternalBuild_CMake(CMAKE_DIR ${GTestSource_DIR}
BUILD_DIR ${CMAKE_BINARY_DIR}/externals/GTEST/build
INSTALL_DIR ${EXT_OVERLAY_DIR}
- IDENTIFIER "1.8.0-fix1"
+ IDENTIFIER "1.11.0"
PKG_NAME "GTEST")
+ set(GTEST_FOUND TRUE PARENT_SCOPE)
+ set(GTEST_INCLUDE_DIRS ${EXT_OVERLAY_DIR}/include PARENT_SCOPE)
+ set(GTEST_LIBRARIES ${EXT_OVERLAY_DIR}/lib/libgtest.a PARENT_SCOPE)
+ set(GTEST_MAIN_LIBRARIES ${EXT_OVERLAY_DIR}/lib/libgtest_main.a PARENT_SCOPE)
+
endfunction(_GTest_build)
_GTest_build()
@@ -24,7 +30,12 @@ _GTest_build()
# Note: cmake supports GTest and does not find GTestConfig.cmake or GTest-config.cmake.
# Refer to "https://cmake.org/cmake/help/v3.5/module/FindGTest.html"
# find_package(GTest) creates options like GTEST_FOUND, not GTest_FOUND.
-find_package(GTest)
+if(NOT GTEST_FOUND)
+ message(STATUS "GTEST_FOUND false: call find_package(GTest)")
+ # Reset package config directory cache to prevent recursive find
+ unset(GTest_DIR CACHE)
+ find_package(GTest)
+endif(NOT GTEST_FOUND)
find_package(Threads)
if(${GTEST_FOUND} AND TARGET Threads::Threads)
diff --git a/infra/cmake/packages/GTestSourceConfig.cmake b/infra/cmake/packages/GTestSourceConfig.cmake
index 8b7495fbc..643c3d109 100644
--- a/infra/cmake/packages/GTestSourceConfig.cmake
+++ b/infra/cmake/packages/GTestSourceConfig.cmake
@@ -7,7 +7,8 @@ function(_GTestSource_import)
nnas_include(ExternalSourceTools)
nnas_include(OptionTools)
- envoption(GTEST_URL https://github.com/google/googletest/archive/release-1.8.0.tar.gz)
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(GTEST_URL ${EXTERNAL_DOWNLOAD_SERVER}/google/googletest/archive/release-1.11.0.tar.gz)
ExternalSource_Download(GTEST ${GTEST_URL})
diff --git a/infra/cmake/packages/GoogleDoubleConversionConfig.cmake b/infra/cmake/packages/GoogleDoubleConversionConfig.cmake
deleted file mode 100644
index 3fdc86102..000000000
--- a/infra/cmake/packages/GoogleDoubleConversionConfig.cmake
+++ /dev/null
@@ -1,52 +0,0 @@
-# https://github.com/google/double-conversion
-set(GOOGLE_DOUBLE_CONVERSION_PREFIX "/usr" CACHE PATH "Google DoubleConversion install prefix")
-
-function(_GoogleDoubleConversion_import)
- # Find the header & lib
- find_library(GoogleDoubleConversion_LIB
- NAMES double-conversion
- PATHS "${GOOGLE_DOUBLE_CONVERSION_PREFIX}/lib"
- )
-
- find_path(GoogleDoubleConversion_INCLUDE_DIR
- NAMES double-conversion/double-conversion.h
- PATHS "${GOOGLE_DOUBLE_CONVERSION_PREFIX}/include"
- )
-
- # TODO Version check
- set(GoogleDoubleConversion_FOUND TRUE)
-
- if(NOT GoogleDoubleConversion_LIB)
- set(GoogleDoubleConversion_FOUND FALSE)
- endif(NOT GoogleDoubleConversion_LIB)
-
- if(NOT GoogleDoubleConversion_INCLUDE_DIR)
- set(GoogleDoubleConversion_FOUND FALSE)
- endif(NOT GoogleDoubleConversion_INCLUDE_DIR)
-
- set(GoogleDoubleConversion_FOUND ${GoogleDoubleConversion_FOUND} PARENT_SCOPE)
-
- unset(MESSAGE)
- list(APPEND MESSAGE "Found Google Double Conversion")
-
- if(NOT GoogleDoubleConversion_FOUND)
- list(APPEND MESSAGE ": FALSE")
- else(NOT GoogleDoubleConversion_FOUND)
- list(APPEND MESSAGE " (include: ${GoogleDoubleConversion_INCLUDE_DIR} library: ${GoogleDoubleConversion_LIB})")
-
- # Add target
- if(NOT TARGET google_double_conversion)
- # NOTE IMPORTED target may be more appropriate for this case
- add_library(google_double_conversion INTERFACE)
- target_link_libraries(google_double_conversion INTERFACE ${GoogleDoubleConversion_LIB})
- target_include_directories(google_double_conversion INTERFACE ${GoogleDoubleConversion_INCLUDE_DIR})
-
- add_library(Google::DoubleConversion ALIAS google_double_conversion)
- endif(NOT TARGET google_double_conversion)
- endif(NOT GoogleDoubleConversion_FOUND)
-
- message(STATUS ${MESSAGE})
- set(GoogleDoubleConversion_FOUND ${GoogleDoubleConversion_FOUND} PARENT_SCOPE)
-endfunction(_GoogleDoubleConversion_import)
-
-_GoogleDoubleConversion_import()
diff --git a/infra/cmake/packages/GoogleNSyncConfig.cmake b/infra/cmake/packages/GoogleNSyncConfig.cmake
deleted file mode 100644
index 1fdf8cc20..000000000
--- a/infra/cmake/packages/GoogleNSyncConfig.cmake
+++ /dev/null
@@ -1,62 +0,0 @@
-# https://github.com/google/nsync
-set(GOOGLE_NSYNC_PREFIX "/usr" CACHE PATH "Where to find Google NSync library")
-
-function(_GoogleNSync_import)
- # Find the header & lib
- find_library(GoogleNSync_C_LIB
- NAMES nsync
- PATHS "${GOOGLE_NSYNC_PREFIX}/lib"
- )
-
- find_library(GoogleNSync_CPP_LIB
- NAMES nsync_cpp
- PATHS "${GOOGLE_NSYNC_PREFIX}/lib"
- )
-
- find_path(GoogleNSync_INCLUDE_DIR
- NAMES nsync.h
- PATHS "${GOOGLE_NSYNC_PREFIX}/include"
- )
-
- message(STATUS "GoogleNSync_C_LIB: ${GoogleNSync_C_LIB}")
- message(STATUS "GoogleNSync_CPP_LIB: ${GoogleNSync_CPP_LIB}")
- message(STATUS "GoogleNSync_INCLUDE_DIR: ${GoogleNSync_INCLUDE_DIR}")
-
- set(GoogleNSync_FOUND TRUE)
-
- if(NOT GoogleNSync_C_LIB)
- set(GoogleNSync_FOUND FALSE)
- endif(NOT GoogleNSync_C_LIB)
-
- if(NOT GoogleNSync_CPP_LIB)
- set(GoogleNSync_FOUND FALSE)
- endif(NOT GoogleNSync_CPP_LIB)
-
- if(NOT GoogleNSync_INCLUDE_DIR)
- set(GoogleNSync_FOUND FALSE)
- endif(NOT GoogleNSync_INCLUDE_DIR)
-
- unset(MESSAGE)
- list(APPEND MESSAGE "Found Google NSync")
-
- if(NOT GoogleNSync_FOUND)
- list(APPEND MESSAGE ": FALSE")
- else(NOT GoogleNSync_FOUND)
- list(APPEND MESSAGE " (include: ${GoogleNSync_INCLUDE_DIR} library: ${GoogleNSync_C_LIB} ${GoogleNSync_CPP_LIB})")
-
- # Add target
- if(NOT TARGET google_nsync)
- # NOTE IMPORTED target may be more appropriate for this case
- add_library(google_nsync INTERFACE)
- target_link_libraries(google_nsync INTERFACE ${GoogleNSync_C_LIB} ${GoogleNSync_CPP_LIB})
- target_include_directories(google_nsync INTERFACE ${GoogleNSync_INCLUDE_DIR})
-
- add_library(Google::NSync ALIAS google_nsync)
- endif(NOT TARGET google_nsync)
- endif(NOT GoogleNSync_FOUND)
-
- message(STATUS ${MESSAGE})
- set(GoogleNSync_FOUND ${GoogleNSync_FOUND} PARENT_SCOPE)
-endfunction(_GoogleNSync_import)
-
-_GoogleNSync_import()
diff --git a/infra/cmake/packages/H5Tinit.c.linux-armv7l b/infra/cmake/packages/H5Tinit.c.linux-armv7l
new file mode 100644
index 000000000..b0f6a470d
--- /dev/null
+++ b/infra/cmake/packages/H5Tinit.c.linux-armv7l
@@ -0,0 +1,977 @@
+/* Generated automatically by H5detect -- do not edit */
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ *
+ * Created: Mar 31, 2022
+ * Ubuntu <ubuntu@rpi4>
+ *
+ * Purpose: This machine-generated source code contains
+ * information about the various integer and
+ * floating point numeric formats found on this
+ * architecture. The parameters below should be
+ * checked carefully and errors reported to the
+ * HDF5 maintainer.
+ *
+ * Each of the numeric formats listed below are
+ * printed from most significant bit to least
+ * significant bit even though the actual bytes
+ * might be stored in a different order in
+ * memory. The integers above each binary byte
+ * indicate the relative order of the bytes in
+ * memory; little-endian machines have
+ * decreasing numbers while big-endian machines
+ * have increasing numbers.
+ *
+ * The fields of the numbers are printed as
+ * letters with `S' for the mantissa sign bit,
+ * `M' for the mantissa magnitude, and `E' for
+ * the exponent. The exponent has an associated
+ * bias which can be subtracted to find the
+ * true exponent. The radix point is assumed
+ * to be before the first `M' bit. Any bit
+ * of a floating-point value not falling into one
+ * of these categories is printed as a question
+ * mark. Bits of integer types are printed as
+ * `I' for 2's complement and `U' for magnitude.
+ *
+ * If the most significant bit of the normalized
+ * mantissa (always a `1' except for `0.0') is
+ * not stored then an `implicit=yes' appears
+ * under the field description. In thie case,
+ * the radix point is still assumed to be
+ * before the first `M' but after the implicit
+ * bit.
+ *
+ * Modifications:
+ *
+ * DO NOT MAKE MODIFICATIONS TO THIS FILE!
+ * It was generated by code in `H5detect.c'.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/****************/
+/* Module Setup */
+/****************/
+
+#define H5T_PACKAGE /*suppress error about including H5Tpkg.h*/
+
+
+/***********/
+/* Headers */
+/***********/
+#include "H5private.h" /* Generic Functions */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5FLprivate.h" /* Free Lists */
+#include "H5Iprivate.h" /* IDs */
+#include "H5Tpkg.h" /* Datatypes */
+
+
+/****************/
+/* Local Macros */
+/****************/
+
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+
+/********************/
+/* Package Typedefs */
+/********************/
+
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+
+/********************/
+/* Public Variables */
+/********************/
+
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5TN_init_interface
+ *
+ * Purpose: Initialize pre-defined native datatypes from code generated
+ * during the library configuration by H5detect.
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Robb Matzke
+ * Wednesday, December 16, 1998
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5TN_init_interface(void)
+{
+ H5T_t *dt = NULL;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /*
+ * 0
+ * IIIIIIII
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 1;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 8;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_2;
+ if((H5T_NATIVE_SCHAR_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_SCHAR_ALIGN_g = 1;
+ H5T_NATIVE_SCHAR_COMP_ALIGN_g = 1;
+
+ /*
+ * 0
+ * UUUUUUUU
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 1;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 8;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_NONE;
+ if((H5T_NATIVE_UCHAR_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_UCHAR_ALIGN_g = 1;
+
+ /*
+ * 1 0
+ * IIIIIIII IIIIIIII
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 2;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 16;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_2;
+ if((H5T_NATIVE_SHORT_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_SHORT_ALIGN_g = 1;
+ H5T_NATIVE_SHORT_COMP_ALIGN_g = 2;
+
+ /*
+ * 1 0
+ * UUUUUUUU UUUUUUUU
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 2;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 16;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_NONE;
+ if((H5T_NATIVE_USHORT_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_USHORT_ALIGN_g = 1;
+
+ /*
+ * 3 2 1 0
+ * IIIIIIII IIIIIIII IIIIIIII IIIIIIII
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 4;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 32;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_2;
+ if((H5T_NATIVE_INT_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_INT_ALIGN_g = 1;
+ H5T_NATIVE_INT_COMP_ALIGN_g = 4;
+
+ /*
+ * 3 2 1 0
+ * UUUUUUUU UUUUUUUU UUUUUUUU UUUUUUUU
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 4;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 32;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_NONE;
+ if((H5T_NATIVE_UINT_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_UINT_ALIGN_g = 1;
+
+ /*
+ * 3 2 1 0
+ * IIIIIIII IIIIIIII IIIIIIII IIIIIIII
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 4;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 32;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_2;
+ if((H5T_NATIVE_LONG_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_LONG_ALIGN_g = 1;
+ H5T_NATIVE_LONG_COMP_ALIGN_g = 4;
+
+ /*
+ * 3 2 1 0
+ * UUUUUUUU UUUUUUUU UUUUUUUU UUUUUUUU
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 4;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 32;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_NONE;
+ if((H5T_NATIVE_ULONG_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_ULONG_ALIGN_g = 1;
+
+ /*
+ * 0
+ * IIIIIIII
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 1;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 8;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_2;
+ if((H5T_NATIVE_INT8_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_INT8_ALIGN_g = 1;
+
+ /*
+ * 0
+ * UUUUUUUU
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 1;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 8;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_NONE;
+ if((H5T_NATIVE_UINT8_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_UINT8_ALIGN_g = 1;
+
+ /*
+ * 0
+ * IIIIIIII
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 1;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 8;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_2;
+ if((H5T_NATIVE_INT_LEAST8_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_INT_LEAST8_ALIGN_g = 1;
+
+ /*
+ * 0
+ * UUUUUUUU
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 1;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 8;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_NONE;
+ if((H5T_NATIVE_UINT_LEAST8_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_UINT_LEAST8_ALIGN_g = 1;
+
+ /*
+ * 0
+ * IIIIIIII
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 1;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 8;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_2;
+ if((H5T_NATIVE_INT_FAST8_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_INT_FAST8_ALIGN_g = 1;
+
+ /*
+ * 0
+ * UUUUUUUU
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 1;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 8;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_NONE;
+ if((H5T_NATIVE_UINT_FAST8_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_UINT_FAST8_ALIGN_g = 1;
+
+ /*
+ * 1 0
+ * IIIIIIII IIIIIIII
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 2;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 16;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_2;
+ if((H5T_NATIVE_INT16_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_INT16_ALIGN_g = 1;
+
+ /*
+ * 1 0
+ * UUUUUUUU UUUUUUUU
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 2;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 16;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_NONE;
+ if((H5T_NATIVE_UINT16_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_UINT16_ALIGN_g = 1;
+
+ /*
+ * 1 0
+ * IIIIIIII IIIIIIII
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 2;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 16;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_2;
+ if((H5T_NATIVE_INT_LEAST16_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_INT_LEAST16_ALIGN_g = 1;
+
+ /*
+ * 1 0
+ * UUUUUUUU UUUUUUUU
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 2;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 16;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_NONE;
+ if((H5T_NATIVE_UINT_LEAST16_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_UINT_LEAST16_ALIGN_g = 1;
+
+ /*
+ * 3 2 1 0
+ * IIIIIIII IIIIIIII IIIIIIII IIIIIIII
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 4;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 32;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_2;
+ if((H5T_NATIVE_INT_FAST16_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_INT_FAST16_ALIGN_g = 1;
+
+ /*
+ * 3 2 1 0
+ * UUUUUUUU UUUUUUUU UUUUUUUU UUUUUUUU
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 4;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 32;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_NONE;
+ if((H5T_NATIVE_UINT_FAST16_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_UINT_FAST16_ALIGN_g = 1;
+
+ /*
+ * 3 2 1 0
+ * IIIIIIII IIIIIIII IIIIIIII IIIIIIII
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 4;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 32;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_2;
+ if((H5T_NATIVE_INT32_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_INT32_ALIGN_g = 1;
+
+ /*
+ * 3 2 1 0
+ * UUUUUUUU UUUUUUUU UUUUUUUU UUUUUUUU
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 4;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 32;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_NONE;
+ if((H5T_NATIVE_UINT32_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_UINT32_ALIGN_g = 1;
+
+ /*
+ * 3 2 1 0
+ * IIIIIIII IIIIIIII IIIIIIII IIIIIIII
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 4;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 32;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_2;
+ if((H5T_NATIVE_INT_LEAST32_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_INT_LEAST32_ALIGN_g = 1;
+
+ /*
+ * 3 2 1 0
+ * UUUUUUUU UUUUUUUU UUUUUUUU UUUUUUUU
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 4;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 32;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_NONE;
+ if((H5T_NATIVE_UINT_LEAST32_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_UINT_LEAST32_ALIGN_g = 1;
+
+ /*
+ * 3 2 1 0
+ * IIIIIIII IIIIIIII IIIIIIII IIIIIIII
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 4;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 32;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_2;
+ if((H5T_NATIVE_INT_FAST32_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_INT_FAST32_ALIGN_g = 1;
+
+ /*
+ * 3 2 1 0
+ * UUUUUUUU UUUUUUUU UUUUUUUU UUUUUUUU
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 4;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 32;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_NONE;
+ if((H5T_NATIVE_UINT_FAST32_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_UINT_FAST32_ALIGN_g = 1;
+
+ /*
+ * 7 6 5 4
+ * IIIIIIII IIIIIIII IIIIIIII IIIIIIII
+ * 3 2 1 0
+ * IIIIIIII IIIIIIII IIIIIIII IIIIIIII
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 8;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 64;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_2;
+ if((H5T_NATIVE_INT64_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_INT64_ALIGN_g = 1;
+
+ /*
+ * 7 6 5 4
+ * UUUUUUUU UUUUUUUU UUUUUUUU UUUUUUUU
+ * 3 2 1 0
+ * UUUUUUUU UUUUUUUU UUUUUUUU UUUUUUUU
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 8;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 64;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_NONE;
+ if((H5T_NATIVE_UINT64_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_UINT64_ALIGN_g = 1;
+
+ /*
+ * 7 6 5 4
+ * IIIIIIII IIIIIIII IIIIIIII IIIIIIII
+ * 3 2 1 0
+ * IIIIIIII IIIIIIII IIIIIIII IIIIIIII
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 8;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 64;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_2;
+ if((H5T_NATIVE_INT_LEAST64_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_INT_LEAST64_ALIGN_g = 1;
+
+ /*
+ * 7 6 5 4
+ * UUUUUUUU UUUUUUUU UUUUUUUU UUUUUUUU
+ * 3 2 1 0
+ * UUUUUUUU UUUUUUUU UUUUUUUU UUUUUUUU
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 8;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 64;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_NONE;
+ if((H5T_NATIVE_UINT_LEAST64_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_UINT_LEAST64_ALIGN_g = 1;
+
+ /*
+ * 7 6 5 4
+ * IIIIIIII IIIIIIII IIIIIIII IIIIIIII
+ * 3 2 1 0
+ * IIIIIIII IIIIIIII IIIIIIII IIIIIIII
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 8;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 64;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_2;
+ if((H5T_NATIVE_INT_FAST64_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_INT_FAST64_ALIGN_g = 1;
+
+ /*
+ * 7 6 5 4
+ * UUUUUUUU UUUUUUUU UUUUUUUU UUUUUUUU
+ * 3 2 1 0
+ * UUUUUUUU UUUUUUUU UUUUUUUU UUUUUUUU
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 8;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 64;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_NONE;
+ if((H5T_NATIVE_UINT_FAST64_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_UINT_FAST64_ALIGN_g = 1;
+
+ /*
+ * 7 6 5 4
+ * IIIIIIII IIIIIIII IIIIIIII IIIIIIII
+ * 3 2 1 0
+ * IIIIIIII IIIIIIII IIIIIIII IIIIIIII
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 8;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 64;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_2;
+ if((H5T_NATIVE_LLONG_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_LLONG_ALIGN_g = 1;
+ H5T_NATIVE_LLONG_COMP_ALIGN_g = 8;
+
+ /*
+ * 7 6 5 4
+ * UUUUUUUU UUUUUUUU UUUUUUUU UUUUUUUU
+ * 3 2 1 0
+ * UUUUUUUU UUUUUUUU UUUUUUUU UUUUUUUU
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_INTEGER;
+ dt->shared->size = 8;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 64;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_NONE;
+ if((H5T_NATIVE_ULLONG_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_ULLONG_ALIGN_g = 1;
+
+ /*
+ * 3 2 1 0
+ * SEEEEEEE EMMMMMMM MMMMMMMM MMMMMMMM
+ * Implicit bit? yes
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_FLOAT;
+ dt->shared->size = 4;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 32;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.f.sign = 31;
+ dt->shared->u.atomic.u.f.epos = 23;
+ dt->shared->u.atomic.u.f.esize = 8;
+ dt->shared->u.atomic.u.f.ebias = 0x0000007f;
+ dt->shared->u.atomic.u.f.mpos = 0;
+ dt->shared->u.atomic.u.f.msize = 23;
+ dt->shared->u.atomic.u.f.norm = H5T_NORM_IMPLIED;
+ dt->shared->u.atomic.u.f.pad = H5T_PAD_ZERO;
+ if((H5T_NATIVE_FLOAT_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_FLOAT_ALIGN_g = 1;
+ H5T_NATIVE_FLOAT_COMP_ALIGN_g = 4;
+
+ /*
+ * 7 6 5 4
+ * SEEEEEEE EEEEMMMM MMMMMMMM MMMMMMMM
+ * 3 2 1 0
+ * MMMMMMMM MMMMMMMM MMMMMMMM MMMMMMMM
+ * Implicit bit? yes
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_FLOAT;
+ dt->shared->size = 8;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 64;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.f.sign = 63;
+ dt->shared->u.atomic.u.f.epos = 52;
+ dt->shared->u.atomic.u.f.esize = 11;
+ dt->shared->u.atomic.u.f.ebias = 0x000003ff;
+ dt->shared->u.atomic.u.f.mpos = 0;
+ dt->shared->u.atomic.u.f.msize = 52;
+ dt->shared->u.atomic.u.f.norm = H5T_NORM_IMPLIED;
+ dt->shared->u.atomic.u.f.pad = H5T_PAD_ZERO;
+ if((H5T_NATIVE_DOUBLE_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_DOUBLE_ALIGN_g = 1;
+ H5T_NATIVE_DOUBLE_COMP_ALIGN_g = 8;
+
+ /*
+ * 7 6 5 4
+ * SEEEEEEE EEEEMMMM MMMMMMMM MMMMMMMM
+ * 3 2 1 0
+ * MMMMMMMM MMMMMMMM MMMMMMMM MMMMMMMM
+ * Implicit bit? yes
+ * Alignment: none
+ */
+ if(NULL == (dt = H5T__alloc()))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, FAIL, "datatype allocation failed")
+ dt->shared->state = H5T_STATE_IMMUTABLE;
+ dt->shared->type = H5T_FLOAT;
+ dt->shared->size = 8;
+ dt->shared->u.atomic.order = H5T_ORDER_LE;
+ dt->shared->u.atomic.offset = 0;
+ dt->shared->u.atomic.prec = 64;
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;
+ dt->shared->u.atomic.u.f.sign = 63;
+ dt->shared->u.atomic.u.f.epos = 52;
+ dt->shared->u.atomic.u.f.esize = 11;
+ dt->shared->u.atomic.u.f.ebias = 0x000003ff;
+ dt->shared->u.atomic.u.f.mpos = 0;
+ dt->shared->u.atomic.u.f.msize = 52;
+ dt->shared->u.atomic.u.f.norm = H5T_NORM_IMPLIED;
+ dt->shared->u.atomic.u.f.pad = H5T_PAD_ZERO;
+ if((H5T_NATIVE_LDOUBLE_g = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register ID for built-in datatype")
+ H5T_NATIVE_LDOUBLE_ALIGN_g = 1;
+ H5T_NATIVE_LDOUBLE_COMP_ALIGN_g = 8;
+
+ /* Set the native order for this machine */
+ H5T_native_order_g = H5T_ORDER_LE;
+
+ /* Structure alignment for pointers, hvl_t, hobj_ref_t, hdset_reg_ref_t */
+ H5T_POINTER_COMP_ALIGN_g = 4;
+ H5T_HVL_COMP_ALIGN_g = 4;
+ H5T_HOBJREF_COMP_ALIGN_g = 8;
+ H5T_HDSETREGREF_COMP_ALIGN_g = 1;
+
+done:
+ if(ret_value < 0) {
+ if(dt != NULL) {
+ dt->shared = H5FL_FREE(H5T_shared_t, dt->shared);
+ dt = H5FL_FREE(H5T_t, dt);
+ } /* end if */
+ } /* end if */
+
+ FUNC_LEAVE_NOAPI(ret_value);
+} /* end H5TN_init_interface() */
+
+/****************************************/
+/* ALIGNMENT and signal-handling status */
+/****************************************/
+/* Signal() support: yes */
+/* setjmp() support: yes */
+/* longjmp() support: yes */
+/* sigsetjmp() support: yes */
+/* siglongjmp() support: yes */
+/* sigprocmask() support: yes */
+
+/******************************/
+/* signal handlers statistics */
+/******************************/
+/* signal_handlers tested: 15 times */
+/* sigbus_handler called: 5 times */
+/* sigsegv_handler called: 5 times */
+/* sigill_handler called: 5 times */
diff --git a/infra/cmake/packages/HDF5Config.cmake b/infra/cmake/packages/HDF5Config.cmake
index 19803f1ea..4ab338144 100644
--- a/infra/cmake/packages/HDF5Config.cmake
+++ b/infra/cmake/packages/HDF5Config.cmake
@@ -6,9 +6,24 @@ function(_HDF5_build)
nnas_find_package(HDF5Source QUIET)
if(NOT HDF5Source_FOUND)
+ message(STATUS "HD5Config skip: HDF5Source NOT FOUND")
return()
endif(NOT HDF5Source_FOUND)
+ if(DEFINED ENV{BUILD_HOST_EXEC})
+ set(EXTERNAL_H5MAKE_LIBSETTINGS $ENV{BUILD_HOST_EXEC}/externals/HDF5/build/bin/H5make_libsettings)
+ set(ENV{EXTERNAL_H5MAKE_LIBSETTINGS} ${EXTERNAL_H5MAKE_LIBSETTINGS})
+
+ # NOTE https://github.com/Samsung/ONE/issues/8762
+ # TODO generalize to select 'linux-armv7l'
+ set(H5TINIT_C_FROM_NATIVE ${CMAKE_CURRENT_LIST_DIR}/H5Tinit.c.linux-armv7l)
+ set(H5TINIT_C_COPY ${CMAKE_BINARY_DIR}/externals/HDF5/build/H5Tinit.c)
+ message(STATUS "Copy H5Tinit.c generated from target native build")
+ execute_process(
+ COMMAND ${CMAKE_COMMAND} -E copy "${H5TINIT_C_FROM_NATIVE}" "${H5TINIT_C_COPY}"
+ )
+ endif(DEFINED ENV{BUILD_HOST_EXEC})
+
nnas_include(ExternalBuildTools)
ExternalBuild_CMake(CMAKE_DIR ${HDF5Source_DIR}
BUILD_DIR ${CMAKE_BINARY_DIR}/externals/HDF5/build
@@ -26,6 +41,7 @@ _HDF5_build()
find_path(HDF5_CONFIG_DIR "hdf5-config.cmake"
PATHS ${EXT_OVERLAY_DIR}
+ NO_CMAKE_FIND_ROOT_PATH
PATH_SUFFIXES
cmake
share/cmake
diff --git a/infra/cmake/packages/HDF5Source.patch b/infra/cmake/packages/HDF5Source.patch
new file mode 100644
index 000000000..b8602a08a
--- /dev/null
+++ b/infra/cmake/packages/HDF5Source.patch
@@ -0,0 +1,195 @@
+Only in HDF5: build
+diff -r -u a/config/cmake/ConfigureChecks.cmake b/config/cmake/ConfigureChecks.cmake
+--- a/config/cmake/ConfigureChecks.cmake
++++ b/config/cmake/ConfigureChecks.cmake
+@@ -109,15 +109,15 @@
+ if (NOT WINDOWS)
+ CHECK_FUNCTION_EXISTS(clock_gettime CLOCK_GETTIME_IN_LIBC)
+ CHECK_LIBRARY_EXISTS(rt clock_gettime "" CLOCK_GETTIME_IN_LIBRT)
+- CHECK_LIBRARY_EXISTS(posix4 clock_gettime "" CLOCK_GETTIME_IN_LIBPOSIX4)
++ #CHECK_LIBRARY_EXISTS(posix4 clock_gettime "" CLOCK_GETTIME_IN_LIBPOSIX4)
+ if (CLOCK_GETTIME_IN_LIBC)
+ set (H5_HAVE_CLOCK_GETTIME 1)
+ elseif (CLOCK_GETTIME_IN_LIBRT)
+ set (H5_HAVE_CLOCK_GETTIME 1)
+ list (APPEND LINK_LIBS rt)
+- elseif (CLOCK_GETTIME_IN_LIBPOSIX4)
+- set (H5_HAVE_CLOCK_GETTIME 1)
+- list (APPEND LINK_LIBS posix4)
++ #elseif (CLOCK_GETTIME_IN_LIBPOSIX4)
++ # set (H5_HAVE_CLOCK_GETTIME 1)
++ # list (APPEND LINK_LIBS posix4)
+ endif (CLOCK_GETTIME_IN_LIBC)
+ endif (NOT WINDOWS)
+ #-----------------------------------------------------------------------------
+@@ -130,12 +130,17 @@
+ if (HDF5_ENABLE_DIRECT_VFD)
+ set (msg "Performing TEST_DIRECT_VFD_WORKS")
+ set (MACRO_CHECK_FUNCTION_DEFINITIONS "-DTEST_DIRECT_VFD_WORKS -D_GNU_SOURCE ${CMAKE_REQUIRED_FLAGS}")
++ if(NOT CMAKE_CROSSCOMPILING)
+ TRY_RUN (TEST_DIRECT_VFD_WORKS_RUN TEST_DIRECT_VFD_WORKS_COMPILE
+ ${CMAKE_BINARY_DIR}
+ ${HDF_RESOURCES_EXT_DIR}/HDFTests.c
+ CMAKE_FLAGS -DCOMPILE_DEFINITIONS:STRING=${MACRO_CHECK_FUNCTION_DEFINITIONS}
+ OUTPUT_VARIABLE OUTPUT
+ )
++ else(NOT CMAKE_CROSSCOMPILING)
++ set(TEST_DIRECT_VFD_WORKS_RUN 0)
++ set(TEST_DIRECT_VFD_WORKS_COMPILE TRUE)
++ endif(NOT CMAKE_CROSSCOMPILING)
+ if (TEST_DIRECT_VFD_WORKS_COMPILE)
+ if (TEST_DIRECT_VFD_WORKS_RUN MATCHES 0)
+ HDF_FUNCTION_TEST (HAVE_DIRECT)
+@@ -221,7 +226,12 @@
+ # The machine's conversion gets the correct value. We define the macro and disable
+ # this kind of test until we figure out what algorithm they use.
+ #
++if(NOT CMAKE_CROSSCOMPILING)
+ H5ConversionTests (H5_LDOUBLE_TO_LONG_SPECIAL "Checking IF your system converts long double to (unsigned) long values with special algorithm")
++else(NOT CMAKE_CROSSCOMPILING)
++ set(H5_LDOUBLE_TO_LONG_SPECIAL_RUN 1)
++ set(H5_LDOUBLE_TO_LONG_SPECIAL_COMPILE TRUE)
++endif(NOT CMAKE_CROSSCOMPILING)
+ # ----------------------------------------------------------------------
+ # Set the flag to indicate that the machine is using a special algorithm
+ # to convert some values of '(unsigned) long' to 'long double' values.
+@@ -230,7 +240,12 @@
+ # ..., 7fffff..., the compiler uses a unknown algorithm. We define a
+ # macro and skip the test for now until we know about the algorithm.
+ #
++if(NOT CMAKE_CROSSCOMPILING)
+ H5ConversionTests (H5_LONG_TO_LDOUBLE_SPECIAL "Checking IF your system can convert (unsigned) long to long double values with special algorithm")
++else(NOT CMAKE_CROSSCOMPILING)
++ set(H5_LONG_TO_LDOUBLE_SPECIAL_RUN 1)
++ set(H5_LONG_TO_LDOUBLE_SPECIAL_COMPILE TRUE)
++endif(NOT CMAKE_CROSSCOMPILING)
+ # ----------------------------------------------------------------------
+ # Set the flag to indicate that the machine can accurately convert
+ # 'long double' to '(unsigned) long long' values. (This flag should be set for
+@@ -240,7 +255,12 @@
+ # 0x4351ccf385ebc8a0dfcc... or 0x4351ccf385ebc8a0ffcc... will make the converted
+ # values wildly wrong. This test detects this wrong behavior and disable the test.
+ #
++if(NOT CMAKE_CROSSCOMPILING)
+ H5ConversionTests (H5_LDOUBLE_TO_LLONG_ACCURATE "Checking IF correctly converting long double to (unsigned) long long values")
++else(NOT CMAKE_CROSSCOMPILING)
++ set(H5_LDOUBLE_TO_LLONG_ACCURATE_RUN 0)
++ set(H5_LDOUBLE_TO_LLONG_ACCURATE_COMPILE TRUE)
++endif(NOT CMAKE_CROSSCOMPILING)
+ # ----------------------------------------------------------------------
+ # Set the flag to indicate that the machine can accurately convert
+ # '(unsigned) long long' to 'long double' values. (This flag should be set for
+@@ -248,11 +268,21 @@
+ # 007fff..., 00ffff..., 01ffff..., ..., 7fffff..., the converted values are twice
+ # as big as they should be.
+ #
++if(NOT CMAKE_CROSSCOMPILING)
+ H5ConversionTests (H5_LLONG_TO_LDOUBLE_CORRECT "Checking IF correctly converting (unsigned) long long to long double values")
++else(NOT CMAKE_CROSSCOMPILING)
++ set(H5_LLONG_TO_LDOUBLE_CORRECT_RUN 0)
++ set(H5_LLONG_TO_LDOUBLE_CORRECT_COMPILE TRUE)
++endif(NOT CMAKE_CROSSCOMPILING)
+ # ----------------------------------------------------------------------
+ # Check if pointer alignments are enforced
+ #
++if(NOT CMAKE_CROSSCOMPILING)
+ H5ConversionTests (H5_NO_ALIGNMENT_RESTRICTIONS "Checking IF alignment restrictions are strictly enforced")
++else(NOT CMAKE_CROSSCOMPILING)
++ set(H5_NO_ALIGNMENT_RESTRICTIONS_RUN 0)
++ set(H5_NO_ALIGNMENT_RESTRICTIONS_COMPILE TRUE)
++endif(NOT CMAKE_CROSSCOMPILING)
+
+ # -----------------------------------------------------------------------
+ # wrapper script variables
+diff -r -u a/config/cmake_ext_mod/ConfigureChecks.cmake b/config/cmake_ext_mod/ConfigureChecks.cmake
+--- a/config/cmake_ext_mod/ConfigureChecks.cmake
++++ b/config/cmake_ext_mod/ConfigureChecks.cmake
+@@ -272,12 +272,17 @@
+ # http://www.gnu.org/s/libc/manual/html_node/Feature-Test-Macros.html
+ set (HDF_EXTRA_C_FLAGS -D_POSIX_C_SOURCE=199506L)
+ # _BSD_SOURCE deprecated in GLIBC >= 2.20
++ if(NOT CMAKE_CROSSCOMPILING)
+ TRY_RUN (HAVE_DEFAULT_SOURCE_RUN HAVE_DEFAULT_SOURCE_COMPILE
+ ${CMAKE_BINARY_DIR}
+ ${HDF_RESOURCES_EXT_DIR}/HDFTests.c
+ CMAKE_FLAGS -DCOMPILE_DEFINITIONS:STRING=-DHAVE_DEFAULT_SOURCE
+ OUTPUT_VARIABLE OUTPUT
+ )
++ else(NOT CMAKE_CROSSCOMPILING)
++ set(HAVE_DEFAULT_SOURCE_RUN 1)
++ set(HAVE_DEFAULT_SOURCE_COMPILE TRUE)
++ endif(NOT CMAKE_CROSSCOMPILING)
+ if (HAVE_DEFAULT_SOURCE_COMPILE AND HAVE_DEFAULT_SOURCE_RUN)
+ set (HDF_EXTRA_FLAGS -D_DEFAULT_SOURCE)
+ else (HAVE_DEFAULT_SOURCE_COMPILE AND HAVE_DEFAULT_SOURCE_RUN)
+@@ -287,12 +292,17 @@
+ option (HDF_ENABLE_LARGE_FILE "Enable support for large (64-bit) files on Linux." ON)
+ if (HDF_ENABLE_LARGE_FILE)
+ set (msg "Performing TEST_LFS_WORKS")
++ if(NOT CMAKE_CROSSCOMPILING)
+ TRY_RUN (TEST_LFS_WORKS_RUN TEST_LFS_WORKS_COMPILE
+ ${CMAKE_BINARY_DIR}
+ ${HDF_RESOURCES_EXT_DIR}/HDFTests.c
+ CMAKE_FLAGS -DCOMPILE_DEFINITIONS:STRING=-DTEST_LFS_WORKS
+ OUTPUT_VARIABLE OUTPUT
+ )
++ else(NOT CMAKE_CROSSCOMPILING)
++ set(TEST_LFS_WORKS_RUN 0)
++ set(TEST_LFS_WORKS_COMPILE TRUE)
++ endif(NOT CMAKE_CROSSCOMPILING)
+ if (TEST_LFS_WORKS_COMPILE)
+ if (TEST_LFS_WORKS_RUN MATCHES 0)
+ set (TEST_LFS_WORKS 1 CACHE INTERNAL ${msg})
+@@ -702,7 +712,8 @@
+ set (CURRENT_TEST_DEFINITIONS "-DPRINTF_LL_WIDTH")
+ if (${HDF_PREFIX}_SIZEOF_LONG_LONG)
+ set (CURRENT_TEST_DEFINITIONS "${CURRENT_TEST_DEFINITIONS} -DHAVE_LONG_LONG")
+ endif (${HDF_PREFIX}_SIZEOF_LONG_LONG)
++ if(NOT CMAKE_CROSSCOMPILING)
+ TRY_RUN (${HDF_PREFIX}_PRINTF_LL_TEST_RUN ${HDF_PREFIX}_PRINTF_LL_TEST_COMPILE
+ ${CMAKE_BINARY_DIR}
+ ${HDF_RESOURCES_EXT_DIR}/HDFTests.c
+@@ -722,6 +733,13 @@
+ "Test ${HDF_PREFIX}_PRINTF_LL_WIDTH failed with the following output:\n ${OUTPUT}\n"
+ )
+ endif (${HDF_PREFIX}_PRINTF_LL_TEST_COMPILE)
++ else(NOT CMAKE_CROSSCOMPILING)
++ set (${HDF_PREFIX}_PRINTF_LL_TEST_RUN 1)
++ set (${HDF_PREFIX}_PRINTF_LL_TEST_COMPILE 1)
++ set (${HDF_PREFIX}_PRINTF_LL_WIDTH "\"L\"")
++ set (${HDF_PREFIX}_PRINTF_LL "L")
++ set (PRINT_LL_FOUND 1)
++ endif(NOT CMAKE_CROSSCOMPILING)
+
+ if (PRINT_LL_FOUND)
+ message (STATUS "Checking for appropriate format for 64 bit long: found ${${HDF_PREFIX}_PRINTF_LL_WIDTH}")
+diff -r -u a/src/CMakeLists.txt b/src/CMakeLists.txt
+--- a/src/CMakeLists.txt
++++ b/src/CMakeLists.txt
+@@ -616,6 +616,7 @@
+ target_link_libraries (H5detect "ws2_32.lib")
+ endif (MSVC OR MINGW)
+
++if (NOT CMAKE_CROSSCOMPILING)
+ set (CMD $<TARGET_FILE:H5detect>)
+ add_custom_command (
+ OUTPUT ${HDF5_BINARY_DIR}/H5Tinit.c
+@@ -623,6 +624,7 @@
+ ARGS > ${HDF5_BINARY_DIR}/H5Tinit.c
+ DEPENDS H5detect
+ )
++endif (NOT CMAKE_CROSSCOMPILING)
+
+ add_executable (H5make_libsettings ${HDF5_SRC_DIR}/H5make_libsettings.c)
+ TARGET_C_PROPERTIES (H5make_libsettings STATIC " " " ")
+@@ -631,6 +633,10 @@
+ endif (MSVC OR MINGW)
+
+ set (CMD $<TARGET_FILE:H5make_libsettings>)
++# for cross compile
++if (DEFINED ENV{EXTERNAL_H5MAKE_LIBSETTINGS})
++ set(CMD $ENV{EXTERNAL_H5MAKE_LIBSETTINGS})
++endif (DEFINED ENV{EXTERNAL_H5MAKE_LIBSETTINGS})
+ add_custom_command (
+ OUTPUT ${HDF5_BINARY_DIR}/H5lib_settings.c
+ COMMAND ${CMD}
diff --git a/infra/cmake/packages/HDF5SourceConfig.cmake b/infra/cmake/packages/HDF5SourceConfig.cmake
index 134efa6f4..3440dbd20 100644
--- a/infra/cmake/packages/HDF5SourceConfig.cmake
+++ b/infra/cmake/packages/HDF5SourceConfig.cmake
@@ -7,9 +7,11 @@ function(_HDF5Source_import)
nnas_include(ExternalSourceTools)
nnas_include(OptionTools)
- envoption(HDF5_URL https://github.com/HDFGroup/hdf5/archive/hdf5-1_8_16.tar.gz)
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(HDF5_URL ${EXTERNAL_DOWNLOAD_SERVER}/HDFGroup/hdf5/archive/hdf5-1_8_16.tar.gz)
- ExternalSource_Download(HDF5 ${HDF5_URL})
+ ExternalSource_Download(HDF5 ${HDF5_URL}
+ PATCH ${CMAKE_CURRENT_LIST_DIR}/HDF5Source.patch)
set(HDF5Source_DIR ${HDF5_SOURCE_DIR} PARENT_SCOPE)
set(HDF5Source_FOUND TRUE PARENT_SCOPE)
diff --git a/infra/cmake/packages/JsoncppConfig.cmake b/infra/cmake/packages/JsoncppConfig.cmake
new file mode 100644
index 000000000..3c5c3e78a
--- /dev/null
+++ b/infra/cmake/packages/JsoncppConfig.cmake
@@ -0,0 +1,34 @@
+function(_Jsoncpp_import)
+ nnas_find_package(JsoncppSource QUIET)
+
+ if(NOT JsoncppSource_FOUND)
+ set(Jsoncpp_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT JsoncppSource_FOUND)
+
+ nnas_include(ExternalBuildTools)
+ ExternalBuild_CMake(CMAKE_DIR ${JsoncppSource_DIR}
+ BUILD_DIR ${CMAKE_BINARY_DIR}/externals/JSONCPP/build
+ INSTALL_DIR ${EXT_OVERLAY_DIR}
+ IDENTIFIER "1.9.5"
+ PKG_NAME "JSONCPP"
+ EXTRA_OPTS "-DBUILD_STATIC_LIBS=ON"
+ "-DBUILD_SHARED_LIBS=OFF"
+ "-DJSONCPP_WITH_TESTS=OFF"
+ "-DJSONCPP_WITH_POST_BUILD_UNITTEST=OFF")
+
+ find_path(Jsoncpp_INCLUDE_DIRS
+ NAMES json.h
+ PATHS ${EXT_OVERLAY_DIR}
+ NO_CMAKE_FIND_ROOT_PATH
+ PATH_SUFFIXES include/json)
+ find_file(Jsoncpp_STATIC_LIB
+ NAMES libjsoncpp.a
+ PATHS ${EXT_OVERLAY_DIR}
+ NO_CMAKE_FIND_ROOT_PATH
+ PATH_SUFFIXES lib)
+
+ set(Jsoncpp_FOUND TRUE PARENT_SCOPE)
+endfunction(_Jsoncpp_import)
+
+_Jsoncpp_import()
diff --git a/infra/cmake/packages/JsoncppSourceConfig.cmake b/infra/cmake/packages/JsoncppSourceConfig.cmake
new file mode 100644
index 000000000..8d672854b
--- /dev/null
+++ b/infra/cmake/packages/JsoncppSourceConfig.cmake
@@ -0,0 +1,19 @@
+function(_JsoncppSource_import)
+ if(NOT DOWNLOAD_JSONCPP)
+ set(JsoncppSource_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT DOWNLOAD_JSONCPP)
+
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(JSONCPP_URL ${EXTERNAL_DOWNLOAD_SERVER}/open-source-parsers/jsoncpp/archive/refs/tags/1.9.5.tar.gz)
+
+ ExternalSource_Download(JSONCPP ${JSONCPP_URL})
+
+ set(JsoncppSource_DIR ${JSONCPP_SOURCE_DIR} PARENT_SCOPE)
+ set(JsoncppSource_FOUND TRUE PARENT_SCOPE)
+endfunction(_JsoncppSource_import)
+
+_JsoncppSource_import()
diff --git a/infra/cmake/packages/MbedOSSource-6.15/MbedOSSourceConfig.cmake b/infra/cmake/packages/MbedOSSource-6.15/MbedOSSourceConfig.cmake
new file mode 100644
index 000000000..e55647da8
--- /dev/null
+++ b/infra/cmake/packages/MbedOSSource-6.15/MbedOSSourceConfig.cmake
@@ -0,0 +1,16 @@
+function(_MbedOSSource_import)
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(MBEDOS_6_15_URL ${EXTERNAL_DOWNLOAD_SERVER}/ARMmbed/mbed-os/archive/refs/tags/mbed-os-6.15.0.tar.gz)
+ set(MBEDOS_6_15_SHA256 529b04c41f3020ed8a62f12d47f2d3de87e1b07fb13708534534a587f7ea048e)
+
+ ExternalSource_Download(MBEDOS DIRNAME MBEDOS-6.15 ${MBEDOS_6_15_URL}
+ CHECKSUM "SHA256=${MBEDOS_6_15_SHA256}")
+
+ set(MbedOSSource_DIR ${MBEDOS_SOURCE_DIR} PARENT_SCOPE)
+ set(MbedOSSource_FOUND TRUE PARENT_SCOPE)
+endfunction(_MbedOSSource_import)
+
+_MbedOSSource_import()
diff --git a/infra/cmake/packages/FlatBuffersSource-1.10/FlatBuffersSourceConfigVersion.cmake b/infra/cmake/packages/MbedOSSource-6.15/MbedOSSourceConfigVersion.cmake
index 6585f21d5..acdd54ad6 100644
--- a/infra/cmake/packages/FlatBuffersSource-1.10/FlatBuffersSourceConfigVersion.cmake
+++ b/infra/cmake/packages/MbedOSSource-6.15/MbedOSSourceConfigVersion.cmake
@@ -1,4 +1,4 @@
-set(PACKAGE_VERSION "1.10")
+set(PACKAGE_VERSION "6.15")
set(PACKAGE_VERSION_EXACT FALSE)
set(PACKAGE_VERSION_COMPATIBLE FALSE)
set(PACKAGE_VERSION_UNSUITABLE TRUE)
diff --git a/infra/cmake/packages/NEON2SSESourceConfig.cmake b/infra/cmake/packages/NEON2SSESourceConfig.cmake
index 5970ec73e..82c71e2a8 100644
--- a/infra/cmake/packages/NEON2SSESourceConfig.cmake
+++ b/infra/cmake/packages/NEON2SSESourceConfig.cmake
@@ -7,12 +7,13 @@ function(_NEON2SSESource_import)
nnas_include(ExternalSourceTools)
nnas_include(OptionTools)
- # NOTE TensorFlow 1.12 downloads NEON2SSE from the following URL
# NOTE TensorFlow 1.13.1 downloads NEON2SSE from the following URL
- # NOTE TensorFlow 2.2 downloads NEON2SSE from the following URL
- envoption(NEON2SSE_1_12_URL https://github.com/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz)
+ # NOTE TensorFlow 2.8.0 downloads NEON2SSE from the following URL
+ # NOTE commit c12f8932c3be5aebaf35562d699f645686c4e2c3 will resolve build fail on debug build
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(NEON2SSE_URL ${EXTERNAL_DOWNLOAD_SERVER}/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz)
- ExternalSource_Download(NEON2SSE ${NEON2SSE_1_12_URL})
+ ExternalSource_Download(NEON2SSE ${NEON2SSE_URL})
set(NEON2SSESource_DIR ${NEON2SSE_SOURCE_DIR} PARENT_SCOPE)
set(NEON2SSESource_FOUND TRUE PARENT_SCOPE)
diff --git a/infra/cmake/packages/NoniusSourceConfig.cmake b/infra/cmake/packages/NoniusSourceConfig.cmake
index 0af23ef0e..17965f1eb 100644
--- a/infra/cmake/packages/NoniusSourceConfig.cmake
+++ b/infra/cmake/packages/NoniusSourceConfig.cmake
@@ -20,7 +20,7 @@ function(_NoniusSource_import)
endif(BUILD_KBENCHMARK)
set(NoniusSource_DIR ${NONIUS_SOURCE_DIR} PARENT_SCOPE)
- set(NoniusSource_FOUND ${NONIUS_SOURCE_GET} PARENT_SCOPE)
+ set(NoniusSource_FOUND TRUE PARENT_SCOPE)
endfunction(_NoniusSource_import)
_NoniusSource_import()
diff --git a/infra/cmake/packages/ONNXSource-1.4.1/ONNXSourceConfig.cmake b/infra/cmake/packages/ONNXSource-1.4.1/ONNXSourceConfig.cmake
index c9fb5e490..fe21f6d3d 100644
--- a/infra/cmake/packages/ONNXSource-1.4.1/ONNXSourceConfig.cmake
+++ b/infra/cmake/packages/ONNXSource-1.4.1/ONNXSourceConfig.cmake
@@ -7,7 +7,8 @@ function(_ONNXSource_import)
nnas_include(ExternalSourceTools)
nnas_include(OptionTools)
- envoption(ONNX_1_4_1_URL https://github.com/onnx/onnx/archive/v1.4.1.zip)
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(ONNX_1_4_1_URL ${EXTERNAL_DOWNLOAD_SERVER}/onnx/onnx/archive/v1.4.1.zip)
ExternalSource_Download(ONNX DIRNAME ONNX-1.4.1
CHECKSUM MD5=604b43a22fbc758f32ae9f3a4fb9d397
diff --git a/infra/cmake/packages/ONNXSource-1.6.0/ONNXSourceConfig.cmake b/infra/cmake/packages/ONNXSource-1.6.0/ONNXSourceConfig.cmake
index ef903f834..b2ad08b90 100644
--- a/infra/cmake/packages/ONNXSource-1.6.0/ONNXSourceConfig.cmake
+++ b/infra/cmake/packages/ONNXSource-1.6.0/ONNXSourceConfig.cmake
@@ -7,7 +7,8 @@ function(_ONNXSource_import)
nnas_include(ExternalSourceTools)
nnas_include(OptionTools)
- envoption(ONNX_1_6_0_URL https://github.com/onnx/onnx/archive/v1.6.0.zip)
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(ONNX_1_6_0_URL ${EXTERNAL_DOWNLOAD_SERVER}/onnx/onnx/archive/v1.6.0.zip)
ExternalSource_Download(ONNX DIRNAME ONNX-1.6.0
CHECKSUM MD5=cbdc547a527f1b59c7f066c8d258b966
diff --git a/infra/cmake/packages/OouraFFTSourceConfig.cmake b/infra/cmake/packages/OouraFFTSourceConfig.cmake
new file mode 100644
index 000000000..d84b5b20f
--- /dev/null
+++ b/infra/cmake/packages/OouraFFTSourceConfig.cmake
@@ -0,0 +1,20 @@
+function(_OouraFFTSource_import)
+ if(NOT DOWNLOAD_OOURAFFT)
+ set(OouraFFTSource_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT DOWNLOAD_OOURAFFT)
+
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ # NOTE TensorFlow 2.3 downloads OOURAFFT from the following URL
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(OOURAFFT_URL ${EXTERNAL_DOWNLOAD_SERVER}/petewarden/OouraFFT/archive/v1.0.tar.gz)
+
+ ExternalSource_Download(OOURAFFT ${OOURAFFT_URL})
+
+ set(OouraFFTSource_DIR ${OOURAFFT_SOURCE_DIR} PARENT_SCOPE)
+ set(OouraFFTSource_FOUND TRUE PARENT_SCOPE)
+endfunction(_OouraFFTSource_import)
+
+_OouraFFTSource_import()
diff --git a/infra/cmake/packages/Opencl_HeadersConfig.cmake b/infra/cmake/packages/Opencl_HeadersConfig.cmake
new file mode 100644
index 000000000..ec7c65a73
--- /dev/null
+++ b/infra/cmake/packages/Opencl_HeadersConfig.cmake
@@ -0,0 +1,27 @@
+function(_Opencl_Headers_import)
+ nnas_find_package(Opencl_HeadersSource QUIET)
+
+ # NOTE This line prevents multiple definitions of target
+ if(TARGET OpenCL_Headers)
+ set(Opencl_HeadersSource_DIR ${Opencl_HeadersSource_DIR} PARENT_SCOPE)
+ set(Opencl_Headers_FOUND TRUE PARENT_SCOPE)
+ return()
+ endif(TARGET OpenCL_Headers)
+
+ if(NOT Opencl_HeadersSource_FOUND)
+ message(STATUS "Opencl_Headers: Source not found")
+ set(Opencl_Headers_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT Opencl_HeadersSource_FOUND)
+
+ # We don't need test builds and installs, we only need headers.
+ # add_extdirectory("${Opencl_HeadersSource_DIR}" OPENCL_HEADERS EXCLUDE_FROM_ALL)
+
+ add_library(OpenCL_Headers INTERFACE)
+ target_include_directories(OpenCL_Headers INTERFACE ${Opencl_HeadersSource_DIR})
+
+ set(Opencl_Headers_DIR ${Opencl_HeadersSource_DIR} PARENT_SCOPE)
+ set(Opencl_Headers_FOUND TRUE PARENT_SCOPE)
+endfunction(_Opencl_Headers_import)
+
+_Opencl_Headers_import()
diff --git a/infra/cmake/packages/Opencl_HeadersSourceConfig.cmake b/infra/cmake/packages/Opencl_HeadersSourceConfig.cmake
new file mode 100644
index 000000000..04858aa41
--- /dev/null
+++ b/infra/cmake/packages/Opencl_HeadersSourceConfig.cmake
@@ -0,0 +1,22 @@
+function(_Opencl_HeadersSource_import)
+ if(NOT DOWNLOAD_OPENCL_HEADERS)
+ set(Opencl_HeadersSource_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT DOWNLOAD_OPENCL_HEADERS)
+
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(OPENCL_HEADERS_URL ${EXTERNAL_DOWNLOAD_SERVER}/KhronosGroup/OpenCL-Headers/archive/v2021.04.29.tar.gz)
+
+ ExternalSource_Download(OPENCL_HEADERS
+ DIRNAME OPENCL_HEADERS
+ URL ${OPENCL_HEADERS_URL}
+ CHECKSUM MD5=5a7ea04265119aa76b4ecbd95f258219)
+
+ set(Opencl_HeadersSource_DIR ${OPENCL_HEADERS_SOURCE_DIR} PARENT_SCOPE)
+ set(Opencl_HeadersSource_FOUND TRUE PARENT_SCOPE)
+endfunction(_Opencl_HeadersSource_import)
+
+_Opencl_HeadersSource_import()
diff --git a/infra/cmake/packages/Opengl_HeadersSourceConfig.cmake b/infra/cmake/packages/Opengl_HeadersSourceConfig.cmake
new file mode 100644
index 000000000..c5a774a73
--- /dev/null
+++ b/infra/cmake/packages/Opengl_HeadersSourceConfig.cmake
@@ -0,0 +1,21 @@
+function(_Opengl_HeadersSource_import)
+ if(NOT DOWNLOAD_OPENGL_HEADERS)
+ set(Opengl_HeadersSource_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT DOWNLOAD_OPENGL_HEADERS)
+
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(OPENGL_HEADERS_URL ${EXTERNAL_DOWNLOAD_SERVER}/KhronosGroup/OpenGL-Registry/archive/0cb0880d91581d34f96899c86fc1bf35627b4b81.zip)
+
+ ExternalSource_Download(OPENGL_HEADERS
+ DIRNAME OPENGL_HEADERS
+ URL ${OPENGL_HEADERS_URL})
+
+ set(Opengl_HeadersSource_DIR ${OPENGL_HEADERS_SOURCE_DIR} PARENT_SCOPE)
+ set(Opengl_HeadersSource_FOUND TRUE PARENT_SCOPE)
+endfunction(_Opengl_HeadersSource_import)
+
+_Opengl_HeadersSource_import()
diff --git a/infra/cmake/packages/ProtobufConfig.cmake b/infra/cmake/packages/ProtobufConfig.cmake
index 3c8d2320f..f8e9ff1f9 100644
--- a/infra/cmake/packages/ProtobufConfig.cmake
+++ b/infra/cmake/packages/ProtobufConfig.cmake
@@ -51,17 +51,34 @@ function(_Protobuf_build)
return()
endif(NOT ProtobufSource_FOUND)
+ # set 'EXTERNAL_JS_EMBED' environment variable
+ if(NOT DEFINED ENV{EXTERNAL_JS_EMBED})
+ if(DEFINED ENV{BUILD_HOST_EXEC})
+ set(EXTERNAL_JS_EMBED $ENV{BUILD_HOST_EXEC}/externals/PROTOBUF/build/js_embed)
+ set(ENV{EXTERNAL_JS_EMBED} ${EXTERNAL_JS_EMBED})
+ endif(DEFINED ENV{BUILD_HOST_EXEC})
+ endif(NOT DEFINED ENV{EXTERNAL_JS_EMBED})
+
nnas_include(ExternalBuildTools)
ExternalBuild_CMake(CMAKE_DIR ${ProtobufSource_DIR}/cmake
BUILD_DIR ${CMAKE_BINARY_DIR}/externals/PROTOBUF/build
INSTALL_DIR ${EXT_OVERLAY_DIR}
BUILD_FLAGS -fPIC
EXTRA_OPTS -Dprotobuf_BUILD_TESTS=OFF -Dprotobuf_WITH_ZLIB=OFF
- IDENTIFIER "3.5.2-fix1"
+ IDENTIFIER "3.5.2-fix2"
PKG_NAME "PROTOBUF")
endfunction(_Protobuf_build)
+set(PROTOC_PATH $<TARGET_FILE:protobuf::protoc>)
+
+if(DEFINED ENV{BUILD_HOST_EXEC})
+ set(PROTOC_PATH $ENV{BUILD_HOST_EXEC}/overlay/bin/protoc)
+endif(DEFINED ENV{BUILD_HOST_EXEC})
+if(DEFINED ENV{EXTERNAL_PROTOC})
+ set(PROTOC_PATH $ENV{EXTERNAL_PROTOC})
+endif(DEFINED ENV{EXTERNAL_PROTOC})
+
_Protobuf_build()
if(USE_PROTOBUF_LEGACY_IMPORT)
@@ -96,7 +113,7 @@ if(Protobuf_FOUND)
add_custom_command(OUTPUT ${OUTPUT_FILES}
COMMAND ${CMAKE_COMMAND} -E make_directory "${abs_output_dir}"
- COMMAND "$<TARGET_FILE:protobuf::protoc>" --cpp_out "${abs_output_dir}" -I "${abs_proto_dir}" ${PROTO_FILES}
+ COMMAND "${PROTOC_PATH}" --cpp_out "${abs_output_dir}" -I "${abs_proto_dir}" ${PROTO_FILES}
DEPENDS ${PROTO_FILES})
set(${PREFIX}_SOURCES ${OUTPUT_FILES} PARENT_SCOPE)
diff --git a/infra/cmake/packages/ProtobufSource.patch b/infra/cmake/packages/ProtobufSource.patch
new file mode 100644
index 000000000..9a83a80e4
--- /dev/null
+++ b/infra/cmake/packages/ProtobufSource.patch
@@ -0,0 +1,18 @@
+--- a/cmake/libprotoc.cmake
++++ b/cmake/libprotoc.cmake
+@@ -209,10 +209,14 @@
+ ${protobuf_source_dir}/src/google/protobuf/compiler/js/well_known_types/timestamp.js
+ )
+ add_executable(js_embed ${protobuf_source_dir}/src/google/protobuf/compiler/js/embed.cc)
++set(JS_EMBED_EXEC "js_embed")
++if(DEFINED ENV{EXTERNAL_JS_EMBED})
++ set(JS_EMBED_EXEC "$ENV{EXTERNAL_JS_EMBED}")
++endif()
+ add_custom_command(
+ OUTPUT ${protobuf_source_dir}/src/google/protobuf/compiler/js/well_known_types_embed.cc
+ DEPENDS js_embed ${js_well_known_types_sources}
+- COMMAND js_embed ${js_well_known_types_sources} > ${protobuf_source_dir}/src/google/protobuf/compiler/js/well_known_types_embed.cc
++ COMMAND ${JS_EMBED_EXEC} ${js_well_known_types_sources} > ${protobuf_source_dir}/src/google/protobuf/compiler/js/well_known_types_embed.cc
+ )
+
+ add_library(libprotoc ${protobuf_SHARED_OR_STATIC}
diff --git a/infra/cmake/packages/ProtobufSourceConfig.cmake b/infra/cmake/packages/ProtobufSourceConfig.cmake
index 6b35ae7dc..a1704e53d 100644
--- a/infra/cmake/packages/ProtobufSourceConfig.cmake
+++ b/infra/cmake/packages/ProtobufSourceConfig.cmake
@@ -7,9 +7,11 @@ function(_ProtobufSource_import)
nnas_include(ExternalSourceTools)
nnas_include(OptionTools)
- envoption(PROTOBUF_URL https://github.com/protocolbuffers/protobuf/archive/v3.5.2.tar.gz)
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(PROTOBUF_URL ${EXTERNAL_DOWNLOAD_SERVER}/protocolbuffers/protobuf/archive/v3.5.2.tar.gz)
- ExternalSource_Download(PROTOBUF ${PROTOBUF_URL})
+ ExternalSource_Download(PROTOBUF ${PROTOBUF_URL}
+ PATCH ${CMAKE_CURRENT_LIST_DIR}/ProtobufSource.patch)
set(ProtobufSource_DIR ${PROTOBUF_SOURCE_DIR} PARENT_SCOPE)
set(ProtobufSource_FOUND TRUE PARENT_SCOPE)
diff --git a/infra/cmake/packages/PsimdSourceConfig.cmake b/infra/cmake/packages/PsimdSourceConfig.cmake
new file mode 100644
index 000000000..1da5cdc5e
--- /dev/null
+++ b/infra/cmake/packages/PsimdSourceConfig.cmake
@@ -0,0 +1,21 @@
+function(_PsimdSource_import)
+ if(NOT ${DOWNLOAD_PSIMD})
+ set(PsimdSource_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT ${DOWNLOAD_PSIMD})
+
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ # psimd commit in xnnpack 8b283aa30a31
+ envoption(PSIMD_URL ${EXTERNAL_DOWNLOAD_SERVER}/Maratyszcza/psimd/archive/072586a71b55b7f8c584153d223e95687148a900.tar.gz)
+ ExternalSource_Download(PSIMD
+ DIRNAME PSIMD
+ URL ${PSIMD_URL})
+
+ set(PsimdSource_DIR ${PSIMD_SOURCE_DIR} PARENT_SCOPE)
+ set(PsimdSource_FOUND TRUE PARENT_SCOPE)
+endfunction(_PsimdSource_import)
+
+_PsimdSource_import()
diff --git a/infra/cmake/packages/PthreadpoolSourceConfig.cmake b/infra/cmake/packages/PthreadpoolSourceConfig.cmake
new file mode 100644
index 000000000..4e1910a84
--- /dev/null
+++ b/infra/cmake/packages/PthreadpoolSourceConfig.cmake
@@ -0,0 +1,21 @@
+function(_PthreadpoolSource_import)
+ if(NOT ${DOWNLOAD_PTHREADPOOL})
+ set(PthreadpoolSource_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT ${DOWNLOAD_PTHREADPOOL})
+
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ # pthreadpool commit in xnnpack 8b283aa30a31
+ envoption(PTHREADPOOL_URL ${EXTERNAL_DOWNLOAD_SERVER}/Maratyszcza/pthreadpool/archive/029c88620802e1361ccf41d1970bd5b07fd6b7bb.tar.gz)
+ ExternalSource_Download(PTHREADPOOL
+ DIRNAME PTHREADPOOL
+ URL ${PTHREADPOOL_URL})
+
+ set(PthreadpoolSource_DIR ${PTHREADPOOL_SOURCE_DIR} PARENT_SCOPE)
+ set(PthreadpoolSource_FOUND TRUE PARENT_SCOPE)
+endfunction(_PthreadpoolSource_import)
+
+_PthreadpoolSource_import()
diff --git a/infra/cmake/packages/Pybind11SourceConfig.cmake b/infra/cmake/packages/Pybind11SourceConfig.cmake
index 76f51e4d3..2f6425355 100644
--- a/infra/cmake/packages/Pybind11SourceConfig.cmake
+++ b/infra/cmake/packages/Pybind11SourceConfig.cmake
@@ -7,7 +7,8 @@ function(_Pybind11Source_import)
nnas_include(ExternalSourceTools)
nnas_include(OptionTools)
- envoption(PYBIND11_URL https://github.com/pybind/pybind11/archive/v2.5.0.tar.gz)
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(PYBIND11_URL ${EXTERNAL_DOWNLOAD_SERVER}/pybind/pybind11/archive/v2.5.0.tar.gz)
ExternalSource_Download(PYBIND11 ${PYBIND11_URL})
diff --git a/infra/cmake/packages/PytorchSourceConfig.cmake b/infra/cmake/packages/PytorchSourceConfig.cmake
index 0212f2f4b..94757f865 100644
--- a/infra/cmake/packages/PytorchSourceConfig.cmake
+++ b/infra/cmake/packages/PytorchSourceConfig.cmake
@@ -7,7 +7,8 @@ function(_PytorchSource_import)
nnas_include(ExternalSourceTools)
nnas_include(OptionTools)
- envoption(PYTORCH_URL https://github.com/pytorch/pytorch/archive/v0.4.1.tar.gz)
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(PYTORCH_URL ${EXTERNAL_DOWNLOAD_SERVER}/pytorch/pytorch/archive/v0.4.1.tar.gz)
ExternalSource_Download(PYTORCH ${PYTORCH_URL})
diff --git a/infra/cmake/packages/RuySourceConfig.cmake b/infra/cmake/packages/RuySourceConfig.cmake
new file mode 100644
index 000000000..4faf0bb9f
--- /dev/null
+++ b/infra/cmake/packages/RuySourceConfig.cmake
@@ -0,0 +1,21 @@
+function(_RuySource_import)
+ if(NOT ${DOWNLOAD_RUY})
+ set(RuySource_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT ${DOWNLOAD_RUY})
+
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ # NOTE Downloads ruy source used by tensorflow v2.3.0
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(RUY_URL ${EXTERNAL_DOWNLOAD_SERVER}/google/ruy/archive/34ea9f4993955fa1ff4eb58e504421806b7f2e8f.tar.gz)
+ ExternalSource_Download(RUY
+ DIRNAME RUY
+ URL ${RUY_URL})
+
+ set(RuySource_DIR ${RUY_SOURCE_DIR} PARENT_SCOPE)
+ set(RuySource_FOUND TRUE PARENT_SCOPE)
+endfunction(_RuySource_import)
+
+_RuySource_import()
diff --git a/infra/cmake/packages/TensorFlow-1.13/TensorFlowConfig.cmake b/infra/cmake/packages/TensorFlow-1.13/TensorFlowConfig.cmake
new file mode 100644
index 000000000..8fedc9537
--- /dev/null
+++ b/infra/cmake/packages/TensorFlow-1.13/TensorFlowConfig.cmake
@@ -0,0 +1,56 @@
+set(TENSORFLOW_PREFIX "/usr" CACHE PATH "The location of pre-installed TensorFlow 1.13 library")
+set(TENSORFLOW_VERSION_REQUIRED "1.13")
+
+# TODO Build TensorFlow from the (downloaded) source
+
+function(_TensorFlow_import)
+ # Clean cache
+ unset(TensorFlow_LIB CACHE)
+ unset(TensorFlow_INCLUDE_DIR CACHE)
+ # Find the header & lib
+ find_library(TensorFlow_LIB NAMES tensorflow PATHS "${TENSORFLOW_PREFIX}/lib")
+ find_path(TensorFlow_INCLUDE_DIR NAMES tensorflow/c/c_api.h PATHS "${TENSORFLOW_PREFIX}/include")
+
+ if(NOT TensorFlow_LIB OR NOT TensorFlow_INCLUDE_DIR)
+ message(STATUS "Found TensorFlow: FALSE")
+
+ set(TensorFlow_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT TensorFlow_LIB OR NOT TensorFlow_INCLUDE_DIR)
+
+ # Check TensorFlow version
+ try_run(RUN_RESULT_VAR COMPILE_RESULT_VAR
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_CURRENT_LIST_DIR}/TensorFlowVersionChecker.c
+ COMPILE_DEFINITIONS -I${TensorFlow_INCLUDE_DIR}
+ LINK_LIBRARIES ${TensorFlow_LIB}
+ ARGS ${TENSORFLOW_VERSION_REQUIRED})
+
+ if(NOT COMPILE_RESULT_VAR)
+ message(STATUS "Failed to build TensorFlowVersionChecker. Your libtensorflow may be built on different version of Ubuntu.")
+ message(STATUS "Found TensorFlow: FALSE")
+ set(TensorFlow_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT COMPILE_RESULT_VAR)
+
+ if(NOT RUN_RESULT_VAR EQUAL 0)
+ message(STATUS "you need tensorflow version ${TENSORFLOW_VERSION_REQUIRED}")
+ message(STATUS "Found TensorFlow: FALSE")
+ set(TensorFlow_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT RUN_RESULT_VAR EQUAL 0)
+
+ # Add tensorflow target (if necessary)
+ if(NOT TARGET tensorflow-1.13)
+ message(STATUS "Found TensorFlow (include: ${TensorFlow_INCLUDE_DIR}, library: ${TensorFlow_LIB})")
+
+ # NOTE IMPORTED target may be more appropriate for this case
+ add_library(tensorflow-1.13 INTERFACE)
+ target_link_libraries(tensorflow-1.13 INTERFACE ${TensorFlow_LIB})
+ target_include_directories(tensorflow-1.13 INTERFACE ${TensorFlow_INCLUDE_DIR})
+ endif(NOT TARGET tensorflow-1.13)
+
+ set(TensorFlow_FOUND TRUE PARENT_SCOPE)
+endfunction(_TensorFlow_import)
+
+_TensorFlow_import()
diff --git a/infra/cmake/packages/TensorFlow-1.13/TensorFlowConfigVersion.cmake b/infra/cmake/packages/TensorFlow-1.13/TensorFlowConfigVersion.cmake
new file mode 100644
index 000000000..b5a37ddba
--- /dev/null
+++ b/infra/cmake/packages/TensorFlow-1.13/TensorFlowConfigVersion.cmake
@@ -0,0 +1,10 @@
+set(PACKAGE_VERSION "1.13")
+set(PACKAGE_VERSION_EXACT FALSE)
+set(PACKAGE_VERSION_COMPATIBLE FALSE)
+set(PACKAGE_VERSION_UNSUITABLE TRUE)
+
+if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
+ set(PACKAGE_VERSION_EXACT TRUE)
+ set(PACKAGE_VERSION_COMPATIBLE TRUE)
+ set(PACKAGE_VERSION_UNSUITABLE FALSE)
+endif(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
diff --git a/infra/cmake/packages/TensorFlow-1.13/TensorFlowVersionChecker.c b/infra/cmake/packages/TensorFlow-1.13/TensorFlowVersionChecker.c
new file mode 100644
index 000000000..fcd6be122
--- /dev/null
+++ b/infra/cmake/packages/TensorFlow-1.13/TensorFlowVersionChecker.c
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string.h>
+#include <tensorflow/c/c_api.h>
+
+int main(int argc, char **argv)
+{
+ if (argc >= 2 && !strncmp(argv[1], TF_Version(), 4))
+ return 0;
+ return 255;
+}
diff --git a/infra/cmake/packages/TensorFlowEigenSource-2.1.0/TensorFlowEigenSourceConfig.cmake b/infra/cmake/packages/TensorFlowEigenSource-2.1.0/TensorFlowEigenSourceConfig.cmake
index f84675596..8120ebca2 100644
--- a/infra/cmake/packages/TensorFlowEigenSource-2.1.0/TensorFlowEigenSourceConfig.cmake
+++ b/infra/cmake/packages/TensorFlowEigenSource-2.1.0/TensorFlowEigenSourceConfig.cmake
@@ -9,7 +9,8 @@ function(_TensorFlowEigenSource_import)
# Exact version used by TensorFlow v2.1.0.
# See tensorflow/tensorflow/workspace.bzl.
- envoption(TENSORFLOW_2_1_0_EIGEN_URL https://gitlab.com/libeigen/eigen/-/archive/4e696901f873a2347f76d931cf2f701e31e15d05/eigen-4e696901f873a2347f76d931cf2f701e31e15d05.tar.gz)
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://gitlab.com")
+ envoption(TENSORFLOW_2_1_0_EIGEN_URL ${EXTERNAL_DOWNLOAD_SERVER}/libeigen/eigen/-/archive/4e696901f873a2347f76d931cf2f701e31e15d05/eigen-4e696901f873a2347f76d931cf2f701e31e15d05.tar.gz)
ExternalSource_Download(EIGEN DIRNAME TENSORFLOW-2.1.0-EIGEN ${TENSORFLOW_2_1_0_EIGEN_URL})
diff --git a/infra/cmake/packages/TensorFlowEigenSource-2.3.0-rc0Config.cmake b/infra/cmake/packages/TensorFlowEigenSource-2.6.0/TensorFlowEigenSourceConfig.cmake
index 207f7b5bd..a9ec75d34 100644
--- a/infra/cmake/packages/TensorFlowEigenSource-2.3.0-rc0Config.cmake
+++ b/infra/cmake/packages/TensorFlowEigenSource-2.6.0/TensorFlowEigenSourceConfig.cmake
@@ -7,14 +7,12 @@ function(_TensorFlowEigenSource_import)
nnas_include(ExternalSourceTools)
nnas_include(OptionTools)
- # NOTE TensorFlow 2.3.0-rc0 uses the following URL
+ # Exact version used by TensorFlow v2.6.0.
+ # See tensorflow/third_party/eigen3/workspace.bzl.
envoption(EXTERNAL_DOWNLOAD_SERVER "https://gitlab.com")
- envoption(TENSORFLOW_2_3_0_EIGEN_URL ${EXTERNAL_DOWNLOAD_SERVER}/libeigen/eigen/-/archive/386d809bde475c65b7940f290efe80e6a05878c4/eigen-386d809bde475c65b7940f290efe80e6a05878c4.tar.gz)
+ envoption(TENSORFLOW_2_6_0_EIGEN_URL ${EXTERNAL_DOWNLOAD_SERVER}/libeigen/eigen/-/archive/12e8d57108c50d8a63605c6eb0144c838c128337/eigen-12e8d57108c50d8a63605c6eb0144c838c128337.tar.gz)
- ExternalSource_Download(EIGEN
- DIRNAME TENSORFLOW-2.3.0-EIGEN
- URL ${TENSORFLOW_2_3_0_EIGEN_URL}
-)
+ ExternalSource_Download(EIGEN DIRNAME TENSORFLOW-2.6.0-EIGEN ${TENSORFLOW_2_6_0_EIGEN_URL})
set(TensorFlowEigenSource_DIR ${EIGEN_SOURCE_DIR} PARENT_SCOPE)
set(TensorFlowEigenSource_FOUND TRUE PARENT_SCOPE)
diff --git a/infra/cmake/packages/TensorFlowEigenSource-2.6.0/TensorFlowEigenSourceConfigVersion.cmake b/infra/cmake/packages/TensorFlowEigenSource-2.6.0/TensorFlowEigenSourceConfigVersion.cmake
new file mode 100644
index 000000000..38ad0aa31
--- /dev/null
+++ b/infra/cmake/packages/TensorFlowEigenSource-2.6.0/TensorFlowEigenSourceConfigVersion.cmake
@@ -0,0 +1,10 @@
+set(PACKAGE_VERSION "2.6.0")
+set(PACKAGE_VERSION_EXACT FALSE)
+set(PACKAGE_VERSION_COMPATIBLE FALSE)
+set(PACKAGE_VERSION_UNSUITABLE TRUE)
+
+if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
+ set(PACKAGE_VERSION_EXACT TRUE)
+ set(PACKAGE_VERSION_COMPATIBLE TRUE)
+ set(PACKAGE_VERSION_UNSUITABLE FALSE)
+endif(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
diff --git a/infra/cmake/packages/TensorFlowEigenSource-2.8.0/TensorFlowEigenSourceConfig.cmake b/infra/cmake/packages/TensorFlowEigenSource-2.8.0/TensorFlowEigenSourceConfig.cmake
new file mode 100644
index 000000000..6f59f0771
--- /dev/null
+++ b/infra/cmake/packages/TensorFlowEigenSource-2.8.0/TensorFlowEigenSourceConfig.cmake
@@ -0,0 +1,21 @@
+function(_TensorFlowEigenSource_import)
+ if(NOT DOWNLOAD_EIGEN)
+ set(TensorFlowEigenSource_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT DOWNLOAD_EIGEN)
+
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ # Exact version used by TensorFlow v2.8.0.
+ # See tensorflow/third_party/eigen3/workspace.bzl.
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://gitlab.com")
+ envoption(TENSORFLOW_2_8_0_EIGEN_URL ${EXTERNAL_DOWNLOAD_SERVER}/libeigen/eigen/-/archive/008ff3483a8c5604639e1c4d204eae30ad737af6/eigen-e1dd31ce174c3d26fbe38388f64b09d2adbd7557a59e90e6f545a288cc1755fc.tar.gz)
+
+ ExternalSource_Download(EIGEN DIRNAME TENSORFLOW-2.8.0-EIGEN ${TENSORFLOW_2_8_0_EIGEN_URL})
+
+ set(TensorFlowEigenSource_DIR ${EIGEN_SOURCE_DIR} PARENT_SCOPE)
+ set(TensorFlowEigenSource_FOUND TRUE PARENT_SCOPE)
+endfunction(_TensorFlowEigenSource_import)
+
+_TensorFlowEigenSource_import()
diff --git a/infra/cmake/packages/TensorFlowEigenSource-2.8.0/TensorFlowEigenSourceConfigVersion.cmake b/infra/cmake/packages/TensorFlowEigenSource-2.8.0/TensorFlowEigenSourceConfigVersion.cmake
new file mode 100644
index 000000000..2ad2e241e
--- /dev/null
+++ b/infra/cmake/packages/TensorFlowEigenSource-2.8.0/TensorFlowEigenSourceConfigVersion.cmake
@@ -0,0 +1,10 @@
+set(PACKAGE_VERSION "2.8.0")
+set(PACKAGE_VERSION_EXACT FALSE)
+set(PACKAGE_VERSION_COMPATIBLE FALSE)
+set(PACKAGE_VERSION_UNSUITABLE TRUE)
+
+if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
+ set(PACKAGE_VERSION_EXACT TRUE)
+ set(PACKAGE_VERSION_COMPATIBLE TRUE)
+ set(PACKAGE_VERSION_UNSUITABLE FALSE)
+endif(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
diff --git a/infra/cmake/packages/TensorFlowGEMMLowpSource-2.1.0/TensorFlowGEMMLowpSourceConfig.cmake b/infra/cmake/packages/TensorFlowGEMMLowpSource-2.1.0/TensorFlowGEMMLowpSourceConfig.cmake
index 035264fa9..421be6c66 100644
--- a/infra/cmake/packages/TensorFlowGEMMLowpSource-2.1.0/TensorFlowGEMMLowpSourceConfig.cmake
+++ b/infra/cmake/packages/TensorFlowGEMMLowpSource-2.1.0/TensorFlowGEMMLowpSourceConfig.cmake
@@ -9,7 +9,8 @@ function(_TensorFlowGEMMLowpSource_import)
# Exact version used by TensorFlow v2.1.0.
# See tensorflow/tensorflow/workspace.bzl.
- envoption(TENSORFLOW_2_1_0_GEMMLOWP_URL https://github.com/google/gemmlowp/archive/12fed0cd7cfcd9e169bf1925bc3a7a58725fdcc3.zip)
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(TENSORFLOW_2_1_0_GEMMLOWP_URL ${EXTERNAL_DOWNLOAD_SERVER}/google/gemmlowp/archive/12fed0cd7cfcd9e169bf1925bc3a7a58725fdcc3.zip)
ExternalSource_Download(GEMMLOWP DIRNAME TENSORFLOW-2.1.0-GEMMLOWP ${TENSORFLOW_2_1_0_GEMMLOWP_URL})
diff --git a/infra/cmake/packages/TensorFlowGEMMLowpSource-2.3.0/TensorFlowGEMMLowpSourceConfig.cmake b/infra/cmake/packages/TensorFlowGEMMLowpSource-2.3.0/TensorFlowGEMMLowpSourceConfig.cmake
index bc13d6227..44c56a6be 100644
--- a/infra/cmake/packages/TensorFlowGEMMLowpSource-2.3.0/TensorFlowGEMMLowpSourceConfig.cmake
+++ b/infra/cmake/packages/TensorFlowGEMMLowpSource-2.3.0/TensorFlowGEMMLowpSourceConfig.cmake
@@ -9,7 +9,8 @@ function(_TensorFlowGEMMLowpSource_import)
# Exact version used by TensorFlow v2.3.0.
# See tensorflow/tensorflow/workspace.bzl.
- envoption(TENSORFLOW_2_3_0_GEMMLOWP_URL https://github.com/google/gemmlowp/archive/fda83bdc38b118cc6b56753bd540caa49e570745.zip)
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(TENSORFLOW_2_3_0_GEMMLOWP_URL ${EXTERNAL_DOWNLOAD_SERVER}/google/gemmlowp/archive/fda83bdc38b118cc6b56753bd540caa49e570745.zip)
ExternalSource_Download(GEMMLOWP DIRNAME TENSORFLOW-2.3.0-GEMMLOWP ${TENSORFLOW_2_3_0_GEMMLOWP_URL})
diff --git a/infra/cmake/packages/TensorFlowGEMMLowpSource-2.6.0/TensorFlowGEMMLowpSourceConfig.cmake b/infra/cmake/packages/TensorFlowGEMMLowpSource-2.6.0/TensorFlowGEMMLowpSourceConfig.cmake
new file mode 100644
index 000000000..76cdfdd6c
--- /dev/null
+++ b/infra/cmake/packages/TensorFlowGEMMLowpSource-2.6.0/TensorFlowGEMMLowpSourceConfig.cmake
@@ -0,0 +1,21 @@
+function(_TensorFlowGEMMLowpSource_import)
+ if(NOT DOWNLOAD_GEMMLOWP)
+ set(TensorFlowGEMMLowpSource_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT DOWNLOAD_GEMMLOWP)
+
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ # Exact version used by TensorFlow v2.6.0.
+ # See tensorflow/third_party/gemmlowp/workspace.bzl.
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(TENSORFLOW_2_6_0_GEMMLOWP_URL ${EXTERNAL_DOWNLOAD_SERVER}/google/gemmlowp/archive/fda83bdc38b118cc6b56753bd540caa49e570745.zip)
+
+ ExternalSource_Download(GEMMLOWP DIRNAME TENSORFLOW-2.6.0-GEMMLOWP ${TENSORFLOW_2_6_0_GEMMLOWP_URL})
+
+ set(TensorFlowGEMMLowpSource_DIR ${GEMMLOWP_SOURCE_DIR} PARENT_SCOPE)
+ set(TensorFlowGEMMLowpSource_FOUND TRUE PARENT_SCOPE)
+endfunction(_TensorFlowGEMMLowpSource_import)
+
+_TensorFlowGEMMLowpSource_import()
diff --git a/infra/cmake/packages/TensorFlowGEMMLowpSource-2.6.0/TensorFlowGEMMLowpSourceConfigVersion.cmake b/infra/cmake/packages/TensorFlowGEMMLowpSource-2.6.0/TensorFlowGEMMLowpSourceConfigVersion.cmake
new file mode 100644
index 000000000..38ad0aa31
--- /dev/null
+++ b/infra/cmake/packages/TensorFlowGEMMLowpSource-2.6.0/TensorFlowGEMMLowpSourceConfigVersion.cmake
@@ -0,0 +1,10 @@
+set(PACKAGE_VERSION "2.6.0")
+set(PACKAGE_VERSION_EXACT FALSE)
+set(PACKAGE_VERSION_COMPATIBLE FALSE)
+set(PACKAGE_VERSION_UNSUITABLE TRUE)
+
+if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
+ set(PACKAGE_VERSION_EXACT TRUE)
+ set(PACKAGE_VERSION_COMPATIBLE TRUE)
+ set(PACKAGE_VERSION_UNSUITABLE FALSE)
+endif(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
diff --git a/infra/cmake/packages/TensorFlowGEMMLowpSource-2.8.0/TensorFlowGEMMLowpSourceConfig.cmake b/infra/cmake/packages/TensorFlowGEMMLowpSource-2.8.0/TensorFlowGEMMLowpSourceConfig.cmake
new file mode 100644
index 000000000..3e17490c3
--- /dev/null
+++ b/infra/cmake/packages/TensorFlowGEMMLowpSource-2.8.0/TensorFlowGEMMLowpSourceConfig.cmake
@@ -0,0 +1,21 @@
+function(_TensorFlowGEMMLowpSource_import)
+ if(NOT DOWNLOAD_GEMMLOWP)
+ set(TensorFlowGEMMLowpSource_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT DOWNLOAD_GEMMLOWP)
+
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ # Exact version used by TensorFlow v2.8.0.
+ # See tensorflow/third_party/gemmlowp/workspace.bzl.
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(TENSORFLOW_2_8_0_GEMMLOWP_URL ${EXTERNAL_DOWNLOAD_SERVER}/google/gemmlowp/archive/fda83bdc38b118cc6b56753bd540caa49e570745.zip)
+
+ ExternalSource_Download(GEMMLOWP DIRNAME TENSORFLOW-2.8.0-GEMMLOWP ${TENSORFLOW_2_8_0_GEMMLOWP_URL})
+
+ set(TensorFlowGEMMLowpSource_DIR ${GEMMLOWP_SOURCE_DIR} PARENT_SCOPE)
+ set(TensorFlowGEMMLowpSource_FOUND TRUE PARENT_SCOPE)
+endfunction(_TensorFlowGEMMLowpSource_import)
+
+_TensorFlowGEMMLowpSource_import()
diff --git a/infra/cmake/packages/TensorFlowGEMMLowpSource-2.8.0/TensorFlowGEMMLowpSourceConfigVersion.cmake b/infra/cmake/packages/TensorFlowGEMMLowpSource-2.8.0/TensorFlowGEMMLowpSourceConfigVersion.cmake
new file mode 100644
index 000000000..2ad2e241e
--- /dev/null
+++ b/infra/cmake/packages/TensorFlowGEMMLowpSource-2.8.0/TensorFlowGEMMLowpSourceConfigVersion.cmake
@@ -0,0 +1,10 @@
+set(PACKAGE_VERSION "2.8.0")
+set(PACKAGE_VERSION_EXACT FALSE)
+set(PACKAGE_VERSION_COMPATIBLE FALSE)
+set(PACKAGE_VERSION_UNSUITABLE TRUE)
+
+if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
+ set(PACKAGE_VERSION_EXACT TRUE)
+ set(PACKAGE_VERSION_COMPATIBLE TRUE)
+ set(PACKAGE_VERSION_UNSUITABLE FALSE)
+endif(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
diff --git a/infra/cmake/packages/TensorFlowLite-1.12/Lite/CMakeLists.txt b/infra/cmake/packages/TensorFlowLite-1.12/Lite/CMakeLists.txt
deleted file mode 100644
index 337d6b24f..000000000
--- a/infra/cmake/packages/TensorFlowLite-1.12/Lite/CMakeLists.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-# NOTE The followings SHOULD be defined before using this CMakeLists.txt
-#
-# 'TensorFlowSource_DIR' variable
-# 'FlatBuffersSource_DIR' variable
-# 'eigen' target
-# 'gemmlowp' target
-# 'neon2sse' target
-# 'farmhash' target
-# 'abseil' target
-#
-message(STATUS "Build TensorFlow Lite from ${TensorFlowSource_DIR}")
-
-set(TensorFlowLiteSource_DIR ${TensorFlowSource_DIR}/tensorflow/contrib/lite)
-
-file(GLOB CORE_SRCS "${TensorFlowLiteSource_DIR}/*.c" "${TensorFlowLiteSource_DIR}/*.cc" "${TensorFlowLiteSource_DIR}/c/*.c" "${TensorFlowLiteSource_DIR}/core/api/*.cc")
-file(GLOB_RECURSE CORE_TESTS "${TensorFlowLiteSource_DIR}/*test*.cc")
-list(REMOVE_ITEM CORE_SRCS ${CORE_TESTS})
-
-file(GLOB_RECURSE KERNEL_SRCS "${TensorFlowLiteSource_DIR}/kernels/*.cc")
-file(GLOB_RECURSE KERNEL_TESTS "${TensorFlowLiteSource_DIR}/kernels/*test*.cc")
-list(REMOVE_ITEM KERNEL_SRCS ${KERNEL_TESTS})
-# Exclude buggy kernel(s) from the build
-#list(REMOVE_ITEM KERNEL_SRCS "${TensorFlowLiteSource_DIR}/kernels/internal/spectrogram.cc")
-
-list(APPEND SRCS ${CORE_SRCS})
-list(APPEND SRCS ${KERNEL_SRCS})
-
-include(CheckCXXCompilerFlag)
-
-CHECK_CXX_COMPILER_FLAG(-Wno-extern-c-compat COMPILER_SUPPORT_EXTERN_C_COMPAT_WARNING)
-
-add_library(tensorflowlite-1.12 ${SRCS})
-set_target_properties(tensorflowlite-1.12 PROPERTIES POSITION_INDEPENDENT_CODE ON)
-target_include_directories(tensorflowlite-1.12 PUBLIC ${TensorFlowSource_DIR})
-target_include_directories(tensorflowlite-1.12 PUBLIC ${FlatBuffersSource_DIR}/include)
-target_compile_options(tensorflowlite-1.12 PUBLIC -Wno-ignored-attributes)
-if(COMPILER_SUPPORT_EXTERN_C_COMPAT_WARNING)
- target_compile_options(tensorflowlite-1.12 PUBLIC -Wno-extern-c-compat)
-endif(COMPILER_SUPPORT_EXTERN_C_COMPAT_WARNING)
-target_compile_definitions(tensorflowlite-1.12 PUBLIC "GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK")
-target_link_libraries(tensorflowlite-1.12 eigen-fd6845384b86 gemmlowp neon2sse farmhash abseil dl)
diff --git a/infra/cmake/packages/TensorFlowLite-1.12/TensorFlowLiteConfig.cmake b/infra/cmake/packages/TensorFlowLite-1.12/TensorFlowLiteConfig.cmake
deleted file mode 100644
index ff15d8576..000000000
--- a/infra/cmake/packages/TensorFlowLite-1.12/TensorFlowLiteConfig.cmake
+++ /dev/null
@@ -1,62 +0,0 @@
-function(_TensorFlowLite_import)
- nnas_find_package(TensorFlowSource EXACT 1.12 QUIET)
-
- if(NOT TensorFlowSource_FOUND)
- set(TensorFlowLite_FOUND FALSE PARENT_SCOPE)
- return()
- endif(NOT TensorFlowSource_FOUND)
-
- # TensorFlow 1.12 downloads FlatBuffers from https://github.com/google/flatbuffers/archive/1f5eae5d6a1.tar.gz
- #
- # Let's use 1.10 released in 2018.10 (compatible with 1f5eae5d6a1).
- nnas_find_package(FlatBuffersSource EXACT 1.10 QUIET)
-
- if(NOT FlatBuffersSource_FOUND)
- set(TensorFlowLite_FOUND FALSE PARENT_SCOPE)
- return()
- endif(NOT FlatBuffersSource_FOUND)
-
- nnas_find_package(Farmhash QUIET)
-
- if(NOT Farmhash_FOUND)
- set(TensorFlowLite_FOUND FALSE PARENT_SCOPE)
- return()
- endif(NOT Farmhash_FOUND)
-
- nnas_find_package(Eigen-fd6845384b86 QUIET)
-
- if(NOT Eigen-fd6845384b86_FOUND)
- set(TensorFlowLite_FOUND FALSE PARENT_SCOPE)
- return()
- endif(NOT Eigen-fd6845384b86_FOUND)
-
- nnas_find_package(GEMMLowp QUIET)
-
- if(NOT GEMMLowp_FOUND)
- set(TensorFlowLite_FOUND FALSE PARENT_SCOPE)
- return()
- endif(NOT GEMMLowp_FOUND)
-
- nnas_find_package(NEON2SSE QUIET)
-
- if(NOT NEON2SSE_FOUND)
- set(TensorFlowLite_FOUND FALSE PARENT_SCOPE)
- return()
- endif(NOT NEON2SSE_FOUND)
-
- nnas_find_package(Abseil QUIET)
-
- if(NOT Abseil_FOUND)
- set(TensorFlowLite_FOUND FALSE PARENT_SCOPE)
- return()
- endif(NOT Abseil_FOUND)
-
- if(NOT TARGET tensorflowlite-1.12)
- nnas_include(ExternalProjectTools)
- add_extdirectory("${CMAKE_CURRENT_LIST_DIR}/Lite" tflite-1.12)
- endif(NOT TARGET tensorflowlite-1.12)
-
- set(TensorFlowLite_FOUND TRUE PARENT_SCOPE)
-endfunction(_TensorFlowLite_import)
-
-_TensorFlowLite_import()
diff --git a/infra/cmake/packages/TensorFlowLite-1.13.1/Lite/CMakeLists.txt b/infra/cmake/packages/TensorFlowLite-1.13.1/Lite/CMakeLists.txt
index c35617497..a57d7f4cb 100644
--- a/infra/cmake/packages/TensorFlowLite-1.13.1/Lite/CMakeLists.txt
+++ b/infra/cmake/packages/TensorFlowLite-1.13.1/Lite/CMakeLists.txt
@@ -1,7 +1,9 @@
# NOTE The followings SHOULD be defined before using this CMakeLists.txt
+# NOTE TensorFlow 1.13.1 uses flatbuffers-1.10
+# but we use flatbuffers-2.0 to match with all other modules flatbuffers version.
#
# 'TensorFlowSource_DIR' variable
-# 'FlatBuffersSource_DIR' variable
+# 'flatbuffers-2.0' target
# 'eigen' target
# 'gemmlowp' target
# 'neon2sse' target
@@ -37,10 +39,9 @@ CHECK_CXX_COMPILER_FLAG(-Wno-extern-c-compat COMPILER_SUPPORT_EXTERN_C_COMPAT_WA
add_library(tensorflowlite-1.13.1 ${SRCS})
set_target_properties(tensorflowlite-1.13.1 PROPERTIES POSITION_INDEPENDENT_CODE ON)
target_include_directories(tensorflowlite-1.13.1 PUBLIC ${TensorFlowSource_DIR})
-target_include_directories(tensorflowlite-1.13.1 PUBLIC ${FlatBuffersSource_DIR}/include)
target_compile_options(tensorflowlite-1.13.1 PUBLIC -Wno-ignored-attributes)
if(COMPILER_SUPPORT_EXTERN_C_COMPAT_WARNING)
target_compile_options(tensorflowlite-1.13.1 PUBLIC -Wno-extern-c-compat)
endif(COMPILER_SUPPORT_EXTERN_C_COMPAT_WARNING)
target_compile_definitions(tensorflowlite-1.13.1 PUBLIC "GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK")
-target_link_libraries(tensorflowlite-1.13.1 eigen gemmlowp neon2sse farmhash abseil dl)
+target_link_libraries(tensorflowlite-1.13.1 flatbuffers-2.0 eigen gemmlowp neon2sse farmhash abseil dl)
diff --git a/infra/cmake/packages/TensorFlowLite-1.13.1/TensorFlowLiteConfig.cmake b/infra/cmake/packages/TensorFlowLite-1.13.1/TensorFlowLiteConfig.cmake
index 2c6bd9f7a..ea2065850 100644
--- a/infra/cmake/packages/TensorFlowLite-1.13.1/TensorFlowLiteConfig.cmake
+++ b/infra/cmake/packages/TensorFlowLite-1.13.1/TensorFlowLiteConfig.cmake
@@ -6,12 +6,12 @@ function(_TensorFlowLite_import)
return()
endif(NOT TensorFlowSource_FOUND)
- nnas_find_package(FlatBuffersSource EXACT 1.10 QUIET)
+ nnas_find_package(FlatBuffers EXACT 2.0 QUIET)
- if(NOT FlatBuffersSource_FOUND)
+ if(NOT FlatBuffers_FOUND)
set(TensorFlowLite_FOUND FALSE PARENT_SCOPE)
return()
- endif(NOT FlatBuffersSource_FOUND)
+ endif(NOT FlatBuffers_FOUND)
nnas_find_package(Farmhash QUIET)
diff --git a/infra/cmake/packages/TensorFlowProtoText-1.12/TensorFlowProtoTextConfig.cmake b/infra/cmake/packages/TensorFlowProtoText-1.12/TensorFlowProtoTextConfig.cmake
deleted file mode 100644
index 9c7c79679..000000000
--- a/infra/cmake/packages/TensorFlowProtoText-1.12/TensorFlowProtoTextConfig.cmake
+++ /dev/null
@@ -1,104 +0,0 @@
-function(_TensorFlowProtoText_import)
- macro(require_package PKGNAME)
- nnas_find_package(${PKGNAME} ${ARGN} QUIET)
- if(NOT ${PKGNAME}_FOUND)
- message(STATUS "Found TensorFlowProtoText: FALSE (${PKGNAME} is missing)")
- set(TensorFlowProtoText_FOUND FALSE PARENT_SCOPE)
- return()
- endif(NOT ${PKGNAME}_FOUND)
- endmacro(require_package)
-
- require_package(TensorFlowSource EXACT 1.12)
- require_package(Abseil)
- require_package(Eigen-fd6845384b86)
- require_package(Protobuf)
- require_package(GoogleDoubleConversion)
- require_package(GoogleNSync)
-
- if(NOT TARGET tensorflow-prototext-1.12)
- nnas_include(ExternalProjectTools)
- add_extdirectory("${CMAKE_CURRENT_LIST_DIR}/build" TensorFlowProtoText-1.12)
- endif(NOT TARGET tensorflow-prototext-1.12)
-
- set(TensorFlowProtoText_FOUND TRUE PARENT_SCOPE)
-endfunction(_TensorFlowProtoText_import)
-
-_TensorFlowProtoText_import()
-
-if(TensorFlowProtoText_FOUND)
- # CMAKE_CURRENT_LIST_DIR
- #
- # ... The value has dynamic scope. ... Therefore the value of the variable inside a macro
- # or function is the directory of the file invoking the bottom-most entry on the call stack,
- # not the directory of the file containing the macro or function definition.
- #
- # Reference: https://cmake.org/cmake/help/v3.1/variable/CMAKE_CURRENT_LIST_DIR.html
- set(TENSORLFLOW_PROTO_TEXT_1_12_CMAKE_DIR
- "${CMAKE_CURRENT_LIST_DIR}" CACHE INTERNAL
- "Where to find make_directories"
- )
-
- # Comments from "gen_proto_text_functions.cc"
- # >
- # > Main program to take input protos and write output pb_text source files that
- # > contain generated proto text input and output functions.
- # >
- # > Main expects:
- # > - First argument is output path
- # > - Second argument is the relative path of the protos to the root. E.g.,
- # > for protos built by a rule in tensorflow/core, this will be
- # > tensorflow/core.
- # > - Then any number of source proto file names, plus one source name must be
- # > placeholder.txt from this gen tool's package. placeholder.txt is
- # > ignored for proto resolution, but is used to determine the root at which
- # > the build tool has placed the source proto files.
- # >
- function(ProtoText_Generate PREFIX OUTPUT_DIR)
- # THIS SHOULD SUCCEED!
- nnas_find_package(TensorFlowSource EXACT 1.12 REQUIRED)
-
- set(OUTPUT_REL "tensorflow")
- set(PROTO_DIR "${TensorFlowSource_DIR}")
-
- set(PROTO_INPUTS ${ARGN})
- list(APPEND PROTO_INPUTS "tensorflow/tools/proto_text/placeholder.txt")
-
- get_filename_component(abs_output_dir ${OUTPUT_DIR} ABSOLUTE)
- get_filename_component(abs_proto_dir ${TensorFlowSource_DIR} ABSOLUTE)
-
- # Let's reset variables before using them
- # NOTE This DOES NOT AFFECT variables in the parent scope
- unset(PROTO_FILES)
- unset(OUTPUT_DIRS)
- unset(OUTPUT_FILES)
-
- foreach(proto ${PROTO_INPUTS})
- get_filename_component(fil "${proto}" NAME)
- get_filename_component(dir "${proto}" DIRECTORY)
-
- get_filename_component(fil_we "${fil}" NAME_WE)
-
- get_filename_component(abs_fil "${abs_proto_base}/${proto}" ABSOLUTE)
- get_filename_component(abs_dir "${abs_fil}" DIRECTORY)
-
- list(APPEND PROTO_FILES "${abs_proto_dir}/${proto}")
-
- if(NOT ${fil} STREQUAL "placeholder.txt")
- list(APPEND OUTPUT_DIRS "${abs_output_dir}/${dir}")
- list(APPEND OUTPUT_FILES "${abs_output_dir}/${dir}/${fil_we}.pb_text.h")
- list(APPEND OUTPUT_FILES "${abs_output_dir}/${dir}/${fil_we}.pb_text-impl.h")
- list(APPEND OUTPUT_FILES "${abs_output_dir}/${dir}/${fil_we}.pb_text.cc")
- endif(NOT ${fil} STREQUAL "placeholder.txt")
- endforeach()
-
- add_custom_command(OUTPUT ${OUTPUT_FILES}
- # "make_directory" in CMake 3.1 cannot create multiple directories at once.
- # COMMAND ${CMAKE_COMMAND} -E make_directory ${OUTPUT_DIRS}
- COMMAND "${TENSORLFLOW_PROTO_TEXT_1_12_CMAKE_DIR}/make_directories.sh" ${OUTPUT_DIRS}
- COMMAND "$<TARGET_FILE:tensorflow-prototext-1.12>" "${abs_output_dir}/${OUTPUT_REL}" "${OUTPUT_REL}" ${PROTO_FILES}
- DEPENDS ${PROTO_FILES})
-
- set(${PREFIX}_SOURCES ${OUTPUT_FILES} PARENT_SCOPE)
- set(${PREFIX}_INCLUDE_DIRS ${abs_output_dir} PARENT_SCOPE)
- endfunction(ProtoText_Generate)
-endif(TensorFlowProtoText_FOUND)
diff --git a/infra/cmake/packages/TensorFlowProtoText-1.12/build/CMakeLists.txt b/infra/cmake/packages/TensorFlowProtoText-1.12/build/CMakeLists.txt
deleted file mode 100644
index ac8e43b7a..000000000
--- a/infra/cmake/packages/TensorFlowProtoText-1.12/build/CMakeLists.txt
+++ /dev/null
@@ -1,78 +0,0 @@
-message(STATUS "Build TensorFlowProtoText from '${TensorFlowSource_DIR}'")
-
-#
-# Build "proto_text" tool
-#
-unset(SOURCE_FILES)
-
-macro(Source_Add RPATH)
- list(APPEND SOURCE_FILES "${TensorFlowSource_DIR}/${RPATH}")
-endmacro(Source_Add)
-
-# This list comes from "tensorflow/contrib/makefile/proto_text_cc_files.txt"
-Source_Add(tensorflow/core/lib/core/status.cc)
-Source_Add(tensorflow/core/lib/core/threadpool.cc)
-Source_Add(tensorflow/core/lib/hash/hash.cc)
-Source_Add(tensorflow/core/lib/io/inputstream_interface.cc)
-Source_Add(tensorflow/core/lib/io/random_inputstream.cc)
-Source_Add(tensorflow/core/lib/io/buffered_inputstream.cc)
-Source_Add(tensorflow/core/lib/io/inputbuffer.cc)
-Source_Add(tensorflow/core/lib/io/iterator.cc)
-Source_Add(tensorflow/core/lib/io/path.cc)
-Source_Add(tensorflow/core/lib/strings/numbers.cc)
-Source_Add(tensorflow/core/lib/strings/scanner.cc)
-Source_Add(tensorflow/core/lib/strings/str_util.cc)
-Source_Add(tensorflow/core/lib/strings/strcat.cc)
-Source_Add(tensorflow/core/lib/strings/stringprintf.cc)
-Source_Add(tensorflow/core/lib/strings/proto_text_util.cc)
-Source_Add(tensorflow/core/platform/cpu_info.cc)
-Source_Add(tensorflow/core/platform/denormal.cc)
-Source_Add(tensorflow/core/platform/env.cc)
-Source_Add(tensorflow/core/platform/env_time.cc)
-Source_Add(tensorflow/core/platform/file_system.cc)
-Source_Add(tensorflow/core/platform/file_system_helper.cc)
-Source_Add(tensorflow/core/platform/protobuf_util.cc)
-Source_Add(tensorflow/core/platform/setround.cc)
-Source_Add(tensorflow/core/platform/tracing.cc)
-Source_Add(tensorflow/core/platform/posix/env.cc)
-Source_Add(tensorflow/core/platform/posix/env_time.cc)
-Source_Add(tensorflow/core/platform/posix/error.cc)
-Source_Add(tensorflow/core/platform/posix/load_library.cc)
-Source_Add(tensorflow/core/platform/posix/port.cc)
-Source_Add(tensorflow/core/platform/posix/posix_file_system.cc)
-Source_Add(tensorflow/core/platform/default/logging.cc)
-Source_Add(tensorflow/core/platform/default/mutex.cc)
-Source_Add(tensorflow/core/platform/default/protobuf.cc)
-
-Source_Add(tensorflow/tools/proto_text/gen_proto_text_functions_lib.cc)
-Source_Add(tensorflow/tools/proto_text/gen_proto_text_functions.cc)
-
-unset(PROTO_FILES)
-
-macro(Proto_Add RPATH)
- list(APPEND PROTO_FILES "${RPATH}")
-endmacro(Proto_Add)
-
-Proto_Add(tensorflow/core/lib/core/error_codes.proto)
-Proto_Add(tensorflow/core/framework/types.proto)
-Proto_Add(tensorflow/core/framework/tensor.proto)
-Proto_Add(tensorflow/core/framework/tensor_shape.proto)
-Proto_Add(tensorflow/core/framework/summary.proto)
-Proto_Add(tensorflow/core/framework/resource_handle.proto)
-
-Protobuf_Generate(PROTO_TEXT_PROTO
- "${CMAKE_CURRENT_BINARY_DIR}/generated/proto_text"
- "${TensorFlowSource_DIR}"
- ${PROTO_FILES}
-)
-
-add_executable(tensorflow-prototext-1.12 ${SOURCE_FILES} ${PROTO_TEXT_PROTO_SOURCES})
-target_include_directories(tensorflow-prototext-1.12 PRIVATE ${TensorFlowSource_DIR})
-target_include_directories(tensorflow-prototext-1.12 PRIVATE ${PROTO_TEXT_PROTO_INCLUDE_DIRS})
-
-target_link_libraries(tensorflow-prototext-1.12 PRIVATE abseil)
-target_link_libraries(tensorflow-prototext-1.12 PRIVATE eigen-fd6845384b86)
-target_link_libraries(tensorflow-prototext-1.12 PRIVATE ${PROTO_TEXT_PROTO_LIBRARIES})
-target_link_libraries(tensorflow-prototext-1.12 PRIVATE Google::DoubleConversion)
-target_link_libraries(tensorflow-prototext-1.12 PRIVATE Google::NSync)
-target_link_libraries(tensorflow-prototext-1.12 PRIVATE dl)
diff --git a/infra/cmake/packages/TensorFlowProtoText-1.12/make_directories.sh b/infra/cmake/packages/TensorFlowProtoText-1.12/make_directories.sh
deleted file mode 100755
index 1fb2ab683..000000000
--- a/infra/cmake/packages/TensorFlowProtoText-1.12/make_directories.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-while [[ $# -ne 0 ]]; do
- DIR=$1; shift
- mkdir -p "${DIR}"
-done
diff --git a/infra/cmake/packages/TensorFlowProtoText-1.13.1/TensorFlowProtoTextConfig.cmake b/infra/cmake/packages/TensorFlowProtoText-1.13.1/TensorFlowProtoTextConfig.cmake
deleted file mode 100644
index 738b28240..000000000
--- a/infra/cmake/packages/TensorFlowProtoText-1.13.1/TensorFlowProtoTextConfig.cmake
+++ /dev/null
@@ -1,104 +0,0 @@
-function(_TensorFlowProtoText_import)
- macro(require_package PKGNAME)
- nnas_find_package(${PKGNAME} ${ARGN} QUIET)
- if(NOT ${PKGNAME}_FOUND)
- message(STATUS "Found TensorFlowProtoText: FALSE (${PKGNAME} is missing)")
- set(TensorFlowProtoText_FOUND FALSE PARENT_SCOPE)
- return()
- endif(NOT ${PKGNAME}_FOUND)
- endmacro(require_package)
-
- require_package(TensorFlowSource EXACT 1.13.1)
- require_package(Abseil)
- require_package(Eigen)
- require_package(Protobuf)
- require_package(GoogleDoubleConversion)
- require_package(GoogleNSync)
-
- if(NOT TARGET tensorflow-prototext-1.13.1)
- nnas_include(ExternalProjectTools)
- add_extdirectory("${CMAKE_CURRENT_LIST_DIR}/build" TensorFlowProtoText-1.13.1)
- endif(NOT TARGET tensorflow-prototext-1.13.1)
-
- set(TensorFlowProtoText_FOUND TRUE PARENT_SCOPE)
-endfunction(_TensorFlowProtoText_import)
-
-_TensorFlowProtoText_import()
-
-if(TensorFlowProtoText_FOUND)
- # CMAKE_CURRENT_LIST_DIR
- #
- # ... The value has dynamic scope. ... Therefore the value of the variable inside a macro
- # or function is the directory of the file invoking the bottom-most entry on the call stack,
- # not the directory of the file containing the macro or function definition.
- #
- # Reference: https://cmake.org/cmake/help/v3.1/variable/CMAKE_CURRENT_LIST_DIR.html
- set(TENSORLFLOW_PROTO_TEXT_1_13_1_CMAKE_DIR
- "${CMAKE_CURRENT_LIST_DIR}" CACHE INTERNAL
- "Where to find make_directories"
- )
-
- # Comments from "gen_proto_text_functions.cc"
- # >
- # > Main program to take input protos and write output pb_text source files that
- # > contain generated proto text input and output functions.
- # >
- # > Main expects:
- # > - First argument is output path
- # > - Second argument is the relative path of the protos to the root. E.g.,
- # > for protos built by a rule in tensorflow/core, this will be
- # > tensorflow/core.
- # > - Then any number of source proto file names, plus one source name must be
- # > placeholder.txt from this gen tool's package. placeholder.txt is
- # > ignored for proto resolution, but is used to determine the root at which
- # > the build tool has placed the source proto files.
- # >
- function(ProtoText_Generate PREFIX OUTPUT_DIR)
- # THIS SHOULD SUCCEED!
- nnas_find_package(TensorFlowSource EXACT 1.13.1 REQUIRED)
-
- set(OUTPUT_REL "tensorflow")
- set(PROTO_DIR "${TensorFlowSource_DIR}")
-
- set(PROTO_INPUTS ${ARGN})
- list(APPEND PROTO_INPUTS "tensorflow/tools/proto_text/placeholder.txt")
-
- get_filename_component(abs_output_dir ${OUTPUT_DIR} ABSOLUTE)
- get_filename_component(abs_proto_dir ${TensorFlowSource_DIR} ABSOLUTE)
-
- # Let's reset variables before using them
- # NOTE This DOES NOT AFFECT variables in the parent scope
- unset(PROTO_FILES)
- unset(OUTPUT_DIRS)
- unset(OUTPUT_FILES)
-
- foreach(proto ${PROTO_INPUTS})
- get_filename_component(fil "${proto}" NAME)
- get_filename_component(dir "${proto}" DIRECTORY)
-
- get_filename_component(fil_we "${fil}" NAME_WE)
-
- get_filename_component(abs_fil "${abs_proto_base}/${proto}" ABSOLUTE)
- get_filename_component(abs_dir "${abs_fil}" DIRECTORY)
-
- list(APPEND PROTO_FILES "${abs_proto_dir}/${proto}")
-
- if(NOT ${fil} STREQUAL "placeholder.txt")
- list(APPEND OUTPUT_DIRS "${abs_output_dir}/${dir}")
- list(APPEND OUTPUT_FILES "${abs_output_dir}/${dir}/${fil_we}.pb_text.h")
- list(APPEND OUTPUT_FILES "${abs_output_dir}/${dir}/${fil_we}.pb_text-impl.h")
- list(APPEND OUTPUT_FILES "${abs_output_dir}/${dir}/${fil_we}.pb_text.cc")
- endif(NOT ${fil} STREQUAL "placeholder.txt")
- endforeach()
-
- add_custom_command(OUTPUT ${OUTPUT_FILES}
- # "make_directory" in CMake 3.1 cannot create multiple directories at once.
- # COMMAND ${CMAKE_COMMAND} -E make_directory ${OUTPUT_DIRS}
- COMMAND "${TENSORLFLOW_PROTO_TEXT_1_13_1_CMAKE_DIR}/make_directories.sh" ${OUTPUT_DIRS}
- COMMAND "$<TARGET_FILE:tensorflow-prototext-1.13.1>" "${abs_output_dir}/${OUTPUT_REL}" "${OUTPUT_REL}" ${PROTO_FILES}
- DEPENDS ${PROTO_FILES})
-
- set(${PREFIX}_SOURCES ${OUTPUT_FILES} PARENT_SCOPE)
- set(${PREFIX}_INCLUDE_DIRS ${abs_output_dir} PARENT_SCOPE)
- endfunction(ProtoText_Generate)
-endif(TensorFlowProtoText_FOUND)
diff --git a/infra/cmake/packages/TensorFlowProtoText-1.13.1/TensorFlowProtoTextConfigVersion.cmake b/infra/cmake/packages/TensorFlowProtoText-1.13.1/TensorFlowProtoTextConfigVersion.cmake
deleted file mode 100644
index ed79ecd91..000000000
--- a/infra/cmake/packages/TensorFlowProtoText-1.13.1/TensorFlowProtoTextConfigVersion.cmake
+++ /dev/null
@@ -1,9 +0,0 @@
-set(PACKAGE_VERSION "1.13.1")
-set(PACKAGE_VERSION_EXACT FALSE)
-set(PACKAGE_VERSION_COMPATIBLE FALSE)
-set(PACKAGE_VERSION_UNSUITABLE TRUE)
-
-if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
- set(PACKAGE_VERSION_EXACT TRUE)
- set(PACKAGE_VERSION_UNSUITABLE FALSE)
-endif(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
diff --git a/infra/cmake/packages/TensorFlowProtoText-1.13.1/build/CMakeLists.txt b/infra/cmake/packages/TensorFlowProtoText-1.13.1/build/CMakeLists.txt
deleted file mode 100644
index 7079aea03..000000000
--- a/infra/cmake/packages/TensorFlowProtoText-1.13.1/build/CMakeLists.txt
+++ /dev/null
@@ -1,78 +0,0 @@
-message(STATUS "Build TensorFlowProtoText from '${TensorFlowSource_DIR}'")
-
-#
-# Build "proto_text" tool
-#
-unset(SOURCE_FILES)
-
-macro(Source_Add RPATH)
- list(APPEND SOURCE_FILES "${TensorFlowSource_DIR}/${RPATH}")
-endmacro(Source_Add)
-
-# This list comes from "tensorflow/contrib/makefile/proto_text_cc_files.txt"
-Source_Add(tensorflow/core/lib/core/status.cc)
-Source_Add(tensorflow/core/lib/core/threadpool.cc)
-Source_Add(tensorflow/core/lib/hash/hash.cc)
-Source_Add(tensorflow/core/lib/io/inputstream_interface.cc)
-Source_Add(tensorflow/core/lib/io/random_inputstream.cc)
-Source_Add(tensorflow/core/lib/io/buffered_inputstream.cc)
-Source_Add(tensorflow/core/lib/io/inputbuffer.cc)
-Source_Add(tensorflow/core/lib/io/iterator.cc)
-Source_Add(tensorflow/core/lib/io/path.cc)
-Source_Add(tensorflow/core/lib/strings/numbers.cc)
-Source_Add(tensorflow/core/lib/strings/scanner.cc)
-Source_Add(tensorflow/core/lib/strings/str_util.cc)
-Source_Add(tensorflow/core/lib/strings/strcat.cc)
-Source_Add(tensorflow/core/lib/strings/stringprintf.cc)
-Source_Add(tensorflow/core/lib/strings/proto_text_util.cc)
-Source_Add(tensorflow/core/platform/cpu_info.cc)
-Source_Add(tensorflow/core/platform/denormal.cc)
-Source_Add(tensorflow/core/platform/env.cc)
-Source_Add(tensorflow/core/platform/env_time.cc)
-Source_Add(tensorflow/core/platform/file_system.cc)
-Source_Add(tensorflow/core/platform/file_system_helper.cc)
-Source_Add(tensorflow/core/platform/protobuf_util.cc)
-Source_Add(tensorflow/core/platform/setround.cc)
-Source_Add(tensorflow/core/platform/tracing.cc)
-Source_Add(tensorflow/core/platform/posix/env.cc)
-Source_Add(tensorflow/core/platform/posix/env_time.cc)
-Source_Add(tensorflow/core/platform/posix/error.cc)
-Source_Add(tensorflow/core/platform/posix/load_library.cc)
-Source_Add(tensorflow/core/platform/posix/port.cc)
-Source_Add(tensorflow/core/platform/posix/posix_file_system.cc)
-Source_Add(tensorflow/core/platform/default/logging.cc)
-Source_Add(tensorflow/core/platform/default/mutex.cc)
-Source_Add(tensorflow/core/platform/default/protobuf.cc)
-
-Source_Add(tensorflow/tools/proto_text/gen_proto_text_functions_lib.cc)
-Source_Add(tensorflow/tools/proto_text/gen_proto_text_functions.cc)
-
-unset(PROTO_FILES)
-
-macro(Proto_Add RPATH)
- list(APPEND PROTO_FILES "${RPATH}")
-endmacro(Proto_Add)
-
-Proto_Add(tensorflow/core/lib/core/error_codes.proto)
-Proto_Add(tensorflow/core/framework/types.proto)
-Proto_Add(tensorflow/core/framework/tensor.proto)
-Proto_Add(tensorflow/core/framework/tensor_shape.proto)
-Proto_Add(tensorflow/core/framework/summary.proto)
-Proto_Add(tensorflow/core/framework/resource_handle.proto)
-
-Protobuf_Generate(PROTO_TEXT_PROTO
- "${CMAKE_CURRENT_BINARY_DIR}/generated/proto_text"
- "${TensorFlowSource_DIR}"
- ${PROTO_FILES}
-)
-
-add_executable(tensorflow-prototext-1.13.1 ${SOURCE_FILES} ${PROTO_TEXT_PROTO_SOURCES})
-target_include_directories(tensorflow-prototext-1.13.1 PRIVATE ${TensorFlowSource_DIR})
-target_include_directories(tensorflow-prototext-1.13.1 PRIVATE ${PROTO_TEXT_PROTO_INCLUDE_DIRS})
-
-target_link_libraries(tensorflow-prototext-1.13.1 PRIVATE abseil)
-target_link_libraries(tensorflow-prototext-1.13.1 PRIVATE eigen)
-target_link_libraries(tensorflow-prototext-1.13.1 PRIVATE ${PROTO_TEXT_PROTO_LIBRARIES})
-target_link_libraries(tensorflow-prototext-1.13.1 PRIVATE Google::DoubleConversion)
-target_link_libraries(tensorflow-prototext-1.13.1 PRIVATE Google::NSync)
-target_link_libraries(tensorflow-prototext-1.13.1 PRIVATE dl)
diff --git a/infra/cmake/packages/TensorFlowProtoText-1.13.1/make_directories.sh b/infra/cmake/packages/TensorFlowProtoText-1.13.1/make_directories.sh
deleted file mode 100755
index 1fb2ab683..000000000
--- a/infra/cmake/packages/TensorFlowProtoText-1.13.1/make_directories.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-while [[ $# -ne 0 ]]; do
- DIR=$1; shift
- mkdir -p "${DIR}"
-done
diff --git a/infra/cmake/packages/TensorFlowRuySource-2.3.0/TensorFlowRuySourceConfig.cmake b/infra/cmake/packages/TensorFlowRuySource-2.3.0/TensorFlowRuySourceConfig.cmake
index 3dbf05ece..3a7dc893c 100644
--- a/infra/cmake/packages/TensorFlowRuySource-2.3.0/TensorFlowRuySourceConfig.cmake
+++ b/infra/cmake/packages/TensorFlowRuySource-2.3.0/TensorFlowRuySourceConfig.cmake
@@ -9,7 +9,8 @@ function(_TensorFlowRuySource_import)
# Exact version used by TensorFlow v2.3.0.
# See tensorflow/third_party/ruy/workspace.bzl
- envoption(TENSORFLOW_2_3_0_RUY_URL https://github.com/google/ruy/archive/34ea9f4993955fa1ff4eb58e504421806b7f2e8f.zip)
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(TENSORFLOW_2_3_0_RUY_URL ${EXTERNAL_DOWNLOAD_SERVER}/google/ruy/archive/34ea9f4993955fa1ff4eb58e504421806b7f2e8f.zip)
ExternalSource_Download(RUY DIRNAME TENSORFLOW-2.3.0-RUY ${TENSORFLOW_2_3_0_RUY_URL})
diff --git a/infra/cmake/packages/TensorFlowRuySource-2.6.0/TensorFlowRuySourceConfig.cmake b/infra/cmake/packages/TensorFlowRuySource-2.6.0/TensorFlowRuySourceConfig.cmake
new file mode 100644
index 000000000..e4dd4f2bf
--- /dev/null
+++ b/infra/cmake/packages/TensorFlowRuySource-2.6.0/TensorFlowRuySourceConfig.cmake
@@ -0,0 +1,21 @@
+function(_TensorFlowRuySource_import)
+ if(NOT DOWNLOAD_RUY)
+ set(TensorFlowRuySource_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT DOWNLOAD_RUY)
+
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ # Exact version used by TensorFlow v2.6.0.
+ # See tensorflow/third_party/ruy/workspace.bzl
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(TENSORFLOW_2_6_0_RUY_URL ${EXTERNAL_DOWNLOAD_SERVER}/google/ruy/archive/e6c1b8dc8a8b00ee74e7268aac8b18d7260ab1ce.zip)
+
+ ExternalSource_Download(RUY DIRNAME TENSORFLOW-2.6.0-RUY ${TENSORFLOW_2_6_0_RUY_URL})
+
+ set(TensorFlowRuySource_DIR ${RUY_SOURCE_DIR} PARENT_SCOPE)
+ set(TensorFlowRuySource_FOUND TRUE PARENT_SCOPE)
+endfunction(_TensorFlowRuySource_import)
+
+_TensorFlowRuySource_import()
diff --git a/infra/cmake/packages/TensorFlowRuySource-2.6.0/TensorFlowRuySourceConfigVersion.cmake b/infra/cmake/packages/TensorFlowRuySource-2.6.0/TensorFlowRuySourceConfigVersion.cmake
new file mode 100644
index 000000000..38ad0aa31
--- /dev/null
+++ b/infra/cmake/packages/TensorFlowRuySource-2.6.0/TensorFlowRuySourceConfigVersion.cmake
@@ -0,0 +1,10 @@
+set(PACKAGE_VERSION "2.6.0")
+set(PACKAGE_VERSION_EXACT FALSE)
+set(PACKAGE_VERSION_COMPATIBLE FALSE)
+set(PACKAGE_VERSION_UNSUITABLE TRUE)
+
+if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
+ set(PACKAGE_VERSION_EXACT TRUE)
+ set(PACKAGE_VERSION_COMPATIBLE TRUE)
+ set(PACKAGE_VERSION_UNSUITABLE FALSE)
+endif(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
diff --git a/infra/cmake/packages/TensorFlowRuySource-2.8.0/TensorFlowRuySourceConfig.cmake b/infra/cmake/packages/TensorFlowRuySource-2.8.0/TensorFlowRuySourceConfig.cmake
new file mode 100644
index 000000000..2ead7cd51
--- /dev/null
+++ b/infra/cmake/packages/TensorFlowRuySource-2.8.0/TensorFlowRuySourceConfig.cmake
@@ -0,0 +1,21 @@
+function(_TensorFlowRuySource_import)
+ if(NOT DOWNLOAD_RUY)
+ set(TensorFlowRuySource_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT DOWNLOAD_RUY)
+
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ # Exact version used by TensorFlow v2.8.0.
+ # See tensorflow/third_party/ruy/workspace.bzl
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(TENSORFLOW_2_8_0_RUY_URL ${EXTERNAL_DOWNLOAD_SERVER}/google/ruy/archive/e6c1b8dc8a8b00ee74e7268aac8b18d7260ab1ce.zip)
+
+ ExternalSource_Download(RUY DIRNAME TENSORFLOW-2.8.0-RUY ${TENSORFLOW_2_8_0_RUY_URL})
+
+ set(TensorFlowRuySource_DIR ${RUY_SOURCE_DIR} PARENT_SCOPE)
+ set(TensorFlowRuySource_FOUND TRUE PARENT_SCOPE)
+endfunction(_TensorFlowRuySource_import)
+
+_TensorFlowRuySource_import()
diff --git a/infra/cmake/packages/TensorFlowRuySource-2.8.0/TensorFlowRuySourceConfigVersion.cmake b/infra/cmake/packages/TensorFlowRuySource-2.8.0/TensorFlowRuySourceConfigVersion.cmake
new file mode 100644
index 000000000..2ad2e241e
--- /dev/null
+++ b/infra/cmake/packages/TensorFlowRuySource-2.8.0/TensorFlowRuySourceConfigVersion.cmake
@@ -0,0 +1,10 @@
+set(PACKAGE_VERSION "2.8.0")
+set(PACKAGE_VERSION_EXACT FALSE)
+set(PACKAGE_VERSION_COMPATIBLE FALSE)
+set(PACKAGE_VERSION_UNSUITABLE TRUE)
+
+if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
+ set(PACKAGE_VERSION_EXACT TRUE)
+ set(PACKAGE_VERSION_COMPATIBLE TRUE)
+ set(PACKAGE_VERSION_UNSUITABLE FALSE)
+endif(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
diff --git a/infra/cmake/packages/TensorFlowSource-1.14/TensorFlowSourceConfig.cmake b/infra/cmake/packages/TensorFlowSource-1.14/TensorFlowSourceConfig.cmake
index bcdf9f28c..33538c234 100644
--- a/infra/cmake/packages/TensorFlowSource-1.14/TensorFlowSourceConfig.cmake
+++ b/infra/cmake/packages/TensorFlowSource-1.14/TensorFlowSourceConfig.cmake
@@ -7,7 +7,8 @@ function(_TensorFlowSource_import)
nnas_include(ExternalSourceTools)
nnas_include(OptionTools)
- envoption(TENSORFLOW_1_14_URL https://github.com/tensorflow/tensorflow/archive/v1.14.0.tar.gz)
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(TENSORFLOW_1_14_URL ${EXTERNAL_DOWNLOAD_SERVER}/tensorflow/tensorflow/archive/v1.14.0.tar.gz)
ExternalSource_Download(TENSORFLOW DIRNAME TENSORFLOW-1.14 ${TENSORFLOW_1_14_URL})
diff --git a/infra/cmake/packages/TensorFlowSource-2.1.0/TensorFlowSourceConfig.cmake b/infra/cmake/packages/TensorFlowSource-2.1.0/TensorFlowSourceConfig.cmake
index 0d2a95056..aabc22f72 100644
--- a/infra/cmake/packages/TensorFlowSource-2.1.0/TensorFlowSourceConfig.cmake
+++ b/infra/cmake/packages/TensorFlowSource-2.1.0/TensorFlowSourceConfig.cmake
@@ -7,7 +7,8 @@ function(_TensorFlowSource_import)
nnas_include(ExternalSourceTools)
nnas_include(OptionTools)
- envoption(TENSORFLOW_2_1_0_URL https://github.com/tensorflow/tensorflow/archive/v2.1.0.tar.gz)
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(TENSORFLOW_2_1_0_URL ${EXTERNAL_DOWNLOAD_SERVER}/tensorflow/tensorflow/archive/v2.1.0.tar.gz)
ExternalSource_Download(TENSORFLOW DIRNAME TENSORFLOW-2.1.0 ${TENSORFLOW_2_1_0_URL})
diff --git a/infra/cmake/packages/TensorFlowSource-2.12.1/TensorFlowSourceConfig.cmake b/infra/cmake/packages/TensorFlowSource-2.12.1/TensorFlowSourceConfig.cmake
new file mode 100644
index 000000000..81fc6aecb
--- /dev/null
+++ b/infra/cmake/packages/TensorFlowSource-2.12.1/TensorFlowSourceConfig.cmake
@@ -0,0 +1,19 @@
+function(_TensorFlowSource_import)
+ if(NOT DOWNLOAD_TENSORFLOW)
+ set(TensorFlowSource_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT DOWNLOAD_TENSORFLOW)
+
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(TENSORFLOW_2_12_1_URL ${EXTERNAL_DOWNLOAD_SERVER}/tensorflow/tensorflow/archive/v2.12.1.tar.gz)
+
+ ExternalSource_Download(TENSORFLOW DIRNAME TENSORFLOW-2.12.1 ${TENSORFLOW_2_12_1_URL})
+
+ set(TensorFlowSource_DIR ${TENSORFLOW_SOURCE_DIR} PARENT_SCOPE)
+ set(TensorFlowSource_FOUND TRUE PARENT_SCOPE)
+endfunction(_TensorFlowSource_import)
+
+_TensorFlowSource_import()
diff --git a/infra/cmake/packages/TensorFlowSource-2.12.1/TensorFlowSourceConfigVersion.cmake b/infra/cmake/packages/TensorFlowSource-2.12.1/TensorFlowSourceConfigVersion.cmake
new file mode 100644
index 000000000..8566d0816
--- /dev/null
+++ b/infra/cmake/packages/TensorFlowSource-2.12.1/TensorFlowSourceConfigVersion.cmake
@@ -0,0 +1,10 @@
+set(PACKAGE_VERSION "2.12.1")
+set(PACKAGE_VERSION_EXACT FALSE)
+set(PACKAGE_VERSION_COMPATIBLE FALSE)
+set(PACKAGE_VERSION_UNSUITABLE TRUE)
+
+if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
+ set(PACKAGE_VERSION_EXACT TRUE)
+ set(PACKAGE_VERSION_COMPATIBLE TRUE)
+ set(PACKAGE_VERSION_UNSUITABLE FALSE)
+endif(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
diff --git a/infra/cmake/packages/TensorFlowSource-2.2.0/TensorFlowSourceConfig.cmake b/infra/cmake/packages/TensorFlowSource-2.2.0/TensorFlowSourceConfig.cmake
index 71220d743..7dabf88c8 100644
--- a/infra/cmake/packages/TensorFlowSource-2.2.0/TensorFlowSourceConfig.cmake
+++ b/infra/cmake/packages/TensorFlowSource-2.2.0/TensorFlowSourceConfig.cmake
@@ -7,7 +7,8 @@ function(_TensorFlowSource_import)
nnas_include(ExternalSourceTools)
nnas_include(OptionTools)
- envoption(TENSORFLOW_2_2_0_URL https://github.com/tensorflow/tensorflow/archive/v2.2.0.tar.gz)
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(TENSORFLOW_2_2_0_URL ${EXTERNAL_DOWNLOAD_SERVER}/tensorflow/tensorflow/archive/v2.2.0.tar.gz)
ExternalSource_Download(TENSORFLOW DIRNAME TENSORFLOW-2.2.0 ${TENSORFLOW_2_2_0_URL})
diff --git a/infra/cmake/packages/TensorFlowSource-2.3.0-rc0Config.cmake b/infra/cmake/packages/TensorFlowSource-2.3.0-rc0Config.cmake
index 82df579a1..967d49e87 100644
--- a/infra/cmake/packages/TensorFlowSource-2.3.0-rc0Config.cmake
+++ b/infra/cmake/packages/TensorFlowSource-2.3.0-rc0Config.cmake
@@ -10,7 +10,8 @@ function(_import)
nnas_include(ExternalSourceTools)
nnas_include(OptionTools)
- envoption(TENSORFLOW_2_3_0_RC0_URL https://github.com/tensorflow/tensorflow/archive/v2.3.0-rc0.tar.gz)
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(TENSORFLOW_2_3_0_RC0_URL ${EXTERNAL_DOWNLOAD_SERVER}/tensorflow/tensorflow/archive/v2.3.0-rc0.tar.gz)
ExternalSource_Download(TENSORFLOW DIRNAME TENSORFLOW-2.3.0-RC0 ${TENSORFLOW_2_3_0_RC0_URL})
diff --git a/infra/cmake/packages/TensorFlowSource-2.3.0/TensorFlowSourceConfig.cmake b/infra/cmake/packages/TensorFlowSource-2.3.0/TensorFlowSourceConfig.cmake
index 5c3a0f8cc..0ad0cda0b 100644
--- a/infra/cmake/packages/TensorFlowSource-2.3.0/TensorFlowSourceConfig.cmake
+++ b/infra/cmake/packages/TensorFlowSource-2.3.0/TensorFlowSourceConfig.cmake
@@ -7,7 +7,8 @@ function(_TensorFlowSource_import)
nnas_include(ExternalSourceTools)
nnas_include(OptionTools)
- envoption(TENSORFLOW_2_3_0_URL https://github.com/tensorflow/tensorflow/archive/v2.3.0.tar.gz)
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(TENSORFLOW_2_3_0_URL ${EXTERNAL_DOWNLOAD_SERVER}/tensorflow/tensorflow/archive/v2.3.0.tar.gz)
ExternalSource_Download(TENSORFLOW DIRNAME TENSORFLOW-2.3.0 ${TENSORFLOW_2_3_0_URL})
diff --git a/infra/cmake/packages/TensorFlowSource-1.12/TensorFlowSourceConfig.cmake b/infra/cmake/packages/TensorFlowSource-2.6.0/TensorFlowSourceConfig.cmake
index fb9b4c789..9a7af17b1 100644
--- a/infra/cmake/packages/TensorFlowSource-1.12/TensorFlowSourceConfig.cmake
+++ b/infra/cmake/packages/TensorFlowSource-2.6.0/TensorFlowSourceConfig.cmake
@@ -7,9 +7,10 @@ function(_TensorFlowSource_import)
nnas_include(ExternalSourceTools)
nnas_include(OptionTools)
- envoption(TENSORFLOW_1_12_URL https://github.com/tensorflow/tensorflow/archive/v1.12.0.tar.gz)
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(TENSORFLOW_2_6_0_URL ${EXTERNAL_DOWNLOAD_SERVER}/tensorflow/tensorflow/archive/v2.6.0.tar.gz)
- ExternalSource_Download(TENSORFLOW DIRNAME TENSORFLOW-1.12 ${TENSORFLOW_1_12_URL})
+ ExternalSource_Download(TENSORFLOW DIRNAME TENSORFLOW-2.6.0 ${TENSORFLOW_2_6_0_URL})
set(TensorFlowSource_DIR ${TENSORFLOW_SOURCE_DIR} PARENT_SCOPE)
set(TensorFlowSource_FOUND TRUE PARENT_SCOPE)
diff --git a/infra/cmake/packages/TensorFlowSource-2.6.0/TensorFlowSourceConfigVersion.cmake b/infra/cmake/packages/TensorFlowSource-2.6.0/TensorFlowSourceConfigVersion.cmake
new file mode 100644
index 000000000..38ad0aa31
--- /dev/null
+++ b/infra/cmake/packages/TensorFlowSource-2.6.0/TensorFlowSourceConfigVersion.cmake
@@ -0,0 +1,10 @@
+set(PACKAGE_VERSION "2.6.0")
+set(PACKAGE_VERSION_EXACT FALSE)
+set(PACKAGE_VERSION_COMPATIBLE FALSE)
+set(PACKAGE_VERSION_UNSUITABLE TRUE)
+
+if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
+ set(PACKAGE_VERSION_EXACT TRUE)
+ set(PACKAGE_VERSION_COMPATIBLE TRUE)
+ set(PACKAGE_VERSION_UNSUITABLE FALSE)
+endif(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
diff --git a/infra/cmake/packages/TensorFlowSource-2.8.0/TensorFlowSourceConfig.cmake b/infra/cmake/packages/TensorFlowSource-2.8.0/TensorFlowSourceConfig.cmake
new file mode 100644
index 000000000..988a0f49f
--- /dev/null
+++ b/infra/cmake/packages/TensorFlowSource-2.8.0/TensorFlowSourceConfig.cmake
@@ -0,0 +1,19 @@
+function(_TensorFlowSource_import)
+ if(NOT DOWNLOAD_TENSORFLOW)
+ set(TensorFlowSource_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT DOWNLOAD_TENSORFLOW)
+
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(TENSORFLOW_2_8_0_URL ${EXTERNAL_DOWNLOAD_SERVER}/tensorflow/tensorflow/archive/v2.8.0.tar.gz)
+
+ ExternalSource_Download(TENSORFLOW DIRNAME TENSORFLOW-2.8.0 ${TENSORFLOW_2_8_0_URL})
+
+ set(TensorFlowSource_DIR ${TENSORFLOW_SOURCE_DIR} PARENT_SCOPE)
+ set(TensorFlowSource_FOUND TRUE PARENT_SCOPE)
+endfunction(_TensorFlowSource_import)
+
+_TensorFlowSource_import()
diff --git a/infra/cmake/packages/TensorFlowSource-2.8.0/TensorFlowSourceConfigVersion.cmake b/infra/cmake/packages/TensorFlowSource-2.8.0/TensorFlowSourceConfigVersion.cmake
new file mode 100644
index 000000000..2ad2e241e
--- /dev/null
+++ b/infra/cmake/packages/TensorFlowSource-2.8.0/TensorFlowSourceConfigVersion.cmake
@@ -0,0 +1,10 @@
+set(PACKAGE_VERSION "2.8.0")
+set(PACKAGE_VERSION_EXACT FALSE)
+set(PACKAGE_VERSION_COMPATIBLE FALSE)
+set(PACKAGE_VERSION_UNSUITABLE TRUE)
+
+if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
+ set(PACKAGE_VERSION_EXACT TRUE)
+ set(PACKAGE_VERSION_COMPATIBLE TRUE)
+ set(PACKAGE_VERSION_UNSUITABLE FALSE)
+endif(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
diff --git a/infra/cmake/packages/VulkanSourceConfig.cmake b/infra/cmake/packages/VulkanSourceConfig.cmake
new file mode 100644
index 000000000..76b69898e
--- /dev/null
+++ b/infra/cmake/packages/VulkanSourceConfig.cmake
@@ -0,0 +1,20 @@
+function(_VulkanSource_import)
+ if(NOT ${DOWNLOAD_VULKAN})
+ set(VulkanSource_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT ${DOWNLOAD_VULKAN})
+
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ envoption(VULKAN_URL ${EXTERNAL_DOWNLOAD_SERVER}/KhronosGroup/Vulkan-Headers/archive/ec2db85225ab410bc6829251bef6c578aaed5868.tar.gz)
+ ExternalSource_Download(VULKAN
+ DIRNAME VULKAN
+ URL ${VULKAN_URL})
+
+ set(VulkanSource_DIR ${VULKAN_SOURCE_DIR} PARENT_SCOPE)
+ set(VulkanSource_FOUND TRUE PARENT_SCOPE)
+endfunction(_VulkanSource_import)
+
+_VulkanSource_import()
diff --git a/infra/cmake/packages/XnnpackSourceConfig.cmake b/infra/cmake/packages/XnnpackSourceConfig.cmake
new file mode 100644
index 000000000..36a920408
--- /dev/null
+++ b/infra/cmake/packages/XnnpackSourceConfig.cmake
@@ -0,0 +1,21 @@
+function(_XnnpackSource_import)
+ if(NOT ${DOWNLOAD_XNNPACK})
+ set(XnnpackSource_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT ${DOWNLOAD_XNNPACK})
+
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
+ # xnnpack commit in tflite v2.3
+ envoption(XNNPACK_URL ${EXTERNAL_DOWNLOAD_SERVER}/google/XNNPACK/archive/8b283aa30a3186c6e640aed520543e9c067132d.tar.gz)
+ ExternalSource_Download(XNNPACK
+ DIRNAME XNNPACK
+ URL ${XNNPACK_URL})
+
+ set(XnnpackSource_DIR ${XNNPACK_SOURCE_DIR} PARENT_SCOPE)
+ set(XnnpackSource_FOUND TRUE PARENT_SCOPE)
+endfunction(_XnnpackSource_import)
+
+_XnnpackSource_import()
diff --git a/infra/command/build-docker-image b/infra/command/build-docker-image
index 7653a0c88..f4e2069c0 100644
--- a/infra/command/build-docker-image
+++ b/infra/command/build-docker-image
@@ -5,15 +5,17 @@ function Usage()
echo "Usage: $0 $(basename ${BASH_SOURCE[0]}) [OPTIONS]"
echo ""
echo "Options:"
- echo " --extension dockerfile extension in infra/docker"
+ echo " --codename ubuntu codename, default image name is nnfw/one-devtools:[codename]"
echo "Options can use as docker build option:"
docker build --help
}
-DOCKER_FILE_RPATH_BASE="infra/docker/Dockerfile"
+DOCKER_FILE_RPATH_BASE="infra/docker"
DOCKER_BUILD_ARGS=()
-DOCKER_FILE_RPATH=${DOCKER_FILE_RPATH_BASE}
-DOCKER_IMAGE_NAME=${DOCKER_IMAGE_NAME:-nnas}
+
+# Default setting
+UBUNTU_CODENAME="bionic"
+DOCKER_TAG="latest"
while [[ $# -gt 0 ]]
do
@@ -25,15 +27,14 @@ do
Usage
exit 1
;;
- --extension)
- DOCKER_FILE_RPATH="${DOCKER_FILE_RPATH_BASE}.$2"
- shift
- shift
+ --codename)
+ UBUNTU_CODENAME=$2
+ DOCKER_TAG=$2
+ shift 2
;;
-t|--tag)
DOCKER_IMAGE_NAME="$2"
- shift
- shift
+ shift 2
;;
*)
DOCKER_BUILD_ARGS+=(${1})
@@ -42,6 +43,14 @@ do
esac
done
+DOCKER_IMAGE_NAME=${DOCKER_IMAGE_NAME:-nnfw/one-devtools:$DOCKER_TAG}
+DOCKER_FILE_RPATH=$DOCKER_FILE_RPATH_BASE/$UBUNTU_CODENAME/Dockerfile
+
+HOST_ARCH=$(uname -m)
+if [[ -n $HOST_ARCH && $HOST_ARCH != "x86_64" ]]; then
+ DOCKER_FILE_RPATH=$DOCKER_FILE_RPATH.$HOST_ARCH
+fi
+
DOCKER_BUILD_ARGS+=("-t ${DOCKER_IMAGE_NAME}")
docker build --build-arg http_proxy="${http_proxy}" \
diff --git a/infra/command/docker-run b/infra/command/docker-run
index 08610bff2..9a186b3d2 100644
--- a/infra/command/docker-run
+++ b/infra/command/docker-run
@@ -1,10 +1,19 @@
#!/bin/bash
import "docker.configuration"
+USER_MODE=0
+
+if [[ $1 == '--user' ]]; then
+ DOCKER_RUN_OPTS+=" -u $(stat -c "%u" $NNAS_PROJECT_PATH):$(stat -c "%g" $NNAS_PROJECT_PATH)"
+ USER_MODE=1
+ shift
+fi
docker run ${DOCKER_RUN_OPTS} ${DOCKER_ENV_VARS} ${DOCKER_VOLUMES} ${DOCKER_IMAGE_NAME} "$@"
EXITCODE=$?
-docker_cleanup
+if [ $USER_MODE -eq 0 ]; then
+ docker_cleanup
+fi
exit ${EXITCODE}
diff --git a/infra/command/format b/infra/command/format
index e34c9150b..461da6f85 100644
--- a/infra/command/format
+++ b/infra/command/format
@@ -4,6 +4,7 @@ INVALID_EXIT=0
FILES_TO_CHECK=()
DIRECTORIES_TO_BE_TESTED=()
DIRECTORIES_NOT_TO_BE_TESTED=()
+DEFAULT_CLANG_FORMAT="clang-format-8"
CLANG_FORMAT_CANDIDATES=()
PATCH_FILE=format.patch
CHECK_DIFF_ONLY="0"
@@ -16,7 +17,7 @@ function Usage()
echo "If <file>s are given, it reformats the files"
echo ""
echo "Options:"
- echo " --clang-format <TOOL> clang format bin (default: clang-format-3.9, clang-format)"
+ echo " --clang-format <TOOL> clang format bin (default: $DEFAULT_CLANG_FORMAT)"
echo " --diff-only check diff files with master"
echo " --staged-only check git staged files"
}
@@ -65,39 +66,14 @@ function command_exists() {
command -v $1 > /dev/null 2>&1
}
-function exclude_symbolic_links() {
- # Check all files (CMakeLists.txt, *.cl, ... not only for C++, Python)
- if [[ ${#FILES_TO_CHECK} -ne 0 ]]; then
- FILES_EXCLUDE_SYMLINKS=$(file ${FILES_TO_CHECK} | grep -v "symbolic link" | cut -d':' -f1)
- FILES_TO_CHECK=${FILES_EXCLUDE_SYMLINKS}
- fi
-}
-
function check_newline() {
- FILES_TO_CHECK_CR=()
- for f in ${FILES_TO_CHECK[@]}; do
- # Manually ignore style checking
- if [[ ${f} == !(*.svg|*.pdf|*.png) ]]; then
- FILES_TO_CHECK_CR+=("${f}")
- fi
- done
+ # Exclude binary (refer .gitattributes file)
+ # TODO Remove svg file excluding
+  # .svg: XML-based markup for vector graphics
+ FILES_TO_CHECK_EOF=`echo "$FILES_TO_CHECK" | tr ' ' '\n' | egrep -v '((\.caffemodel)|(\.png)|(\.pdf)|(\.h5)|(\.pdf)|(\.tar.gz)|(\.tflite)|(\.pdf)|(\.bmp)|(\.svg))$'`
- # Check all files (CMakeLists.txt, *.cl, ... not only for C++, Python)
- if [[ ${#FILES_TO_CHECK_CR} -ne 0 ]]; then
- CRCHECK=$(file ${FILES_TO_CHECK_CR} | grep 'with CR')
- else
- return
- fi
- FILES_TO_FIX=($(echo "$CRCHECK" | grep "with CRLF line" | cut -d':' -f1))
- for f in ${FILES_TO_FIX[@]}; do
- tr -d '\r' < $f > $f.fixed && cat $f.fixed > $f && rm $f.fixed
- done
- FILES_TO_FIX=($(echo "${CRCHECK}" | grep "with CR line" | cut -d':' -f1))
- for f in ${FILES_TO_FIX[@]}; do
- tr '\r' '\n' < $f > $f.fixed && cat $f.fixed > $f && rm $f.fixed
- done
- # Check no new line at end of file
- for f in ${FILES_TO_CHECK_CR[@]}; do
+ for f in ${FILES_TO_CHECK_EOF[@]}; do
+ # Check no new line at end of file
if diff /dev/null "$f" | tail -1 | grep '^\\ No newline' > /dev/null; then
echo >> "$f"
fi
@@ -106,22 +82,19 @@ function check_newline() {
function check_permission() {
# Check all files except script
- FILES_TO_CHECK_PERMISSION=()
- for f in ${FILES_TO_CHECK[@]}; do
- # Manually ignore permission checking
- if [[ ${f} == !(nnas|nnfw|nncc|*.sh|*.py|*/gradlew) ]] || [[ ${f} == tests/nnapi/specs/**/*.py ]]; then
- FILES_TO_CHECK_PERMISSION+=("${f}")
- fi
- done
+ # Manually ignore permission checking
+ FILES_TO_CHECK_PERMISSION=$(git ls-files -c -s --exclude-standard ${FILES_TO_CHECK[@]} | egrep '^100755' | cut -f2)
+ FILES_TO_CHECK_PERMISSION=`echo "$FILES_TO_CHECK_PERMISSION" | tr ' ' '\n' | egrep -v '((^nnas)|(^nnfw)|(^nncc)|(\.sh)|(\.py)|(/gradlew))$'`
+ FILES_TO_CHECK_PERMISSION=`echo "$FILES_TO_CHECK_PERMISSION" | egrep -v '((^infra/debian/compiler/rules)|(^infra/debian/runtime/rules))$'`
+ FILES_TO_CHECK_PERMISSION+=`echo && echo "$FILES_TO_CHECK" | egrep '^tests/nnapi/specs/.*.py$'`
+ # Transform to array
+ FILES_TO_CHECK_PERMISSION=($FILES_TO_CHECK_PERMISSION)
if [[ ${#FILES_TO_CHECK_PERMISSION} -eq 0 ]]; then
return
fi
- for FILE_TO_CHECK in ${FILES_TO_CHECK_PERMISSION[@]}; do
- RESULT=$(stat -c '%A' ${FILE_TO_CHECK} | grep 'x')
- if [ "${RESULT}" != "" ]; then
- chmod a-x ${FILE_TO_CHECK}
- fi
+ for f in ${FILES_TO_CHECK_PERMISSION[@]}; do
+ chmod a-x $f
done
}
@@ -131,9 +104,7 @@ function check_cpp_files() {
return
fi
- CLANG_FORMAT_CANDIDATES+=("clang-format-3.9")
- CLANG_FORMAT_CANDIDATES+=("clang-format")
-
+ CLANG_FORMAT_CANDIDATES+=($DEFAULT_CLANG_FORMAT)
for CLANG_FORMAT_CANDIDATE in ${CLANG_FORMAT_CANDIDATES[@]}; do
if command_exists ${CLANG_FORMAT_CANDIDATE} ; then
CLANG_FORMAT="${CLANG_FORMAT_CANDIDATE}"
@@ -142,29 +113,23 @@ function check_cpp_files() {
done
if [[ -z ${CLANG_FORMAT} ]]; then
- echo "[ERROR] clang-format is unavailable"
+ echo "[ERROR] $CLANG_FORMAT is unavailable"
echo
- echo "Please install clang-format before running format check"
+ echo " Please install $DEFAULT_CLANG_FORMAT before running format check"
exit 1
fi
- # Check c++ files
- FILES_TO_CHECK_CPP=()
- for f in ${FILES_TO_CHECK[@]}; do
- # Manually ignore style checking
- if [[ ${f} == +(*/NeuralNetworks.h|*/NeuralNetworksExtensions.h) ]]; then
- continue
- fi
-
- # File extension to check
- if [[ ${f} == +(*.h|*.hpp|*.cpp|*.cc|*.c|*.cl) ]]; then
- FILES_TO_CHECK_CPP+=("${f}")
- fi
- done
+ # Check c++ files: replace ' ' with newline, check with grep
+ FILES_TO_CHECK_CPP=`echo "$FILES_TO_CHECK" | tr ' ' '\n' | egrep '((\.c[cl]?)|(\.cpp)|(\.h(pp)?))$'`
+ # Manually ignore style checking
+ FILES_TO_CHECK_CPP=`echo "$FILES_TO_CHECK_CPP" | egrep -v '((/NeuralNetworks\.h)|(/NeuralNetworksExtensions\.h))$'`
+ # Transform to array
+ FILES_TO_CHECK_CPP=($FILES_TO_CHECK_CPP)
# Skip by '.FORMATDENY' file
for s in ${DIRECTORIES_NOT_TO_BE_TESTED[@]}; do
FILES_TO_CHECK_CPP=(${FILES_TO_CHECK_CPP[*]/$s*/})
+ FILES_TO_CHECK_CPP_BY_CLANG_FORMAT_8=(${FILES_TO_CHECK_CPP_BY_CLANG_FORMAT_8[*]/$s*/})
done
if [[ ${#FILES_TO_CHECK_CPP} -ne 0 ]]; then
@@ -189,20 +154,21 @@ function check_python_files() {
fi
# Check python files
- FILES_TO_CHECK_PYTHON=()
- for f in ${FILES_TO_CHECK[@]}; do
- # File extension to check
- if [[ ${f} == *.py ]]; then
- FILES_TO_CHECK_PYTHON+=("${f}")
- fi
- done
+ FILES_TO_CHECK_PYTHON=(`echo "$FILES_TO_CHECK" | tr ' ' '\n' | egrep '\.py$'`)
+  # Exceptional case: one-cmds scripts don't have a '.py' extension: ignore non-python sources (cmake, etc.) and the shell script one-prepare-venv
+ FILES_TO_CHECK_PYTHON+=(`echo "$FILES_TO_CHECK" | tr ' ' '\n' | egrep '^compiler/one-cmds/[^(\./)]*$' | egrep -v '^compiler/one-cmds/one-prepare-venv$'`)
+  # Exceptional case: onecc-docker doesn't have a '.py' extension.
+ FILES_TO_CHECK_PYTHON+=(`echo "$FILES_TO_CHECK" | tr ' ' '\n' | egrep '^compiler/onecc-docker/onecc-docker$'`)
+  # Exceptional case: visq doesn't have a '.py' extension.
+ FILES_TO_CHECK_PYTHON+=(`echo "$FILES_TO_CHECK" | tr ' ' '\n' | egrep '^compiler/visq/visq$'`)
+
for s in ${DIRECTORIES_NOT_TO_BE_TESTED[@]}; do
skip=${s#'.'/}/
FILES_TO_CHECK_PYTHON=(${FILES_TO_CHECK_PYTHON[*]/$skip*/})
done
if [[ ${#FILES_TO_CHECK_PYTHON} -ne 0 ]]; then
- yapf -i --style='{based_on_style: pep8, column_limit: 90}' ${FILES_TO_CHECK_PYTHON[@]}
+ yapf -i ${FILES_TO_CHECK_PYTHON[@]}
EXIT_CODE=$?
if [[ ${EXIT_CODE} -ne 0 ]]; then
INVALID_EXIT=${EXIT_CODE}
@@ -220,7 +186,13 @@ fi
__Check_CPP=${CHECK_CPP:-"1"}
__Check_PYTHON=${CHECK_PYTHON:-"1"}
-FILES_TO_CHECK=$(git ls-files -c --exclude-standard ${DIRECTORIES_TO_BE_TESTED[@]})
+# Git file mode
+# 120000: symbolic link
+# 160000: git link
+# 100755: regular executable
+# 100644: regular readable
+# Reference: https://github.com/git/git/blob/cd42415/Documentation/technical/index-format.txt#L72-L81
+FILES_TO_CHECK=$(git ls-files -c -s --exclude-standard ${DIRECTORIES_TO_BE_TESTED[@]} | egrep -v '^1[26]0000' | cut -f2)
if [[ "${CHECK_DIFF_ONLY}" = "1" ]]; then
MASTER_EXIST=$(git rev-parse --verify master)
CURRENT_BRANCH=$(git branch | grep \* | cut -d ' ' -f2-)
@@ -235,6 +207,7 @@ if [[ "${CHECK_DIFF_ONLY}" = "1" ]]; then
else
FILES_TO_CHECK=$(git diff --name-only --diff-filter=d HEAD~${DIFF_COMMITS})
fi
+ FILES_TO_CHECK=$(git ls-files -c -s --exclude-standard ${FILES_TO_CHECK[@]} | egrep -v '^1[26]0000' | cut -f2)
fi
fi
@@ -242,7 +215,6 @@ for DIR_NOT_TO_BE_TESTED in $(git ls-files -co --exclude-standard '*/.FORMATDENY
DIRECTORIES_NOT_TO_BE_TESTED+=($(dirname "${DIR_NOT_TO_BE_TESTED}"))
done
-exclude_symbolic_links
check_newline
check_permission
check_cpp_files
diff --git a/infra/command/gen-coverage-report b/infra/command/gen-coverage-report
index c3a8202e7..c841dc0cb 100644
--- a/infra/command/gen-coverage-report
+++ b/infra/command/gen-coverage-report
@@ -66,13 +66,10 @@ done
"${LCOV_PATH}" -e "${RAW_COVERAGE_INFO_PATH}" -o "${EXTRACTED_COVERAGE_INFO_PATH}" \
"${CANDIDATES[@]}"
-# Exclude *.test.cpp files from coverage report
-"${LCOV_PATH}" -r "${EXTRACTED_COVERAGE_INFO_PATH}" -o "${EXCLUDED_COVERAGE_INFO_PATH}" \
- '*.test.cpp'
-
+# Exclude test files from coverage report
# Exclude flatbuffer generated files from coverage report
"${LCOV_PATH}" -r "${EXTRACTED_COVERAGE_INFO_PATH}" -o "${EXCLUDED_COVERAGE_INFO_PATH}" \
- '*_schema_generated.h'
+ '*.test.cpp' '*.test.cc' '*/test/*' '*/tests/*' '*_schema_generated.h'
# Final coverage data
cp -v ${EXCLUDED_COVERAGE_INFO_PATH} ${COVERAGE_INFO_PATH}
diff --git a/infra/config/docker.configuration b/infra/config/docker.configuration
index 08931cd28..2e001373b 100644
--- a/infra/config/docker.configuration
+++ b/infra/config/docker.configuration
@@ -3,7 +3,7 @@
# Don't run this script
[[ "${BASH_SOURCE[0]}" == "${0}" ]] && echo "Please don't execute ${BASH_SOURCE[0]}" && exit 1
-DOCKER_IMAGE_NAME=${DOCKER_IMAGE_NAME:-nnas}
+DOCKER_IMAGE_NAME=${DOCKER_IMAGE_NAME:-nnfw/one-devtools}
echo "Using docker image ${DOCKER_IMAGE_NAME}"
if [ -z "`docker images ${DOCKER_IMAGE_NAME}`" ]; then
diff --git a/infra/debian/compiler/changelog b/infra/debian/compiler/changelog
new file mode 100644
index 000000000..c4d358481
--- /dev/null
+++ b/infra/debian/compiler/changelog
@@ -0,0 +1,113 @@
+one (1.24.0) bionic; urgency=medium
+
+ * Introduce _one-import-onnx_ extension interface
+ * _onecc_ supports profiling of multiple backends with a single cfg file
+ * Enable more Quantize operator: FloorMod, Squeeze
+ * _visq_ supports multi-out nodes
+ * _onecc_ introduces `dynamic_batch_to_single_batch` option.
+
+ -- seongwoo <seongwoo@sw> Thu, 18 Jul 2023 14:10:22 +0900
+
+one (1.23.0) bionic; urgency=medium
+
+ * Support more Op(s): GeLU
+ * Support more option(s): `--fuse-gelu`
+ * Support multiple backends compilation with a single configuration file
+ * Upgrade Circle schema to 0.5
+
+ -- seongwoo <seongwoo@sw> Thu, 18 May 2023 19:10:21 +0900
+
+one (1.22.0) bionic; urgency=medium
+
+ * Introduce new optimization options: `unroll_unidirseqlstm`, `forward_transpose_op`, `fold_fully_connected`, `fuse_prelu`
+ * Support more Ops for fake quantization: `Depth2Space`, `Space2Depth`, `Pack`, `Unpack`, `Abs`
+ * Support more Ops for quantization: `Abs`, `ReduceProd`
+ * Introduce _visq_ tool for quantization error visualization
+ * Introduce _Environment_ section into configuration file
+ * Improve speed of `convert_nchw_to_nhwc` option
+ * Support `Add`, `Mul` of index-type (int32, int64) tensors in _one-quantize_
+ * Support ubuntu 20.04
+
+ -- seongwoo <mhs4670go@naver.com> Fri, 24 Mar 2023 13:58:16 +0900
+
+one (1.21.0) bionic; urgency=medium
+
+ * Support unrolling of LSTM and RNN Ops in `one-import-onnx` tool
+ * Introduced new tools `one-infer`, `circle-operator`, `circle-interpreter`
+ * Introduced `Workflow`(WIP) in `one-cmds`
+ * New option `quant_config` in `one-quantize`
+ * New option `fake_quantize` in `one-quantize`
+ * More Ops supported: Densify
+ * More Ops for quantization: ReduceMax
+ * More Ops for mixed-precision quantization (MPQ): LeakyRelu, Neg, Relu6, Squeeze
+ * More Ops for `convert_nchw_to_nhwc` option: LogSoftmax, ReduceMax, SplitV, Softmax
+ * New optimization options in `one-optimize`: `replace_non_const_fc_with_bmm`, `resolve_customop_splitv`, `fold_densify`
+ * Improved reshape elimination in `convert_nchw_to_nhwc` option.
+ * Support fusion of Channel-wise Add + Relu with TConv
+ * Support negative axis in ArgMin/Max
+ * Show errors for unrecognized options in `one-optimize`
+ * Fix shape inference for `StridedSlice`
+ * Fix FuseBatchNormWithTConvPass to support TConv with bias
+ * Deprecate `--O1` option in `circle2circle`
+ * Support gcc-11
+ * Support limited Float16 for kernels constants with dequantization to Float32
+
+ -- seongwoo <mhs4670go@naver.com> Wed, 06 Sep 2022 12:00:00 +0900
+
+one (1.20.0) bionic; urgency=medium
+
+ * luci-interpreter supports multiple kernels with PAL layer including Cortex-M
+ * luci-interpreter supports integer tensor for partly kernels
+ * luci import supports constant without copying to reduce memory for luci-interpreter
+ * Reduce duplicate codes to package released modules
+ * Limited support for ONNX LSTM/RNN unrolling while importing
+ * Limited support for ARM32 cross build
+ * Support new operator: SVDF
+ * New virtual CircleVariable to support tensor with variable
+ * Support quantization of BatchMatMul Op
+ * Support mixed(UINT8 + INT16) quantization
+ * Support backward propagation of quantization parameters
+ * Upgrade default python to version 3.8
+ * Support TensorFlow 2.8.0, ONNX-TF 1.10.0, ONNX 1.11.0
+ * Upgrade circle schema to follow tflite schema v3b
+ * Refactor to mio-tflite280, mio-circle04 with version and helpers methods
+ * Use one flatbuffers 2.0 version
+ * Drop support for TensorFlow 1.x
+ * Fix for several bugs, performance enhancements, and typos
+
+ -- seongwoo <mhs4670go@naver.com> Tue, 26 Apr 2022 12:00:00 +0900
+
+one (1.19.0) bionic; urgency=medium
+
+ * `circle-quantizer` supports input/output type option
+ * Introduce configuration file for optimization options
+
+ -- seongwoo <mhs4670go@naver.com> Wed, 10 Nov 2021 15:53:39 +0900
+
+one (1.18.0) bionic; urgency=medium
+
+ * More optimization pass
+
+ -- seongwoo <mhs4670go@naver.com> Fri, 15 Oct 2021 15:23:20 +0900
+
+one (1.17.0) bionic; urgency=medium
+
+ * More optimization pass
+ * Add new InstanceNorm pattern in `FuseInstanceNormPass`
+ * Add verbose option
+ * Introduce `onecc` driver to `one-cmds`
+ * Introduce `one-profile` driver to `one-cmds`
+
+ -- seongwoo <mhs4670go@naver.com> Fri, 20 Aug 2021 17:50:20 +0900
+
+one (1.16.1) bionic; urgency=medium
+
+ * Extends the point where `one-codegen` finds backends.
+
+ -- seongwoo chae <mhs4670go@naver.com> Wed, 26 May 2021 18:06:53 +0900
+
+one (1.16.0) bionic; urgency=low
+
+ * Initial release.
+
+ -- seongwoo chae <mhs4670go@naver.com> Mon, 26 Apr 2021 14:34:57 +0900
diff --git a/infra/debian/compiler/compat b/infra/debian/compiler/compat
new file mode 100644
index 000000000..ec635144f
--- /dev/null
+++ b/infra/debian/compiler/compat
@@ -0,0 +1 @@
+9
diff --git a/infra/debian/compiler/control b/infra/debian/compiler/control
new file mode 100644
index 000000000..b3a3c1bf7
--- /dev/null
+++ b/infra/debian/compiler/control
@@ -0,0 +1,25 @@
+Source: one
+Section: devel
+Priority: extra
+Maintainer: Neural Network Acceleration Solution Developers <nnfw@samsung.com>
+Build-Depends: cmake, debhelper (>=9), dh-python, python3-all, python3.8, python3.8-venv
+Standards-Version: 3.9.8
+Homepage: https://github.com/Samsung/ONE
+
+Package: one-compiler
+Architecture: amd64
+Multi-Arch: foreign
+Depends: ${misc:Depends}, ${shlibs:Depends}, python3-venv, python3-pip, python3.8, python3.8-venv
+Description: On-device Neural Engine compiler package
+
+Package: one-compiler-dev
+Architecture: amd64
+Multi-Arch: same
+Depends: one-compiler, ${shlibs:Depends}, ${misc:Depends}
+Description: one-compiler development package
+
+Package: one-compiler-test
+Architecture: amd64
+Multi-Arch: same
+Depends: one-compiler, ${shlibs:Depends}, ${misc:Depends}
+Description: one-compiler test package
diff --git a/infra/debian/compiler/copyright b/infra/debian/compiler/copyright
new file mode 100644
index 000000000..bb64695a4
--- /dev/null
+++ b/infra/debian/compiler/copyright
@@ -0,0 +1,3 @@
+Files: *
+License: Proprietary
+Copyright (c) <2018> <Samsung Electronics Co.,Ltd.>
diff --git a/infra/debian/compiler/docs/one-build.1 b/infra/debian/compiler/docs/one-build.1
new file mode 100644
index 000000000..672d39f7f
--- /dev/null
+++ b/infra/debian/compiler/docs/one-build.1
@@ -0,0 +1,96 @@
+.TH ONE-BUILD "1" "August 2021" "one-build version 1.17.0" "User Commands"
+.SH NAME
+one-build \- run ONE drivers
+.SH DESCRIPTION
+usage: one\-build [\-h] [\-v] [\-V] [\-C CONFIG]
+.PP
+\fBone\-build\fR is a command line tool that runs ONE drivers in customized order.
+.SS "Configuration file:"
+\fBone\-build\fR takes input as a configuration file that supports ini format.
+A configuration file consists of sections, each led by a [section] header.
+Each section is the ONE driver you want to run, and consists of commands in a key/value combination to pass to the driver.
+.SH OPTIONS
+.TP
+\fB\-h\fR, \fB\-\-help\fR
+show this help message and exit
+.TP
+\fB\-v\fR, \fB\-\-version\fR
+show program's version number and exit
+.TP
+\fB\-V\fR, \fB\-\-verbose\fR
+output additional information to stdout or stderr
+.TP
+\fB\-C\fR CONFIG, \fB\-\-config\fR CONFIG
+run with configuration file
+.SH EXAMPLES
+Before you run \fBone\-build\fR, you must write a configuration file.
+.PP
+$ cat one-build.template.cfg
+.PP
+[one-build]
+.br
+one-import-tf=True
+.br
+one-import-tflite=False
+.br
+one-import-bcq=False
+.br
+one-import-onnx=False
+.br
+one-optimize=True
+.br
+one-quantize=False
+.br
+one-pack=True
+.br
+one-codegen=False
+.PP
+[one-import-tf]
+.br
+input_path=/path/to/inception_v3.pb
+.br
+output_path=inception_v3.circle
+.br
+input_arrays=input
+.br
+input_shapes=1,299,299,3
+.br
+output_arrays=InceptionV3/Predictions/Reshape_1
+.br
+converter_version=v1
+.br
+model_format=graph_def
+.PP
+[one-optimize]
+.br
+input_path=inception_v3.circle
+.br
+output_path=inception_v3.opt.circle
+.br
+generate_profile_data=False
+.PP
+[one-pack]
+.br
+input_path=inception_v3.opt.circle
+.br
+output_path=inception_v3_pack
+.PP
+\fBone\-build\fR section decides whether to use each driver or not.
+If the value is False, even if the corresponding section exists, the driver won't be executed.
+.SH COPYRIGHT
+Copyright \(co 2020\-2021 Samsung Electronics Co., Ltd. All Rights Reserved
+Licensed under the Apache License, Version 2.0
+https://github.com/Samsung/ONE
+.SH "SEE ALSO"
+The full documentation for
+.B one-build
+is maintained as a Texinfo manual. If the
+.B info
+and
+.B one-build
+programs are properly installed at your site, the command
+.IP
+.B info one-build
+.PP
+should give you access to the complete manual.
+
diff --git a/infra/debian/compiler/docs/one-codegen.1 b/infra/debian/compiler/docs/one-codegen.1
new file mode 100644
index 000000000..b5296a018
--- /dev/null
+++ b/infra/debian/compiler/docs/one-codegen.1
@@ -0,0 +1,39 @@
+.TH ONE-CODEGEN "1" "August 2021" "one-codegen version 1.17.0" "User Commands"
+.SH NAME
+one-codegen \- generate codes
+.SH DESCRIPTION
+usage: one\-codegen [\-h] [\-v] [\-C CONFIG] [\-b BACKEND] [\-\-] [COMMANDS FOR BACKEND]
+.PP
+\fBone\-codegen\fR is a command line tool for code generation.
+.SH OPTIONS
+.TP
+\fB\-h\fR, \fB\-\-help\fR
+show this help message and exit
+.TP
+\fB\-v\fR, \fB\-\-version\fR
+show program's version number and exit
+.TP
+\fB\-V\fR, \fB\-\-verbose\fR
+output additional information to stdout or stderr
+.TP
+\fB\-C\fR CONFIG, \fB\-\-config\fR CONFIG
+run with configuration file
+.TP
+\fB\-b\fR BACKEND, \fB\-\-backend\fR BACKEND
+backend name to use
+.SH COPYRIGHT
+Copyright \(co 2020\-2021 Samsung Electronics Co., Ltd. All Rights Reserved
+Licensed under the Apache License, Version 2.0
+https://github.com/Samsung/ONE
+.SH "SEE ALSO"
+The full documentation for
+.B one-codegen
+is maintained as a Texinfo manual. If the
+.B info
+and
+.B one-codegen
+programs are properly installed at your site, the command
+.IP
+.B info one-codegen
+.PP
+should give you access to the complete manual.
diff --git a/infra/debian/compiler/docs/one-import-bcq.1 b/infra/debian/compiler/docs/one-import-bcq.1
new file mode 100644
index 000000000..b8a85cee4
--- /dev/null
+++ b/infra/debian/compiler/docs/one-import-bcq.1
@@ -0,0 +1,61 @@
+.TH ONE-IMPORT-BCQ "1" "August 2021" "one-import-bcq version 1.17.0" "User Commands"
+.SH NAME
+one-import-bcq \- convert TensorFlow with BCQ to circle
+.SH DESCRIPTION
+usage: one\-import\-bcq [\-h] [\-v] [\-V] [\-C CONFIG] [\-\-v1 | \-\-v2] [\-i INPUT_PATH]
+.br
+[\-o OUTPUT_PATH] [\-I INPUT_ARRAYS] [\-s INPUT_SHAPES]
+.br
+[\-O OUTPUT_ARRAYS]
+.PP
+\fBone\-import\-bcq\fR is a command line tool to convert TensorFlow with BCQ to circle.
+.SH OPTIONS
+.TP
+\fB\-h\fR, \fB\-\-help\fR
+show this help message and exit
+.TP
+\fB\-v\fR, \fB\-\-version\fR
+show program's version number and exit
+.TP
+\fB\-V\fR, \fB\-\-verbose\fR
+output additional information to stdout or stderr
+.TP
+\fB\-C\fR CONFIG, \fB\-\-config\fR CONFIG
+run with configuration file
+.TP
+\fB\-\-v1\fR
+use TensorFlow Lite Converter 1.x
+.TP
+\fB\-\-v2\fR
+use TensorFlow Lite Converter 2.x
+.TP
+\fB\-i\fR INPUT_PATH, \fB\-\-input_path\fR INPUT_PATH
+full filepath of the input file
+.TP
+\fB\-o\fR OUTPUT_PATH, \fB\-\-output_path\fR OUTPUT_PATH
+full filepath of the output file
+.TP
+\fB\-I\fR INPUT_ARRAYS, \fB\-\-input_arrays\fR INPUT_ARRAYS
+names of the input arrays, comma\-separated
+.TP
+\fB\-s\fR INPUT_SHAPES, \fB\-\-input_shapes\fR INPUT_SHAPES
+shapes corresponding to \fB\-\-input_arrays\fR, colon\-separated (ex:"1,4,4,3:1,20,20,3")
+.TP
+\fB\-O\fR OUTPUT_ARRAYS, \fB\-\-output_arrays\fR OUTPUT_ARRAYS
+names of the output arrays, comma\-separated
+.SH COPYRIGHT
+Copyright \(co 2020\-2021 Samsung Electronics Co., Ltd. All Rights Reserved
+Licensed under the Apache License, Version 2.0
+https://github.com/Samsung/ONE
+.SH "SEE ALSO"
+The full documentation for
+.B one-import-bcq
+is maintained as a Texinfo manual. If the
+.B info
+and
+.B one-import-bcq
+programs are properly installed at your site, the command
+.IP
+.B info one-import-bcq
+.PP
+should give you access to the complete manual.
diff --git a/infra/debian/compiler/docs/one-import-onnx.1 b/infra/debian/compiler/docs/one-import-onnx.1
new file mode 100644
index 000000000..1953544dc
--- /dev/null
+++ b/infra/debian/compiler/docs/one-import-onnx.1
@@ -0,0 +1,63 @@
+.TH ONE-IMPORT-ONNX "1" "August 2021" "one-import-onnx version 1.17.0" "User Commands"
+.SH NAME
+one-import-onnx \- convert ONNX to circle
+.SH DESCRIPTION
+usage: one\-import\-onnx [\-h] [\-v] [\-V] [\-C CONFIG] [\-i INPUT_PATH]
+.br
+[\-o OUTPUT_PATH] [\-I INPUT_ARRAYS] [\-O OUTPUT_ARRAYS]
+.br
+[\-\-model_format MODEL_FORMAT]
+.br
+[\-\-converter_version CONVERTER_VERSION]
+.br
+[\-\-save_intermediate]
+.PP
+\fBone\-import\-onnx\fR is a command line tool to convert ONNX to circle.
+.SH OPTIONS
+.TP
+\fB\-h\fR, \fB\-\-help\fR
+show this help message and exit
+.TP
+\fB\-v\fR, \fB\-\-version\fR
+show program's version number and exit
+.TP
+\fB\-V\fR, \fB\-\-verbose\fR
+output additional information to stdout or stderr
+.TP
+\fB\-C\fR CONFIG, \fB\-\-config\fR CONFIG
+run with configuration file
+.TP
+\fB\-\-save_intermediate\fR
+Save intermediate files to output folder
+.TP
+\fB\-i\fR INPUT_PATH, \fB\-\-input_path\fR INPUT_PATH
+full filepath of the input file
+.TP
+\fB\-o\fR OUTPUT_PATH, \fB\-\-output_path\fR OUTPUT_PATH
+full filepath of the output file
+.TP
+\fB\-I\fR INPUT_ARRAYS, \fB\-\-input_arrays\fR INPUT_ARRAYS
+names of the input arrays, comma\-separated
+.TP
+\fB\-O\fR OUTPUT_ARRAYS, \fB\-\-output_arrays\fR OUTPUT_ARRAYS
+names of the output arrays, comma\-separated
+.HP
+\fB\-\-model_format\fR MODEL_FORMAT
+.HP
+\fB\-\-converter_version\fR CONVERTER_VERSION
+.SH COPYRIGHT
+Copyright \(co 2020\-2021 Samsung Electronics Co., Ltd. All Rights Reserved
+Licensed under the Apache License, Version 2.0
+https://github.com/Samsung/ONE
+.SH "SEE ALSO"
+The full documentation for
+.B one-import-onnx
+is maintained as a Texinfo manual. If the
+.B info
+and
+.B one-import-onnx
+programs are properly installed at your site, the command
+.IP
+.B info one-import-onnx
+.PP
+should give you access to the complete manual.
diff --git a/infra/debian/compiler/docs/one-import-tf.1 b/infra/debian/compiler/docs/one-import-tf.1
new file mode 100644
index 000000000..9f05a888f
--- /dev/null
+++ b/infra/debian/compiler/docs/one-import-tf.1
@@ -0,0 +1,77 @@
+.TH ONE-IMPORT-TF "1" "August 2021" "one-import-tf version 1.17.0" "User Commands"
+.SH NAME
+one-import-tf \- convert TensorFlow to circle
+.SH DESCRIPTION
+usage: one\-import\-tf [\-h] [\-v] [\-V] [\-C CONFIG] [\-\-v1 | \-\-v2]
+.br
+[\-\-graph_def | \-\-saved_model | \-\-keras_model]
+.br
+[\-i INPUT_PATH] [\-o OUTPUT_PATH] [\-I INPUT_ARRAYS]
+.br
+[\-s INPUT_SHAPES] [\-O OUTPUT_ARRAYS]
+.br
+[\-\-save_intermediate]
+.PP
+\fBone\-import\-tf\fR is a command line tool to convert TensorFlow model to circle.
+.SH OPTIONS
+.TP
+\fB\-h\fR, \fB\-\-help\fR
+show this help message and exit
+.TP
+\fB\-v\fR, \fB\-\-version\fR
+show program's version number and exit
+.TP
+\fB\-V\fR, \fB\-\-verbose\fR
+output additional information to stdout or stderr
+.TP
+\fB\-C\fR CONFIG, \fB\-\-config\fR CONFIG
+run with configuration file
+.TP
+\fB\-\-save_intermediate\fR
+Save intermediate files to output folder
+.TP
+\fB\-\-v1\fR
+use TensorFlow Lite Converter 1.x
+.TP
+\fB\-\-v2\fR
+use TensorFlow Lite Converter 2.x
+.TP
+\fB\-\-graph_def\fR
+use graph def file(default)
+.TP
+\fB\-\-saved_model\fR
+use saved model
+.TP
+\fB\-\-keras_model\fR
+use keras model
+.TP
+\fB\-i\fR INPUT_PATH, \fB\-\-input_path\fR INPUT_PATH
+full filepath of the input file
+.TP
+\fB\-o\fR OUTPUT_PATH, \fB\-\-output_path\fR OUTPUT_PATH
+full filepath of the output file
+.TP
+\fB\-I\fR INPUT_ARRAYS, \fB\-\-input_arrays\fR INPUT_ARRAYS
+names of the input arrays, comma\-separated
+.TP
+\fB\-s\fR INPUT_SHAPES, \fB\-\-input_shapes\fR INPUT_SHAPES
+shapes corresponding to \fB\-\-input_arrays\fR, colon\-separated (ex:"1,4,4,3:1,20,20,3")
+.TP
+\fB\-O\fR OUTPUT_ARRAYS, \fB\-\-output_arrays\fR OUTPUT_ARRAYS
+names of the output arrays, comma\-separated
+.SH COPYRIGHT
+Copyright \(co 2020\-2021 Samsung Electronics Co., Ltd. All Rights Reserved
+Licensed under the Apache License, Version 2.0
+https://github.com/Samsung/ONE
+.SH "SEE ALSO"
+The full documentation for
+.B one-import-tf
+is maintained as a Texinfo manual. If the
+.B info
+and
+.B one-import-tf
+programs are properly installed at your site, the command
+.IP
+.B info one-import-tf
+.PP
+should give you access to the complete manual.
diff --git a/infra/debian/compiler/docs/one-import-tflite.1 b/infra/debian/compiler/docs/one-import-tflite.1
new file mode 100644
index 000000000..ef63146ac
--- /dev/null
+++ b/infra/debian/compiler/docs/one-import-tflite.1
@@ -0,0 +1,44 @@
+.TH ONE-IMPORT-TFLITE "1" "August 2021" "one-import-tflite version 1.17.0" "User Commands"
+.SH NAME
+one-import-tflite \- convert TensorFlow lite to circle
+.SH DESCRIPTION
+usage: one\-import\-tflite [\-h] [\-v] [\-V] [\-C CONFIG] [\-i INPUT_PATH]
+.br
+[\-o OUTPUT_PATH]
+.PP
+\fBone\-import\-tflite\fR is a command line tool to convert TensorFlow lite to circle.
+.SH OPTIONS
+.TP
+\fB\-h\fR, \fB\-\-help\fR
+show this help message and exit
+.TP
+\fB\-v\fR, \fB\-\-version\fR
+show program's version number and exit
+.TP
+\fB\-V\fR, \fB\-\-verbose\fR
+output additional information to stdout or stderr
+.TP
+\fB\-C\fR CONFIG, \fB\-\-config\fR CONFIG
+run with configuration file
+.TP
+\fB\-i\fR INPUT_PATH, \fB\-\-input_path\fR INPUT_PATH
+full filepath of the input file
+.TP
+\fB\-o\fR OUTPUT_PATH, \fB\-\-output_path\fR OUTPUT_PATH
+full filepath of the output file
+.SH COPYRIGHT
+Copyright \(co 2020\-2021 Samsung Electronics Co., Ltd. All Rights Reserved
+Licensed under the Apache License, Version 2.0
+https://github.com/Samsung/ONE
+.SH "SEE ALSO"
+The full documentation for
+.B one-import-tflite
+is maintained as a Texinfo manual. If the
+.B info
+and
+.B one-import-tflite
+programs are properly installed at your site, the command
+.IP
+.B info one-import-tflite
+.PP
+should give you access to the complete manual.
diff --git a/infra/debian/compiler/docs/one-import.1 b/infra/debian/compiler/docs/one-import.1
new file mode 100644
index 000000000..674e9ada3
--- /dev/null
+++ b/infra/debian/compiler/docs/one-import.1
@@ -0,0 +1,35 @@
+.TH ONE-IMPORT "1" "August 2021" "one-import version 1.17.0" "User Commands"
+.SH NAME
+one-import \- convert various format to circle
+.SH SYNOPSIS
+usage: one\-import [\-h] [\-C CONFIG] [\-v] driver
+.SH DESCRIPTION
+\fBone\-import\fR is a command line tool to convert various format to circle.
+.SH OPTIONS
+.TP
+\fB\-h\fR, \fB\-\-help\fR
+show this help message and exit
+.TP
+\fB\-v\fR, \fB\-\-version\fR
+show program's version number and exit
+.TP
+\fB\-C\fR CONFIG, \fB\-\-config\fR CONFIG
+run with configuration file
+.TP
+\fBdriver\fR driver name to run (supported: tf, tflite, bcq, onnx)
+.SH COPYRIGHT
+Copyright \(co 2020\-2021 Samsung Electronics Co., Ltd. All Rights Reserved
+Licensed under the Apache License, Version 2.0
+https://github.com/Samsung/ONE
+.SH "SEE ALSO"
+The full documentation for
+.B one-import
+is maintained as a Texinfo manual. If the
+.B info
+and
+.B one-import
+programs are properly installed at your site, the command
+.IP
+.B info one-import
+.PP
+should give you access to the complete manual.
diff --git a/infra/debian/compiler/docs/one-infer.1 b/infra/debian/compiler/docs/one-infer.1
new file mode 100644
index 000000000..a1bafbb12
--- /dev/null
+++ b/infra/debian/compiler/docs/one-infer.1
@@ -0,0 +1,46 @@
+.TH ONE-INFER "1" "July 2022" "one-infer version 1.21.0" "User Commands"
+.SH NAME
+one-infer \- manual page for one-infer version 1.21.0
+.SH DESCRIPTION
+usage: one\-infer [\-h] [\-v] [\-C CONFIG] [\-d DRIVER | \fB\-b\fR BACKEND] [\-\-post\-process POST_PROCESS] [\-\-] [COMMANDS FOR BACKEND DRIVER]
+.PP
+command line tool to infer model
+.SS "optional arguments:"
+.TP
+\fB\-h\fR, \fB\-\-help\fR
+show this help message and exit
+.TP
+\fB\-v\fR, \fB\-\-version\fR
+show program's version number and exit
+.TP
+\fB\-V\fR, \fB\-\-verbose\fR
+output additional information to stdout or stderr
+.TP
+\fB\-C\fR CONFIG, \fB\-\-config\fR CONFIG
+run with configuration file
+.TP
+\fB\-d\fR DRIVER, \fB\-\-driver\fR DRIVER
+backend inference driver name to execute
+.TP
+\fB\-b\fR BACKEND, \fB\-\-backend\fR BACKEND
+backend name to use
+.TP
+\fB\-\-post\-process\fR POST_PROCESS
+post processing script to convert I/O data to standard
+format
+.SH COPYRIGHT
+Copyright \(co 2020\-2022 Samsung Electronics Co., Ltd. All Rights Reserved
+Licensed under the Apache License, Version 2.0
+https://github.com/Samsung/ONE
+.SH "SEE ALSO"
+The full documentation for
+.B one-infer
+is maintained as a Texinfo manual. If the
+.B info
+and
+.B one-infer
+programs are properly installed at your site, the command
+.IP
+.B info one-infer
+.PP
+should give you access to the complete manual.
diff --git a/infra/debian/compiler/docs/one-optimize.1 b/infra/debian/compiler/docs/one-optimize.1
new file mode 100644
index 000000000..58b2c60bd
--- /dev/null
+++ b/infra/debian/compiler/docs/one-optimize.1
@@ -0,0 +1,222 @@
+.TH ONE-OPTIMIZE "1" "August 2021" "one-optimize version 1.17.0" "User Commands"
+.SH NAME
+one-optimize \- optimize circle model
+.SH DESCRIPTION
+usage: one\-optimize [\-h] [\-v] [\-V] [\-C CONFIG] [\-p]
+.br
+[\-\-change_outputs CHANGE_OUTPUTS] [\-i INPUT_PATH]
+.br
+[\-o OUTPUT_PATH] [\-\-O1] [\-\-convert_nchw_to_nhwc]
+.br
+[\-\-nchw_to_nhwc_input_shape] [\-\-nchw_to_nhwc_output_shape]
+.br
+[\-\-fold_add_v2] [\-\-fold_cast] [\-\-fold_dequantize]
+.br
+[\-\-fold_sparse_to_dense] [\-\-forward_reshape_to_unaryop]
+.br
+[\-\-fuse_add_with_tconv] [\-\-fuse_batchnorm_with_conv]
+.br
+[\-\-fuse_batchnorm_with_dwconv]
+.br
+[\-\-fuse_batchnorm_with_tconv] [\-\-fuse_bcq]
+.br
+[\-\-fuse_preactivation_batchnorm]
+.br
+[\-\-make_batchnorm_gamma_positive]
+.br
+[\-\-fuse_activation_function] [\-\-fuse_instnorm]
+.br
+[\-\-replace_cw_mul_add_with_depthwise_conv]
+.br
+[\-\-remove_fakequant] [\-\-remove_quantdequant]
+.br
+[\-\-remove_redundant_reshape]
+.br
+[\-\-remove_redundant_transpose]
+.br
+[\-\-remove_unnecessary_reshape]
+.br
+[\-\-remove_unnecessary_slice]
+.br
+[\-\-remove_unnecessary_strided_slice]
+.br
+[\-\-remove_unnecessary_split] [\-\-resolve_customop_add]
+.br
+[\-\-resolve_customop_batchmatmul]
+.br
+[\-\-resolve_customop_matmul]
+.br
+[\-\-shuffle_weight_to_16x1float32]
+.br
+[\-\-substitute_pack_to_reshape]
+.br
+[\-\-substitute_squeeze_to_reshape]
+.br
+[\-\-substitute_transpose_to_reshape]
+.br
+[\-\-transform_min_max_to_relu6]
+.br
+[\-\-transform_min_relu_to_relu6]
+.PP
+\fBone\-optimize\fR is a command line tool to optimize circle model.
+.SH OPTIONS
+.TP
+\fB\-h\fR, \fB\-\-help\fR
+show this help message and exit
+.TP
+\fB\-v\fR, \fB\-\-version\fR
+show program's version number and exit
+.TP
+\fB\-V\fR, \fB\-\-verbose\fR
+output additional information to stdout or stderr
+.TP
+\fB\-C\fR CONFIG, \fB\-\-config\fR CONFIG
+run with configuration file
+.SS "arguments for utility:"
+.TP
+\fB\-p\fR, \fB\-\-generate_profile_data\fR
+generate profiling data
+.TP
+\fB\-\-change_outputs\fR CHANGE_OUTPUTS
+Experimental: Change first subgraph output nodes to
+CSV names
+.SS "arguments for optimization:"
+.TP
+\fB\-i\fR INPUT_PATH, \fB\-\-input_path\fR INPUT_PATH
+full filepath of the input file
+.TP
+\fB\-o\fR OUTPUT_PATH, \fB\-\-output_path\fR OUTPUT_PATH
+full filepath of the output file
+.TP
+\fB\-\-O1\fR
+enable O1 optimization pass
+.TP
+\fB\-\-convert_nchw_to_nhwc\fR
+Experimental: This will convert NCHW operators to NHWC
+under the assumption that input model is NCHW.
+.TP
+\fB\-\-nchw_to_nhwc_input_shape\fR
+convert the input shape of the model (argument for
+convert_nchw_to_nhwc)
+.TP
+\fB\-\-nchw_to_nhwc_output_shape\fR
+convert the output shape of the model (argument for
+convert_nchw_to_nhwc)
+.TP
+\fB\-\-fold_add_v2\fR
+fold AddV2 op with constant inputs
+.TP
+\fB\-\-fold_cast\fR
+fold Cast op with constant input
+.TP
+\fB\-\-fold_dequantize\fR
+fold Dequantize op
+.TP
+\fB\-\-fold_sparse_to_dense\fR
+fold SparseToDense op
+.TP
+\fB\-\-forward_reshape_to_unaryop\fR
+Forward Reshape op
+.TP
+\fB\-\-fuse_add_with_tconv\fR
+fuse Add op to Transposed Convolution op
+.TP
+\fB\-\-fuse_batchnorm_with_conv\fR
+fuse BatchNorm op to Convolution op
+.TP
+\fB\-\-fuse_batchnorm_with_dwconv\fR
+fuse BatchNorm op to Depthwise Convolution op
+.TP
+\fB\-\-fuse_batchnorm_with_tconv\fR
+fuse BatchNorm op to Transposed Convolution op
+.TP
+\fB\-\-fuse_bcq\fR
+apply Binary Coded Quantization
+.TP
+\fB\-\-fuse_preactivation_batchnorm\fR
+fuse BatchNorm operators of pre\-activations to
+Convolution op
+.TP
+\fB\-\-make_batchnorm_gamma_positive\fR
+make negative gamma of BatchNorm to a small positive
+value (1e\-10). Note that this pass can change the
+execution result of the model. So, use it only when
+the impact is known to be acceptable.
+.TP
+\fB\-\-fuse_activation_function\fR
+fuse Activation function to a preceding operator
+.TP
+\fB\-\-fuse_instnorm\fR
+fuse ops to InstanceNorm operator
+.TP
+\fB\-\-replace_cw_mul_add_with_depthwise_conv\fR
+replace channel\-wise Mul/Add with DepthwiseConv2D
+.TP
+\fB\-\-remove_fakequant\fR
+remove FakeQuant ops
+.TP
+\fB\-\-remove_quantdequant\fR
+remove Quantize\-Dequantize sequence
+.TP
+\fB\-\-remove_redundant_reshape\fR
+fuse or remove subsequent Reshape ops
+.TP
+\fB\-\-remove_redundant_transpose\fR
+fuse or remove subsequent Transpose ops
+.TP
+\fB\-\-remove_unnecessary_reshape\fR
+remove unnecessary reshape ops
+.TP
+\fB\-\-remove_unnecessary_slice\fR
+remove unnecessary slice ops
+.TP
+\fB\-\-remove_unnecessary_strided_slice\fR
+remove unnecessary strided slice ops
+.TP
+\fB\-\-remove_unnecessary_split\fR
+remove unnecessary split ops
+.TP
+\fB\-\-resolve_customop_add\fR
+convert Custom(Add) op to Add op
+.TP
+\fB\-\-resolve_customop_batchmatmul\fR
+convert Custom(BatchMatmul) op to BatchMatmul op
+.TP
+\fB\-\-resolve_customop_matmul\fR
+convert Custom(Matmul) op to Matmul op
+.TP
+\fB\-\-shuffle_weight_to_16x1float32\fR
+convert weight format of FullyConnected op to
+SHUFFLED16x1FLOAT32. Note that it only converts
+weights whose row is a multiple of 16
+.TP
+\fB\-\-substitute_pack_to_reshape\fR
+convert single input Pack op to Reshape op
+.TP
+\fB\-\-substitute_squeeze_to_reshape\fR
+convert certain condition Squeeze to Reshape
+.TP
+\fB\-\-substitute_transpose_to_reshape\fR
+convert certain condition Transpose to Reshape
+.TP
+\fB\-\-transform_min_max_to_relu6\fR
+transform Minimum\-Maximum pattern to Relu6 op
+.TP
+\fB\-\-transform_min_relu_to_relu6\fR
+transform Minimum(6)\-Relu pattern to Relu6 op
+.SH COPYRIGHT
+Copyright \(co 2020\-2021 Samsung Electronics Co., Ltd. All Rights Reserved
+Licensed under the Apache License, Version 2.0
+https://github.com/Samsung/ONE
+.SH "SEE ALSO"
+The full documentation for
+.B one-optimize
+is maintained as a Texinfo manual. If the
+.B info
+and
+.B one-optimize
+programs are properly installed at your site, the command
+.IP
+.B info one-optimize
+.PP
+should give you access to the complete manual.
diff --git a/infra/debian/compiler/docs/one-pack.1 b/infra/debian/compiler/docs/one-pack.1
new file mode 100644
index 000000000..dd0422146
--- /dev/null
+++ b/infra/debian/compiler/docs/one-pack.1
@@ -0,0 +1,42 @@
+.TH ONE-PACK "1" "August 2021" "one-pack version 1.17.0" "User Commands"
+.SH NAME
+one-pack \- package circle and metadata into nnpackage
+.SH DESCRIPTION
+usage: one\-pack [\-h] [\-v] [\-V] [\-C CONFIG] [\-i INPUT_PATH] [\-o OUTPUT_PATH]
+.PP
+\fBone\-pack\fR is a command line tool to package circle and metadata into nnpackage.
+.SH OPTIONS
+.TP
+\fB\-h\fR, \fB\-\-help\fR
+show this help message and exit
+.TP
+\fB\-v\fR, \fB\-\-version\fR
+show program's version number and exit
+.TP
+\fB\-V\fR, \fB\-\-verbose\fR
+output additional information to stdout or stderr
+.TP
+\fB\-C\fR CONFIG, \fB\-\-config\fR CONFIG
+run with configuration file
+.TP
+\fB\-i\fR INPUT_PATH, \fB\-\-input_path\fR INPUT_PATH
+full filepath of the input file
+.TP
+\fB\-o\fR OUTPUT_PATH, \fB\-\-output_path\fR OUTPUT_PATH
+full filepath of the output file
+.SH COPYRIGHT
+Copyright \(co 2020\-2021 Samsung Electronics Co., Ltd. All Rights Reserved
+Licensed under the Apache License, Version 2.0
+https://github.com/Samsung/ONE
+.SH "SEE ALSO"
+The full documentation for
+.B one-pack
+is maintained as a Texinfo manual. If the
+.B info
+and
+.B one-pack
+programs are properly installed at your site, the command
+.IP
+.B info one-pack
+.PP
+should give you access to the complete manual.
diff --git a/infra/debian/compiler/docs/one-partition.1 b/infra/debian/compiler/docs/one-partition.1
new file mode 100644
index 000000000..5b6fe933d
--- /dev/null
+++ b/infra/debian/compiler/docs/one-partition.1
@@ -0,0 +1,56 @@
+.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.6.
+.TH ONE-PARTITION "1" "June 2022" "one-partition version 1.21.0" "User Commands"
+.SH NAME
+one-partition \- manual page for one-partition version 1.21.0
+.SH DESCRIPTION
+usage: one\-partition [\-h] [\-v] [\-V] [\-C CONFIG] [\-\-backends BACKENDS]
+.TP
+[\-\-default DEFAULT] [\-\-part_file PART_FILE]
+[\-\-input_file INPUT_FILE] [\-\-work_path WORK_PATH]
+.PP
+command line tool to partition circle model by multiple backends
+.SS "optional arguments:"
+.TP
+\fB\-h\fR, \fB\-\-help\fR
+show this help message and exit
+.TP
+\fB\-v\fR, \fB\-\-version\fR
+show program's version number and exit
+.TP
+\fB\-V\fR, \fB\-\-verbose\fR
+output additional information to stdout or stderr
+.TP
+\fB\-C\fR CONFIG, \fB\-\-config\fR CONFIG
+run with configuration file
+.TP
+\fB\-\-backends\fR BACKENDS
+backends in CSV to use for partitioning
+.TP
+\fB\-\-default\fR DEFAULT
+default backend to assign
+.TP
+\fB\-\-part_file\fR PART_FILE
+partition file which provides backend to assign
+.TP
+\fB\-\-input_file\fR INPUT_FILE
+input circle model filename
+.TP
+\fB\-\-work_path\fR WORK_PATH
+work path of partition, input files exist and output
+files are produced
+.SH COPYRIGHT
+Copyright \(co 2020\-2022 Samsung Electronics Co., Ltd. All Rights Reserved
+Licensed under the Apache License, Version 2.0
+https://github.com/Samsung/ONE
+.SH "SEE ALSO"
+The full documentation for
+.B one-partition
+is maintained as a Texinfo manual. If the
+.B info
+and
+.B one-partition
+programs are properly installed at your site, the command
+.IP
+.B info one-partition
+.PP
+should give you access to the complete manual.
diff --git a/infra/debian/compiler/docs/one-profile.1 b/infra/debian/compiler/docs/one-profile.1
new file mode 100644
index 000000000..3952c4484
--- /dev/null
+++ b/infra/debian/compiler/docs/one-profile.1
@@ -0,0 +1,39 @@
+.TH ONE-PROFILE "1" "August 2021" "one-profile version 1.17.0" "User Commands"
+.SH NAME
+one-profile \- profile backend model file
+.SH DESCRIPTION
+usage: one\-profile [\-h] [\-v] [\-V] [\-C CONFIG] [\-b BACKEND] [\-\-] [COMMANDS FOR BACKEND]
+.PP
+\fBone\-profile\fR is a command line tool for profiling backend model.
+.SH OPTIONS
+.TP
+\fB\-h\fR, \fB\-\-help\fR
+show this help message and exit
+.TP
+\fB\-v\fR, \fB\-\-version\fR
+show program's version number and exit
+.TP
+\fB\-V\fR, \fB\-\-verbose\fR
+output additional information to stdout or stderr
+.TP
+\fB\-C\fR CONFIG, \fB\-\-config\fR CONFIG
+run with configuration file
+.TP
+\fB\-b\fR BACKEND, \fB\-\-backend\fR BACKEND
+backend name to use
+.SH COPYRIGHT
+Copyright \(co 2021 Samsung Electronics Co., Ltd. All Rights Reserved
+Licensed under the Apache License, Version 2.0
+https://github.com/Samsung/ONE
+.SH "SEE ALSO"
+The full documentation for
+.B one-profile
+is maintained as a Texinfo manual. If the
+.B info
+and
+.B one-profile
+programs are properly installed at your site, the command
+.IP
+.B info one-profile
+.PP
+should give you access to the complete manual.
diff --git a/infra/debian/compiler/docs/one-quantize.1 b/infra/debian/compiler/docs/one-quantize.1
new file mode 100644
index 000000000..43c4c0321
--- /dev/null
+++ b/infra/debian/compiler/docs/one-quantize.1
@@ -0,0 +1,83 @@
+.TH ONE-QUANTIZE "1" "August 2021" "one-quantize version 1.17.0" "User Commands"
+.SH NAME
+one-quantize \- quantize circle model
+.SH DESCRIPTION
+usage: one\-quantize [\-h] [\-v] [\-V] [\-C CONFIG] [\-i INPUT_PATH] [\-d INPUT_DATA]
+.br
+[\-f INPUT_DATA_FORMAT] [\-o OUTPUT_PATH] [\-p]
+.br
+[\-\-input_dtype INPUT_DTYPE]
+.br
+[\-\-quantized_dtype QUANTIZED_DTYPE]
+.br
+[\-\-granularity GRANULARITY]
+.br
+[\-\-min_percentile MIN_PERCENTILE]
+.br
+[\-\-max_percentile MAX_PERCENTILE] [\-\-mode MODE]
+.PP
+\fBone\-quantize\fR is a command line tool to quantize circle model.
+.SH OPTIONS
+.TP
+\fB\-h\fR, \fB\-\-help\fR
+show this help message and exit
+.TP
+\fB\-v\fR, \fB\-\-version\fR
+show program's version number and exit
+.TP
+\fB\-V\fR, \fB\-\-verbose\fR
+output additional information to stdout or stderr
+.TP
+\fB\-C\fR CONFIG, \fB\-\-config\fR CONFIG
+run with configuration file
+.TP
+\fB\-i\fR INPUT_PATH, \fB\-\-input_path\fR INPUT_PATH
+full filepath of the input file
+.TP
+\fB\-d\fR INPUT_DATA, \fB\-\-input_data\fR INPUT_DATA
+full filepath of the input data file. if not
+specified, run with random input data.
+.TP
+\fB\-o\fR OUTPUT_PATH, \fB\-\-output_path\fR OUTPUT_PATH
+full filepath of the output file
+.TP
+\fB\-p\fR, \fB\-\-generate_profile_data\fR
+generate profiling data
+.SS "arguments for quantization:"
+.TP
+\fB\-\-input_dtype\fR INPUT_DTYPE
+input data type (supported: float32, default=float32)
+.TP
+\fB\-\-quantized_dtype\fR QUANTIZED_DTYPE
+output quantized data type (supported: uint8, int16,
+default=uint8)
+.TP
+\fB\-\-granularity\fR GRANULARITY
+quantize granularity (supported: layer, channel,
+default=layer)
+.TP
+\fB\-\-min_percentile\fR MIN_PERCENTILE
+minimum percentile (0.0~100.0, default=1.0)
+.TP
+\fB\-\-max_percentile\fR MAX_PERCENTILE
+maximum percentile (0.0~100.0, default=99.0)
+.TP
+\fB\-\-mode\fR MODE
+record mode (supported: percentile/moving_average,
+default=percentile)
+.SH COPYRIGHT
+Copyright \(co 2020\-2021 Samsung Electronics Co., Ltd. All Rights Reserved
+Licensed under the Apache License, Version 2.0
+https://github.com/Samsung/ONE
+.SH "SEE ALSO"
+The full documentation for
+.B one-quantize
+is maintained as a Texinfo manual. If the
+.B info
+and
+.B one-quantize
+programs are properly installed at your site, the command
+.IP
+.B info one-quantize
+.PP
+should give you access to the complete manual.
diff --git a/infra/debian/compiler/docs/onecc.1 b/infra/debian/compiler/docs/onecc.1
new file mode 100644
index 000000000..352b30a00
--- /dev/null
+++ b/infra/debian/compiler/docs/onecc.1
@@ -0,0 +1,170 @@
+.\" Manpage for onecc.
+.\" Contact nnfw@samsung.com to correct errors or typos.
+.TH ONECC "1" "August 2021" "onecc version 1.17.0" "User Commands"
+.SH NAME
+onecc \- run ONE driver via several commands or configuration file
+.SH SYNOPSIS
+\fBonecc\fR [\-h] [\-v] [\-C CONFIG] [COMMAND <args>]
+.SH DESCRIPTION
+\fBonecc\fR is a command line tool to execute ONE driver via several commands or configuration file.
+.SS "Configuration file:"
+\fBonecc\fR takes input as a configuration file that supports ini format.
+A configuration file consists of sections, each led by a [section] header.
+Each section is the ONE driver you want to run, and consists of commands in a key/value combination to pass to the driver.
+.SH OPTIONS
+.TP
+\fB\-h\fR, \fB\-\-help\fR
+show this help message and exit
+.TP
+\fB\-v\fR, \fB\-\-version\fR
+show program's version number and exit
+.TP
+\fB\-V\fR, \fB\-\-verbose\fR
+output additional information to stdout or stderr
+.TP
+\fB\-C\fR CONFIG, \fB\-\-config\fR CONFIG
+run with configuration file
+.SS compile to circle model
+.TP
+\fBimport\fR
+Convert given model to circle. See one\-import(1) for details.
+.TP
+\fBoptimize\fR
+Optimize circle model. See one-optimize(1) for details.
+.TP
+\fBquantize\fR
+Quantize circle model. See one-quantize(1) for details.
+.SS package circle model
+.TP
+\fBpack\fR
+Package circle and metadata into nnpackage. See one-pack(1) for details.
+.SS run backend tools
+.TP
+\fBcodegen\fR
+Code generation tool. See one-codegen(1) for details.
+.TP
+\fBprofile\fR
+Profile backend model file. See one-profile(1) for details.
+.SH EXAMPLES
+.SS Use command line interface
+.TP
+\fBonecc import tf --v1 -i\fR \fIinput_path\fR \fB-o\fR \fIoutput_path\fR \fB-I\fR \fIinput_arrays\fR \fB-s\fR \fIinput_shapes\fR \fB-O\fR \fIoutput_arrays\fR
+import tf model
+.TP
+\fBonecc import tflite -i\fR \fIinput_path\fR \fB-o\fR \fIoutput_path\fR
+import tflite model
+.TP
+\fBonecc import onnx -i\fR \fIinput_path\fR \fB-o\fR \fIoutput_path\fR
+import onnx model
+.TP
+\fBonecc optimize -i\fR \fIinput_path\fR \fB-o\fR \fIoutput_path\fR \fIoptimize_arguments\fR
+optimize circle model
+.TP
+\fBonecc quantize -i\fR \fIinput_path\fR \fB-o\fR \fIoutput_path\fR \fB-d\fR \fIinput_data\fR
+quantize circle model
+.TP
+\fBonecc pack -i\fR \fIinput_path\fR \fB-o\fR \fIoutput_path\fR
+package circle and metadata into nnpackage
+.TP
+\fBonecc codegen -b\fR \fIbackend\fR \fB--\fR \fIbackends_arguments\fR
+generate backend code
+.TP
+\fBonecc profile -b\fR \fIbackend\fR \fB--\fR \fIbackends_arguments\fR
+profile backend model
+.PP
+.SS Use configuration file
+.PP
+The configuration file should be written in the following format:
+.IP
+[onecc]
+.br
+one-import-tf=True
+.br
+one-import-tflite=False
+.br
+one-import-bcq=False
+.br
+one-import-onnx=False
+.br
+one-optimize=True
+.br
+one-quantize=True
+.br
+one-pack=True
+.br
+one-codegen=True
+.br
+one-profile=True
+.IP
+[one-import-tf]
+.br
+input_path=/path/to/inception_v3.pb
+.br
+output_path=inception_v3.circle
+.br
+input_arrays=input
+.br
+input_shapes=1,299,299,3
+.br
+output_arrays=InceptionV3/Predictions/Reshape_1
+.br
+converter_version=v1
+.br
+model_format=graph_def
+.IP
+[one-optimize]
+.br
+input_path=inception_v3.circle
+.br
+output_path=inception_v3.opt.circle
+.br
+generate_profile_data=False
+.IP
+[one-quantize]
+.br
+input_path=inception_v3.opt.circle
+.br
+output_path=inception_v3.quantized.circle
+.br
+input_data=inception_v3_test_data.h5
+.IP
+[one-pack]
+.br
+input_path=inception_v3.quantized.circle
+.br
+output_path=inception_v3_pack
+.IP
+[one-codegen]
+.br
+backend=dummy
+.br
+command=-o sample.out inception_v3.quantized.circle
+.IP
+[one-profile]
+.br
+backend=dummy
+.br
+command=sample.out
+.TP
+\fBonecc -C\fR \fIconfiguration file\fR
+Run ONE driver according to configuration section parameter
+.PP
+\fBonecc\fR section decides whether to use each driver or not.
+If the value is False, even if the corresponding section exists, the driver won't be executed.
+.SH COPYRIGHT
+Copyright \(co 2020\-2021 Samsung Electronics Co., Ltd. All Rights Reserved
+Licensed under the Apache License, Version 2.0
+https://github.com/Samsung/ONE
+.SH "SEE ALSO"
+The full documentation for
+.B onecc
+is maintained as a Texinfo manual. If the
+.B info
+and
+.B onecc
+programs are properly installed at your site, the command
+.IP
+.B info onecc
+.PP
+should give you access to the complete manual.
+
diff --git a/infra/debian/compiler/one-compiler-dev.install b/infra/debian/compiler/one-compiler-dev.install
new file mode 100644
index 000000000..47f53ad20
--- /dev/null
+++ b/infra/debian/compiler/one-compiler-dev.install
@@ -0,0 +1,10 @@
+# {FILES_TO_INSTALL} {DEST_DIR}
+# bin
+usr/bin/circledump usr/share/one/bin/
+usr/bin/circle-opselector usr/share/one/bin/
+usr/bin/circle-tensordump usr/share/one/bin/
+usr/bin/tflchef usr/share/one/bin/
+usr/bin/tflchef-file usr/share/one/bin/
+usr/bin/tflchef-reverse usr/share/one/bin/
+# include
+usr/include/* usr/share/one/include/
diff --git a/infra/debian/compiler/one-compiler-dev.links b/infra/debian/compiler/one-compiler-dev.links
new file mode 100644
index 000000000..89a654db9
--- /dev/null
+++ b/infra/debian/compiler/one-compiler-dev.links
@@ -0,0 +1,6 @@
+# bin
+usr/share/one/bin/circledump usr/bin/circledump
+usr/share/one/bin/circle-tensordump usr/bin/circle-tensordump
+usr/share/one/bin/tflchef usr/bin/tflchef
+usr/share/one/bin/tflchef-file usr/bin/tflchef-file
+usr/share/one/bin/tflchef-reverse usr/bin/tflchef-reverse
diff --git a/infra/debian/compiler/one-compiler-test.install b/infra/debian/compiler/one-compiler-test.install
new file mode 100644
index 000000000..fb9714da0
--- /dev/null
+++ b/infra/debian/compiler/one-compiler-test.install
@@ -0,0 +1,5 @@
+# {FILES_TO_INSTALL} {DEST_DIR}
+# bin
+usr/bin/luci_eval_driver usr/share/one/bin/
+# test
+usr/test/* usr/share/one/test/
diff --git a/infra/debian/compiler/one-compiler.install b/infra/debian/compiler/one-compiler.install
new file mode 100644
index 000000000..700cc2d0a
--- /dev/null
+++ b/infra/debian/compiler/one-compiler.install
@@ -0,0 +1,61 @@
+# {FILES_TO_INSTALL} {DEST_DIR}
+# bin
+usr/bin/circle2circle usr/share/one/bin/
+usr/bin/circle-eval-diff usr/share/one/bin/
+usr/bin/circle-interpreter usr/share/one/bin/
+usr/bin/circle-mpqsolver usr/share/one/bin/
+usr/bin/circle-operator usr/share/one/bin/
+usr/bin/circle-partitioner usr/share/one/bin/
+usr/bin/circle-quantizer usr/share/one/bin/
+usr/bin/dalgona usr/share/one/bin/
+usr/bin/generate_bcq_metadata.py usr/share/one/bin/
+usr/bin/generate_bcq_output_arrays.py usr/share/one/bin/
+usr/bin/model2nnpkg usr/share/one/bin/
+usr/bin/onecc usr/share/one/bin/
+usr/bin/onecc.template.cfg usr/share/one/bin/
+usr/bin/one-build usr/share/one/bin/
+usr/bin/one-build.template.cfg usr/share/one/bin/
+usr/bin/one-codegen usr/share/one/bin/
+usr/bin/one-import usr/share/one/bin/
+usr/bin/one-import-bcq usr/share/one/bin/
+usr/bin/one-import-onnx usr/share/one/bin/
+usr/bin/one-import-tf usr/share/one/bin/
+usr/bin/one-import-tflite usr/share/one/bin/
+usr/bin/one-infer usr/share/one/bin/
+usr/bin/one-optimize usr/share/one/bin/
+usr/bin/one-pack usr/share/one/bin/
+usr/bin/one-partition usr/share/one/bin/
+usr/bin/one-prepare-venv usr/share/one/bin/
+usr/bin/one-profile usr/share/one/bin/
+usr/bin/one-quantize usr/share/one/bin/
+usr/bin/one-version usr/share/one/bin/
+usr/bin/onelib/backends.py usr/share/one/bin/onelib/
+usr/bin/onelib/constant.py usr/share/one/bin/onelib/
+usr/bin/onelib/make_cmd.py usr/share/one/bin/onelib/
+usr/bin/onelib/CfgRunner.py usr/share/one/bin/onelib/
+usr/bin/onelib/OptionBuilder.py usr/share/one/bin/onelib/
+usr/bin/onelib/TopologicalSortHelper.py usr/share/one/bin/onelib/
+usr/bin/onelib/WorkflowRunner.py usr/share/one/bin/onelib/
+usr/bin/onelib/Command.py usr/share/one/bin/onelib/
+usr/bin/onelib/utils.py usr/share/one/bin/onelib/
+usr/bin/onelib/export_constant.py usr/share/one/bin/onelib/
+usr/bin/onnx_legalizer.py usr/share/one/bin/
+usr/bin/rawdata2hdf5 usr/share/one/bin/
+usr/bin/record-minmax usr/share/one/bin/
+usr/bin/tf2nnpkg usr/share/one/bin/
+usr/bin/tf2tfliteV2.py usr/share/one/bin/
+usr/bin/tflite2circle usr/share/one/bin/
+usr/bin/visq usr/share/one/bin/
+usr/bin/visqlib/DumpFakeQuantFM.py usr/share/one/bin/visqlib/
+usr/bin/visqlib/DumpFP32FM.py usr/share/one/bin/visqlib/
+usr/bin/visqlib/Palette.py usr/share/one/bin/visqlib/
+usr/bin/visqlib/QErrorComputer.py usr/share/one/bin/visqlib/
+usr/bin/visqlib/Util.py usr/share/one/bin/visqlib/
+usr/bin/visqlib/DotBuilder.py usr/share/one/bin/visqlib/
+usr/bin/circle/*.py usr/share/one/bin/circle/
+# lib
+usr/lib/* usr/share/one/lib/
+# doc
+usr/doc/* usr/share/one/doc/
+# optimization
+usr/optimization/* usr/share/one/optimization/
diff --git a/infra/debian/compiler/one-compiler.links b/infra/debian/compiler/one-compiler.links
new file mode 100644
index 000000000..9e464352a
--- /dev/null
+++ b/infra/debian/compiler/one-compiler.links
@@ -0,0 +1,17 @@
+# bin
+usr/share/one/bin/one-build usr/bin/one-build
+usr/share/one/bin/onecc usr/bin/onecc
+# lib
+usr/share/one/lib/libloco.so usr/lib/libloco.so
+usr/share/one/lib/libluci_env.so usr/lib/libluci_env.so
+usr/share/one/lib/libluci_export.so usr/lib/libluci_export.so
+usr/share/one/lib/libluci_import.so usr/lib/libluci_import.so
+usr/share/one/lib/libluci_interpreter.so usr/lib/libluci_interpreter.so
+usr/share/one/lib/libluci_lang.so usr/lib/libluci_lang.so
+usr/share/one/lib/libluci_logex.so usr/lib/libluci_logex.so
+usr/share/one/lib/libluci_log.so usr/lib/libluci_log.so
+usr/share/one/lib/libluci_partition.so usr/lib/libluci_partition.so
+usr/share/one/lib/libluci_pass.so usr/lib/libluci_pass.so
+usr/share/one/lib/libluci_profile.so usr/lib/libluci_profile.so
+usr/share/one/lib/libluci_plan.so usr/lib/libluci_plan.so
+usr/share/one/lib/libluci_service.so usr/lib/libluci_service.so
diff --git a/infra/debian/compiler/one-compiler.manpages b/infra/debian/compiler/one-compiler.manpages
new file mode 100644
index 000000000..e0284ae4e
--- /dev/null
+++ b/infra/debian/compiler/one-compiler.manpages
@@ -0,0 +1,14 @@
+debian/docs/one-build.1
+debian/docs/one-codegen.1
+debian/docs/one-infer.1
+debian/docs/one-import.1
+debian/docs/one-import-bcq.1
+debian/docs/one-import-onnx.1
+debian/docs/one-import-tf.1
+debian/docs/one-import-tflite.1
+debian/docs/one-optimize.1
+debian/docs/one-pack.1
+debian/docs/one-partition.1
+debian/docs/one-profile.1
+debian/docs/one-quantize.1
+debian/docs/onecc.1
diff --git a/infra/debian/compiler/postinst b/infra/debian/compiler/postinst
new file mode 100644
index 000000000..d84e8e042
--- /dev/null
+++ b/infra/debian/compiler/postinst
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+# https://www.debian.org/doc/debian-policy/ch-maintainerscripts.html
+# Broadly speaking, the `postinst` is called after a package is unpacked.
+
+set -e
+
+# This script is invoked as root except environmental variables,
+# which causes invalid permission problem.
+# e.g. When `pip` installs user packages, it proceeds based on $HOME.
+# For proper installation, $HOME should be root.
+su - $(whoami) -p -c '/usr/share/one/bin/one-prepare-venv' # $(whoami) = root
diff --git a/infra/debian/compiler/postrm b/infra/debian/compiler/postrm
new file mode 100644
index 000000000..2972f28db
--- /dev/null
+++ b/infra/debian/compiler/postrm
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+set -e
+
+case "$1" in
+ remove|purge)
+ rm -rf /usr/share/one/
+ ;;
+ upgrade)
+ # DO NOTHING
+ ;;
+ failed-upgrade|abort-install|abort-upgrade)
+ # DO NOTHING
+ ;;
+ *)
+ # DO NOTHING
+ ;;
+esac
diff --git a/infra/debian/compiler/rules b/infra/debian/compiler/rules
new file mode 100755
index 000000000..e83680da8
--- /dev/null
+++ b/infra/debian/compiler/rules
@@ -0,0 +1,19 @@
+#!/usr/bin/make -f
+export DH_VERBOSE = 1
+export NNAS_BUILD_PREFIX = build
+export PRESET = 20230413
+export _DESTDIR = debian/tmp/usr
+
+%:
+ dh $@
+
+override_dh_auto_build:
+ ./nnas create-package --preset $(PRESET) --prefix "$(_DESTDIR)"
+
+override_dh_auto_install:
+ cmake --build "$(NNAS_BUILD_PREFIX)/nncc" -- install
+
+override_dh_install:
+ install -T -m 755 -D "infra/packaging/res/tf2nnpkg.${PRESET}" "$(_DESTDIR)/bin/tf2nnpkg"
+ dh_install
+
diff --git a/infra/debian/compiler/source/format b/infra/debian/compiler/source/format
new file mode 100644
index 000000000..89ae9db8f
--- /dev/null
+++ b/infra/debian/compiler/source/format
@@ -0,0 +1 @@
+3.0 (native)
diff --git a/infra/debian/compiler/source/local-options b/infra/debian/compiler/source/local-options
new file mode 100644
index 000000000..296a73032
--- /dev/null
+++ b/infra/debian/compiler/source/local-options
@@ -0,0 +1,2 @@
+# This is for reproducible building. Otherwise, `debuild` recognizes build artifacts as source files.
+diff-ignore="build|externals"
diff --git a/infra/debian/runtime/changelog b/infra/debian/runtime/changelog
new file mode 100644
index 000000000..e07c50c21
--- /dev/null
+++ b/infra/debian/runtime/changelog
@@ -0,0 +1,38 @@
+one (1.21.0) bionic; urgency=low
+
+ * Runtime supports to run nnpackage with two models
+ * Conv2D and Depthwise Conv2D supports per-channel quantization of uint8 type.
+ * TRIX backend supports batch execution which run in parallel with multicore
+
+ -- Chunseok Lee <chunseok.lee@samsung.com> Tue, 06 Sep 2022 12:00:00 +0900
+
+one (1.20.0) bionic; urgency=low
+
+ * Introduce TRIX backend
+ * API supports new data type NNFW_TYPE_TENSOR_QUANT16_SYMM_SIGNED
+
+ -- Chunseok Lee <chunseok.lee@samsung.com> Wed, 26 Apr 2022 12:00:00 +0900
+
+one (1.19.0) bionic; urgency=low
+
+ * Synch up version with ONE Compiler
+
+ -- Chunseok Lee <chunseok.lee@samsung.com> Wed, 10 Nov 2021 14:23:00 +0900
+
+one (1.18.0) bionic; urgency=low
+
+ * Synch up version with ONE Compiler
+
+ -- Chunseok Lee <chunseok.lee@samsung.com> Fri, 15 Oct 2021 15:23:00 +0900
+
+one (1.17.0) bionic; urgency=low
+
+ * New gpu_gl backend supports the following operations : Add, Convolution, Depthwise Convolution, Pooling, Reshape, Relu, Softmax
+
+ -- Chunseok Lee <chunseok.lee@samsung.com> Fri, 20 Aug 2021 17:00:00 +0900
+
+one (1.16.0) bionic; urgency=low
+
+ * Initial release.
+
+ -- Chunseok Lee <chunseok.lee@samsung.com> Mon, 05 Jul 2021 17:11:00 +0900
diff --git a/infra/debian/runtime/compat b/infra/debian/runtime/compat
new file mode 100644
index 000000000..ec635144f
--- /dev/null
+++ b/infra/debian/runtime/compat
@@ -0,0 +1 @@
+9
diff --git a/infra/debian/runtime/control b/infra/debian/runtime/control
new file mode 100644
index 000000000..20543baee
--- /dev/null
+++ b/infra/debian/runtime/control
@@ -0,0 +1,19 @@
+Source: one
+Section: devel
+Priority: extra
+Maintainer: Neural Network Acceleration Solution Developers <nnfw@samsung.com>
+Build-Depends: cmake, debhelper (>=9), dh-python, python3-all
+Standards-Version: 3.9.8
+Homepage: https://github.com/Samsung/ONE
+
+Package: nnfw
+Architecture: amd64
+Multi-Arch: same
+Depends: ${shlibs:Depends}, ${misc:Depends}
+Description: one-runtime package
+
+Package: nnfw-dev
+Architecture: amd64
+Multi-Arch: same
+Depends: nnfw, ${shlibs:Depends}, ${misc:Depends}
+Description: one-runtime development package
diff --git a/infra/debian/runtime/copyright b/infra/debian/runtime/copyright
new file mode 100644
index 000000000..bb64695a4
--- /dev/null
+++ b/infra/debian/runtime/copyright
@@ -0,0 +1,3 @@
+Files: *
+License: Proprietary
+Copyright (c) <2018> <Samsung Electronics Co.,Ltd.>
diff --git a/infra/debian/runtime/nnfw-dev.install b/infra/debian/runtime/nnfw-dev.install
new file mode 100644
index 000000000..f246e7c24
--- /dev/null
+++ b/infra/debian/runtime/nnfw-dev.install
@@ -0,0 +1,4 @@
+# {FILES_TO_INSTALL} {DEST_DIR}
+# include
+usr/include/nnfw usr/include/
+usr/lib/pkgconfig/*.pc usr/lib/pkgconfig/
diff --git a/infra/debian/runtime/nnfw.install b/infra/debian/runtime/nnfw.install
new file mode 100644
index 000000000..44be07c9c
--- /dev/null
+++ b/infra/debian/runtime/nnfw.install
@@ -0,0 +1,3 @@
+# {FILES_TO_INSTALL} {DEST_DIR}
+# lib
+usr/lib/*.so usr/lib/
diff --git a/infra/debian/runtime/rules b/infra/debian/runtime/rules
new file mode 100755
index 000000000..a228196e9
--- /dev/null
+++ b/infra/debian/runtime/rules
@@ -0,0 +1,22 @@
+#!/usr/bin/make -f
+DEBVER := $(shell dpkg-parsechangelog -SVersion)
+export DH_VERBOSE = 1
+export _DESTDIR = debian/tmp/
+export BUILD_TYPE=release
+export OPTIONS=-DBUILD_LOGGING=0 -DBUILD_TFLITE_COMPARATOR_TEST_TOOL=0 -DBUILD_ONERT_RUN=0 -DBUILD_TFLITE_RUN=0 -DBUILD_RUNTIME_NNAPI_TEST=0 -DBUILD_TFLITE_VANILLA_RUN=0 -DBUILD_TENSORFLOW_LITE_2_8_0=0 -DBUILD_TENSORFLOW_LITE=0
+export DEBIAN_BUILD=1
+export INSTALL_PATH=debian/tmp/usr/
+%:
+ dh $@
+
+override_dh_auto_build:
+ make -f Makefile.template
+override_dh_auto_install:
+ make -f Makefile.template install
+override_dh_install:
+ install -d debian/tmp/usr/lib/pkgconfig
+ sed -i 's:@libdir@:\/usr\/lib:g' ./packaging/nnfw.pc.in
+ sed -i 's:@includedir@:\/usr\/include:g' ./packaging/nnfw.pc.in
+ sed -i 's:@version@:${DEBVER}:g' ./packaging/nnfw.pc.in
+ install -m 0644 packaging/nnfw.pc.in -T debian/tmp/usr/lib/pkgconfig/nnfw.pc
+ dh_install
diff --git a/infra/debian/runtime/source/format b/infra/debian/runtime/source/format
new file mode 100644
index 000000000..89ae9db8f
--- /dev/null
+++ b/infra/debian/runtime/source/format
@@ -0,0 +1 @@
+3.0 (native)
diff --git a/infra/debian/runtime/source/local-options b/infra/debian/runtime/source/local-options
new file mode 100644
index 000000000..296a73032
--- /dev/null
+++ b/infra/debian/runtime/source/local-options
@@ -0,0 +1,2 @@
+# This is for reproducible building. Otherwise, `debuild` recognizes build artifacts as source files.
+diff-ignore="build|externals"
diff --git a/infra/docker/Dockerfile b/infra/docker/Dockerfile
deleted file mode 100644
index 052cc4fb6..000000000
--- a/infra/docker/Dockerfile
+++ /dev/null
@@ -1,66 +0,0 @@
-FROM ubuntu:16.04
-
-ARG UBUNTU_MIRROR
-
-RUN if [ -n "$http_proxy" ] ; then echo "Acquire::http::proxy \"${http_proxy}\";" >> /etc/apt/apt.conf ; fi
-RUN if [ -n "$https_proxy" ] ; then echo "Acquire::https::proxy \"${https_proxy}\";" >> /etc/apt/apt.conf ; fi
-RUN if [ -n "$UBUNTU_MIRROR" ] ; then sed "s/archive.ubuntu.com/${UBUNTU_MIRROR}/g" -i /etc/apt/sources.list ; fi
-
-# Install 'add-apt-repository'
-RUN apt-get update && apt-get -qqy install software-properties-common
-
-# Build tool
-RUN apt-get update && apt-get -qqy install build-essential cmake scons git lcov
-
-# Install extra dependencies (Caffe, nnkit)
-RUN apt-get update && apt-get -qqy install libboost-all-dev libgflags-dev libgoogle-glog-dev libatlas-base-dev libhdf5-dev
-
-# Install protocol buffer
-RUN apt-get update && apt-get -qqy install libprotobuf-dev protobuf-compiler
-
-# Additonal tools
-RUN apt-get update && apt-get -qqy install doxygen graphviz wget unzip clang-format-3.9 python3 python3-pip python3-venv hdf5-tools pylint
-RUN pip3 install --upgrade pip
-RUN pip3 install yapf==0.22.0 numpy
-
-# Install google test (source)
-RUN apt-get update && apt-get -qqy install libgtest-dev
-
-###
-### NOTE: Don't add new package install using apt-get or pip below this line
-###
-
-# Install native build tool gcc version 6.x
-RUN add-apt-repository ppa:ubuntu-toolchain-r/test && apt-get update && apt-get -qqy install gcc-6 g++-6
-RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-6 60 --slave /usr/bin/g++ g++ /usr/bin/g++-6 && update-alternatives --config gcc
-
-# Install cross build tool gcc version 6.x
-RUN wget https://releases.linaro.org/components/toolchain/binaries/6.3-2017.02/arm-linux-gnueabihf/gcc-linaro-6.3.1-2017.02-x86_64_arm-linux-gnueabihf.tar.xz -O gcc-hardfp.tar.xz -nv
-RUN wget https://releases.linaro.org/components/toolchain/binaries/6.2-2016.11/arm-linux-gnueabi/gcc-linaro-6.2.1-2016.11-x86_64_arm-linux-gnueabi.tar.xz -O gcc-softfp.tar.xz -nv
-RUN wget https://releases.linaro.org/components/toolchain/binaries/6.2-2016.11/aarch64-linux-gnu/gcc-linaro-6.2.1-2016.11-x86_64_aarch64-linux-gnu.tar.xz -O gcc-aarch64.tar.xz -nv
-RUN tar -xf gcc-hardfp.tar.xz -C /opt/ && rm -rf gcc-hardfp.tar.xz
-RUN tar -xf gcc-softfp.tar.xz -C /opt/ && rm -rf gcc-softfp.tar.xz
-RUN tar -xf gcc-aarch64.tar.xz -C /opt/ && rm -rf gcc-aarch64.tar.xz
-ENV PATH "/opt/gcc-linaro-6.2.1-2016.11-x86_64_arm-linux-gnueabi/bin:/opt/gcc-linaro-6.3.1-2017.02-x86_64_arm-linux-gnueabihf/bin:/opt/gcc-linaro-6.2.1-2016.11-x86_64_aarch64-linux-gnu/bin:$PATH"
-
-###
-### NOTE: Don't add build & install process using installed buildtool above this line
-###
-
-# Build and install google test static libraries
-WORKDIR /root/gtest
-RUN cmake /usr/src/gtest
-RUN make
-RUN mv *.a /usr/lib
-WORKDIR /root
-RUN rm -rf gtest
-
-# Install gbs & sdb
-RUN echo 'deb [trusted=yes] http://download.tizen.org/tools/latest-release/Ubuntu_16.04/ /' | cat >> /etc/apt/sources.list
-RUN apt-get update && apt-get -qqy install gbs
-RUN wget http://download.tizen.org/sdk/tizenstudio/official/binary/sdb_3.1.4_ubuntu-64.zip -O sdb.zip
-RUN unzip -d tmp sdb.zip && rm sdb.zip
-RUN cp tmp/data/tools/sdb /usr/bin/. && rm -rf tmp
-
-# Clean archives (to reduce image size)
-RUN apt-get clean -y
diff --git a/infra/docker/Dockerfile.1804 b/infra/docker/Dockerfile.1804
deleted file mode 100644
index cc31bba1f..000000000
--- a/infra/docker/Dockerfile.1804
+++ /dev/null
@@ -1,41 +0,0 @@
-FROM ubuntu:18.04
-
-ARG UBUNTU_MIRROR
-
-# Install 'add-apt-repository'
-RUN apt-get update && apt-get -qqy install software-properties-common
-
-# Build tool
-RUN apt-get update && apt-get -qqy install build-essential cmake scons git lcov g++-arm-linux-gnueabihf g++-aarch64-linux-gnu
-
-# Install extra dependencies (Caffe, nnkit)
-RUN apt-get update && apt-get -qqy install libboost-all-dev libgflags-dev libgoogle-glog-dev libatlas-base-dev libhdf5-dev
-
-# Install protocol buffer
-RUN apt-get update && apt-get -qqy install libprotobuf-dev protobuf-compiler
-
-# Additonal tools
-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -qqy install doxygen graphviz wget unzip clang-format-3.9 python3 python3-pip python3-venv hdf5-tools pylint
-RUN pip3 install --upgrade pip
-RUN pip3 install yapf==0.22.0 numpy
-
-# Install google test (source)
-RUN apt-get update && apt-get -qqy install libgtest-dev
-
-# Build and install google test static libraries
-WORKDIR /root/gtest
-RUN cmake /usr/src/gtest
-RUN make
-RUN mv *.a /usr/lib
-WORKDIR /root
-RUN rm -rf gtest
-
-# Install gbs & sdb
-RUN echo 'deb [trusted=yes] http://download.tizen.org/tools/latest-release/Ubuntu_18.04/ /' | cat >> /etc/apt/sources.list
-RUN apt-get update && apt-get -qqy install gbs
-RUN wget http://download.tizen.org/sdk/tizenstudio/official/binary/sdb_3.1.4_ubuntu-64.zip -O sdb.zip
-RUN unzip -d tmp sdb.zip && rm sdb.zip
-RUN cp tmp/data/tools/sdb /usr/bin/. && rm -rf tmp
-
-# Clean archives (to reduce image size)
-RUN apt-get clean -y
diff --git a/infra/docker/bionic/Dockerfile b/infra/docker/bionic/Dockerfile
new file mode 100644
index 000000000..383fddc2d
--- /dev/null
+++ b/infra/docker/bionic/Dockerfile
@@ -0,0 +1,145 @@
+# Copyright 2016-2020 Jing Li
+# Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM ubuntu:18.04
+
+ARG UBUNTU_MIRROR
+
+# Install 'add-apt-repository'
+RUN apt-get update && apt-get -qqy install software-properties-common
+
+# Git repo for latest version (github checkout@v2 action requires v2.18)
+RUN add-apt-repository ppa:git-core/ppa -y
+
+# Build tool
+RUN apt-get update && apt-get -qqy install build-essential cmake scons git g++-arm-linux-gnueabihf g++-aarch64-linux-gnu
+
+# ARM none eabi build tool
+RUN apt-get update && apt-get -qqy install gcc-arm-none-eabi
+
+# Debian build tool
+RUN apt-get update && apt-get -qqy install fakeroot devscripts debhelper python3-all
+
+# Install extra dependencies (Caffe, nnkit)
+RUN apt-get update && apt-get -qqy install libboost-all-dev libgflags-dev libgoogle-glog-dev libatlas-base-dev libhdf5-dev
+
+# Install protocol buffer
+RUN apt-get update && apt-get -qqy install libprotobuf-dev protobuf-compiler
+
+# Additional tools
+RUN apt-get update && \
+ DEBIAN_FRONTEND=noninteractive \
+ apt-get -qqy install doxygen graphviz wget zip unzip clang-format-8 python3 python3-pip python3-venv python3-dev hdf5-tools pylint curl
+RUN apt-get update && apt-get -qqy install python3.8 python3.8-venv python3.8-dev
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install yapf==0.22.0 numpy flatbuffers
+RUN python3.8 -m pip install --upgrade pip
+RUN python3.8 -m pip install numpy flatbuffers
+
+# Install google test (source)
+RUN apt-get update && apt-get -qqy install libgtest-dev
+
+# Install build tool gcc version 8.x and set alternative link (c++17 support)
+RUN apt-get update && apt-get -qqy install g++-8 g++-8-arm-linux-gnueabihf g++-8-aarch64-linux-gnu
+RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-8 80 \
+ --slave /usr/bin/g++ g++ /usr/bin/g++-8 \
+ --slave /usr/bin/gcov gcov /usr/bin/gcov-8
+RUN update-alternatives --install /usr/bin/arm-linux-gnueabihf-gcc arm-linux-gnueabihf-gcc /usr/bin/arm-linux-gnueabihf-gcc-8 80 \
+ --slave /usr/bin/arm-linux-gnueabihf-g++ arm-linux-gnueabihf-g++ /usr/bin/arm-linux-gnueabihf-g++-8 \
+ --slave /usr/bin/arm-linux-gnueabihf-gcov arm-linux-gnueabihf-gcov /usr/bin/arm-linux-gnueabihf-gcov-8
+RUN update-alternatives --install /usr/bin/aarch64-linux-gnu-gcc aarch64-linux-gnu-gcc /usr/bin/aarch64-linux-gnu-gcc-8 80 \
+ --slave /usr/bin/aarch64-linux-gnu-g++ aarch64-linux-gnu-g++ /usr/bin/aarch64-linux-gnu-g++-8 \
+ --slave /usr/bin/aarch64-linux-gnu-gcov aarch64-linux-gnu-gcov /usr/bin/aarch64-linux-gnu-gcov-8
+
+# Install lcov 1.14-2 for gcc-8 support
+# Default version lcov 1.13-3 can't support gcc-8
+# lcov 1.13-4 with gcc-8 have bug: reports no coverage for class declaration
+WORKDIR /root/lcov
+RUN wget http://archive.ubuntu.com/ubuntu/pool/universe/l/lcov/lcov_1.14-2_all.deb
+RUN apt-get update && apt-get -qqy install libperlio-gzip-perl libjson-perl
+RUN dpkg -i lcov_1.14-2_all.deb
+WORKDIR /root
+RUN rm -rf /root/lcov
+
+# Build and install google test static libraries
+WORKDIR /root/gtest
+RUN cmake /usr/src/gtest
+RUN make
+RUN mv *.a /usr/lib
+WORKDIR /root
+RUN rm -rf gtest
+
+# Install gbs & sdb
+RUN echo 'deb [trusted=yes] http://download.tizen.org/tools/latest-release/Ubuntu_18.04/ /' | cat >> /etc/apt/sources.list
+RUN apt-get update && apt-get -qqy install gbs
+RUN wget http://download.tizen.org/sdk/tizenstudio/official/binary/sdb_3.1.4_ubuntu-64.zip -O sdb.zip
+RUN unzip -d tmp sdb.zip && rm sdb.zip
+RUN cp tmp/data/tools/sdb /usr/bin/. && rm -rf tmp/*
+
+# Install java
+RUN apt-get install -y --no-install-recommends openjdk-8-jdk
+
+# download and install Gradle
+# https://services.gradle.org/distributions/
+ARG GRADLE_VERSION=6.4.1
+ARG GRADLE_DIST=bin
+RUN cd /opt && \
+ wget -q https://services.gradle.org/distributions/gradle-${GRADLE_VERSION}-${GRADLE_DIST}.zip && \
+ unzip gradle*.zip && \
+ ls -d */ | sed 's/\/*$//g' | xargs -I{} mv {} gradle && \
+ rm gradle*.zip
+
+# download and install Android SDK
+# https://developer.android.com/studio#command-tools
+ARG ANDROID_SDK_VERSION=6514223
+ENV ANDROID_SDK_ROOT /opt/android-sdk
+RUN mkdir -p ${ANDROID_SDK_ROOT}/cmdline-tools && \
+ wget -q https://dl.google.com/android/repository/commandlinetools-linux-${ANDROID_SDK_VERSION}_latest.zip && \
+ unzip *tools*linux*.zip -d ${ANDROID_SDK_ROOT}/cmdline-tools && \
+ rm *tools*linux*.zip
+
+# accept the license agreements of the SDK components
+RUN mkdir -p ${ANDROID_SDK_ROOT}/licenses
+RUN echo 24333f8a63b6825ea9c5514f83c2829b004d1fee > ${ANDROID_SDK_ROOT}/licenses/android-sdk-license
+RUN echo d56f5187479451eabf01fb78af6dfcb131a6481e >> ${ANDROID_SDK_ROOT}/licenses/android-sdk-license
+
+# Env variable for gradle build
+ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64
+ENV GRADLE_HOME /opt/gradle
+ENV PATH ${PATH}:${GRADLE_HOME}/bin:${ANDROID_SDK_ROOT}/cmdline-tools/tools/bin:${ANDROID_SDK_ROOT}/platform-tools
+ENV ANDROID_HOME ${ANDROID_SDK_ROOT}
+
+# Install NDK
+RUN sdkmanager --install "ndk;20.0.5594570"
+RUN sdkmanager "platform-tools"
+
+# Env for ko encoding build
+ENV LC_ALL "C.UTF-8"
+
+# setup adb server
+EXPOSE 5037
+
+# Setup user to match host user, and give superuser permissions
+ARG USER_ID=1000
+ARG GROUP_ID=${USER_ID}
+RUN addgroup --gid ${GROUP_ID} ubuntu && adduser --disabled-password --gecos '' --uid ${USER_ID} --gid ${GROUP_ID} ubuntu && usermod -aG sudo ubuntu
+RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
+RUN echo 'ubuntu ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
+
+# Clean archives (to reduce image size)
+RUN apt-get clean -y
+
+# Set user to the one we just created
+USER ${USER_ID}
diff --git a/infra/docker/bionic/Dockerfile.aarch64 b/infra/docker/bionic/Dockerfile.aarch64
new file mode 100644
index 000000000..08d712c96
--- /dev/null
+++ b/infra/docker/bionic/Dockerfile.aarch64
@@ -0,0 +1,92 @@
+# Copyright 2016-2020 Jing Li
+# Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM ubuntu:18.04
+
+ARG UBUNTU_MIRROR
+
+# Install 'add-apt-repository'
+RUN apt-get update && apt-get -qqy install software-properties-common
+
+# Git repo for latest version (github checkout@v2 action requires v2.18)
+RUN add-apt-repository ppa:git-core/ppa -y
+
+# Build tool
+RUN apt-get update && apt-get -qqy install build-essential cmake scons git g++-arm-linux-gnueabihf
+
+# ARM none eabi build tool
+RUN apt-get update && apt-get -qqy install gcc-arm-none-eabi
+
+# Debian build tool
+RUN apt-get update && apt-get -qqy install fakeroot devscripts debhelper python3-all
+
+# Install extra dependencies (Caffe, nnkit)
+RUN apt-get update && apt-get -qqy install libboost-all-dev libgflags-dev libgoogle-glog-dev libatlas-base-dev libhdf5-dev
+
+# Install protocol buffer
+RUN apt-get update && apt-get -qqy install libprotobuf-dev protobuf-compiler
+
+# Additional tools
+RUN apt-get update && \
+ DEBIAN_FRONTEND=noninteractive \
+ apt-get -qqy install doxygen graphviz wget zip unzip clang-format-8 python3 python3-pip python3-venv python3-dev hdf5-tools pylint curl
+RUN apt-get update && apt-get -qqy install python3.8 python3.8-venv python3.8-dev
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install yapf==0.22.0 numpy flatbuffers
+RUN python3.8 -m pip install --upgrade pip
+RUN python3.8 -m pip install numpy flatbuffers
+
+# Install google test (source)
+RUN apt-get update && apt-get -qqy install libgtest-dev
+
+# Install build tool gcc version 8.x and set alternative link (c++17 support)
+RUN apt-get update && apt-get -qqy install g++-8 g++-8-arm-linux-gnueabihf
+RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-8 80 \
+ --slave /usr/bin/g++ g++ /usr/bin/g++-8 \
+ --slave /usr/bin/gcov gcov /usr/bin/gcov-8
+RUN update-alternatives --install /usr/bin/arm-linux-gnueabihf-gcc arm-linux-gnueabihf-gcc /usr/bin/arm-linux-gnueabihf-gcc-8 80 \
+ --slave /usr/bin/arm-linux-gnueabihf-g++ arm-linux-gnueabihf-g++ /usr/bin/arm-linux-gnueabihf-g++-8 \
+ --slave /usr/bin/arm-linux-gnueabihf-gcov arm-linux-gnueabihf-gcov /usr/bin/arm-linux-gnueabihf-gcov-8
+
+# Install lcov 1.14-2 for gcc-8 support
+# Default version lcov 1.13-3 can't support gcc-8
+# lcov 1.13-4 with gcc-8 have bug: reports no coverage for class declaration
+WORKDIR /root/lcov
+RUN wget http://archive.ubuntu.com/ubuntu/pool/universe/l/lcov/lcov_1.14-2_all.deb
+RUN apt-get update && apt-get -qqy install libperlio-gzip-perl libjson-perl
+RUN dpkg -i lcov_1.14-2_all.deb
+WORKDIR /root
+RUN rm -rf /root/lcov
+
+# Build and install google test static libraries
+WORKDIR /root/gtest
+RUN cmake /usr/src/gtest
+RUN make
+RUN mv *.a /usr/lib
+WORKDIR /root
+RUN rm -rf gtest
+
+# Setup user to match host user, and give superuser permissions
+ARG USER_ID=1000
+ARG GROUP_ID=${USER_ID}
+RUN addgroup --gid ${GROUP_ID} ubuntu && adduser --disabled-password --gecos '' --uid ${USER_ID} --gid ${GROUP_ID} ubuntu && usermod -aG sudo ubuntu
+RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
+RUN echo 'ubuntu ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
+
+# Clean archives (to reduce image size)
+RUN apt-get clean -y
+
+# Set user to the one we just created
+USER ${USER_ID}
diff --git a/infra/docker/focal/Dockerfile b/infra/docker/focal/Dockerfile
new file mode 100644
index 000000000..0c6c582e9
--- /dev/null
+++ b/infra/docker/focal/Dockerfile
@@ -0,0 +1,108 @@
+# Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM ubuntu:20.04
+
+ARG UBUNTU_MIRROR
+
+# Install 'add-apt-repository'
+RUN apt-get update && apt-get -qqy install software-properties-common
+
+# Build tool
+RUN apt-get update && apt-get -qqy install build-essential cmake scons git lcov g++-arm-linux-gnueabihf g++-aarch64-linux-gnu
+
+# Debian build tool
+RUN apt-get update && apt-get -qqy install fakeroot devscripts debhelper python3-all dh-python
+
+# Install extra dependencies (Caffe, nnkit)
+RUN apt-get update && apt-get -qqy install libboost-all-dev libgflags-dev libgoogle-glog-dev libatlas-base-dev libhdf5-dev
+
+# Install protocol buffer
+RUN apt-get update && apt-get -qqy install libprotobuf-dev protobuf-compiler
+
+# Additional tools
+RUN apt-get update && \
+ DEBIAN_FRONTEND=noninteractive \
+ apt-get -qqy install doxygen graphviz wget zip unzip clang-format-8 python3 python3-pip python3-venv python3-dev hdf5-tools pylint curl
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install yapf==0.22.0 numpy flatbuffers
+
+# Install google test (source)
+RUN apt-get update && apt-get -qqy install libgtest-dev
+
+# Install gbs & sdb
+RUN echo 'deb [trusted=yes] http://download.tizen.org/tools/latest-release/Ubuntu_20.04/ /' | cat >> /etc/apt/sources.list
+RUN apt-get update && apt-get -qqy install gbs
+RUN wget http://download.tizen.org/sdk/tizenstudio/official/binary/sdb_4.2.19_ubuntu-64.zip -O sdb.zip
+RUN unzip -d tmp sdb.zip && rm sdb.zip
+RUN cp tmp/data/tools/sdb /usr/bin/. && rm -rf tmp/*
+
+# ARM none eabi build tool
+RUN apt-get update && apt-get -qqy install gcc-arm-none-eabi
+
+# Install java
+RUN apt-get install -y --no-install-recommends openjdk-8-jdk
+
+# download and install Gradle
+# https://services.gradle.org/distributions/
+ARG GRADLE_VERSION=6.4.1
+ARG GRADLE_DIST=bin
+RUN cd /opt && \
+ wget -q https://services.gradle.org/distributions/gradle-${GRADLE_VERSION}-${GRADLE_DIST}.zip && \
+ unzip gradle*.zip && \
+ ls -d */ | sed 's/\/*$//g' | xargs -I{} mv {} gradle && \
+ rm gradle*.zip
+
+# download and install Android SDK
+# https://developer.android.com/studio#command-tools
+ARG ANDROID_SDK_VERSION=6514223
+ENV ANDROID_SDK_ROOT /opt/android-sdk
+RUN mkdir -p ${ANDROID_SDK_ROOT}/cmdline-tools && \
+ wget -q https://dl.google.com/android/repository/commandlinetools-linux-${ANDROID_SDK_VERSION}_latest.zip && \
+ unzip *tools*linux*.zip -d ${ANDROID_SDK_ROOT}/cmdline-tools && \
+ rm *tools*linux*.zip
+
+# accept the license agreements of the SDK components
+RUN mkdir -p ${ANDROID_SDK_ROOT}/licenses
+RUN echo 24333f8a63b6825ea9c5514f83c2829b004d1fee > ${ANDROID_SDK_ROOT}/licenses/android-sdk-license
+RUN echo d56f5187479451eabf01fb78af6dfcb131a6481e >> ${ANDROID_SDK_ROOT}/licenses/android-sdk-license
+
+# Env variable for gradle build
+ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64
+ENV GRADLE_HOME /opt/gradle
+ENV PATH ${PATH}:${GRADLE_HOME}/bin:${ANDROID_SDK_ROOT}/cmdline-tools/tools/bin:${ANDROID_SDK_ROOT}/platform-tools
+ENV ANDROID_HOME ${ANDROID_SDK_ROOT}
+
+# Install NDK
+RUN sdkmanager --install "ndk;20.0.5594570"
+RUN sdkmanager "platform-tools"
+
+# Env for ko encoding build
+ENV LC_ALL "C.UTF-8"
+
+# setup adb server
+EXPOSE 5037
+
+# Setup user to match host user, and give superuser permissions
+ARG USER_ID=1000
+ARG GROUP_ID=${USER_ID}
+RUN addgroup --gid ${GROUP_ID} ubuntu && adduser --disabled-password --gecos '' --uid ${USER_ID} --gid ${GROUP_ID} ubuntu && usermod -aG sudo ubuntu
+RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
+RUN echo 'ubuntu ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
+
+# Clean archives (to reduce image size)
+RUN apt-get clean -y
+
+# Set user to the one we just created
+USER ${USER_ID}
diff --git a/infra/docker/focal/Dockerfile.aarch64 b/infra/docker/focal/Dockerfile.aarch64
new file mode 100644
index 000000000..b63bbb10f
--- /dev/null
+++ b/infra/docker/focal/Dockerfile.aarch64
@@ -0,0 +1,62 @@
+# Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM ubuntu:20.04
+
+ARG UBUNTU_MIRROR
+
+# Install 'add-apt-repository'
+RUN apt-get update && apt-get -qqy install software-properties-common
+
+# Build tool
+RUN apt-get update && apt-get -qqy install build-essential cmake scons git lcov g++-arm-linux-gnueabihf
+
+# Debian build tool
+RUN apt-get update && apt-get -qqy install fakeroot devscripts debhelper python3-all dh-python
+
+# Install extra dependencies (Caffe, nnkit)
+RUN apt-get update && apt-get -qqy install libboost-all-dev libgflags-dev libgoogle-glog-dev libatlas-base-dev libhdf5-dev
+
+# Install protocol buffer
+RUN apt-get update && apt-get -qqy install libprotobuf-dev protobuf-compiler
+
+# Additional tools
+RUN apt-get update && \
+ DEBIAN_FRONTEND=noninteractive \
+ apt-get -qqy install doxygen graphviz wget zip unzip clang-format-8 python3 python3-pip python3-venv python3-dev hdf5-tools pylint curl
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install yapf==0.22.0 numpy flatbuffers
+
+# Install google test (source)
+RUN apt-get update && apt-get -qqy install libgtest-dev
+
+# Install gbs & sdb
+RUN echo 'deb [trusted=yes] http://download.tizen.org/tools/latest-release/Ubuntu_20.04/ /' | cat >> /etc/apt/sources.list
+RUN apt-get update && apt-get -qqy install gbs
+RUN wget http://download.tizen.org/sdk/tizenstudio/official/binary/sdb_4.2.19_ubuntu-64.zip -O sdb.zip
+RUN unzip -d tmp sdb.zip && rm sdb.zip
+RUN cp tmp/data/tools/sdb /usr/bin/. && rm -rf tmp/*
+
+# Setup user to match host user, and give superuser permissions
+ARG USER_ID=1000
+ARG GROUP_ID=${USER_ID}
+RUN addgroup --gid ${GROUP_ID} ubuntu && adduser --disabled-password --gecos '' --uid ${USER_ID} --gid ${GROUP_ID} ubuntu && usermod -aG sudo ubuntu
+RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
+RUN echo 'ubuntu ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
+
+# Clean archives (to reduce image size)
+RUN apt-get clean -y
+
+# Set user to the one we just created
+USER ${USER_ID}
diff --git a/infra/docker/jammy/Dockerfile b/infra/docker/jammy/Dockerfile
new file mode 100644
index 000000000..aa500b0f4
--- /dev/null
+++ b/infra/docker/jammy/Dockerfile
@@ -0,0 +1,60 @@
+# Copyright (c) 2022 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM ubuntu:jammy
+
+ARG UBUNTU_MIRROR
+
+# Install 'add-apt-repository'
+RUN apt-get update && apt-get -qqy install software-properties-common
+
+# Build tool
+RUN apt-get update && apt-get -qqy install build-essential cmake scons git lcov g++-arm-linux-gnueabihf g++-aarch64-linux-gnu
+
+# Debian build tool
+RUN apt-get update && apt-get -qqy install fakeroot devscripts debhelper python3-all dh-python
+
+# Install extra dependencies (Caffe, nnkit)
+RUN apt-get update && apt-get -qqy install libboost-all-dev libgflags-dev libgoogle-glog-dev libatlas-base-dev libhdf5-dev
+
+# Install protocol buffer
+RUN apt-get update && apt-get -qqy install libprotobuf-dev protobuf-compiler
+
+# Additional tools
+# TODO install clang-format (No official clang-format-8 package for ubuntu jammy)
+RUN apt-get update && \
+ DEBIAN_FRONTEND=noninteractive \
+ apt-get -qqy install doxygen graphviz wget zip unzip python3 python3-pip python3-venv python3-dev hdf5-tools pylint curl
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install yapf==0.22.0 numpy flatbuffers
+
+# Install google test (source)
+RUN apt-get update && apt-get -qqy install libgtest-dev
+
+# TODO: Install gbs & sdb
+# gbs &amp; sdb do not support ubuntu jammy yet
+
+# Setup user to match host user, and give superuser permissions
+ARG USER_ID=1000
+ARG GROUP_ID=${USER_ID}
+RUN apt-get update && apt-get -qqy install sudo
+RUN addgroup --gid ${GROUP_ID} ubuntu && adduser --disabled-password --gecos '' --uid ${USER_ID} --gid ${GROUP_ID} ubuntu && usermod -aG sudo ubuntu
+RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
+RUN echo 'ubuntu ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
+
+# Clean archives (to reduce image size)
+RUN apt-get clean -y
+
+# Set user to the one we just created
+USER ${USER_ID}
diff --git a/infra/docker/jammy/Dockerfile.aarch64 b/infra/docker/jammy/Dockerfile.aarch64
new file mode 100644
index 000000000..a6a449dd6
--- /dev/null
+++ b/infra/docker/jammy/Dockerfile.aarch64
@@ -0,0 +1,60 @@
+# Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM ubuntu:jammy
+
+ARG UBUNTU_MIRROR
+
+# Install 'add-apt-repository'
+RUN apt-get update && apt-get -qqy install software-properties-common
+
+# Build tool
+RUN apt-get update && apt-get -qqy install build-essential cmake scons git lcov g++-arm-linux-gnueabihf
+
+# Debian build tool
+RUN apt-get update && apt-get -qqy install fakeroot devscripts debhelper python3-all dh-python
+
+# Install extra dependencies (Caffe, nnkit)
+RUN apt-get update && apt-get -qqy install libboost-all-dev libgflags-dev libgoogle-glog-dev libatlas-base-dev libhdf5-dev
+
+# Install protocol buffer
+RUN apt-get update && apt-get -qqy install libprotobuf-dev protobuf-compiler
+
+# Additional tools
+# TODO install clang-format (No official clang-format-8 package for ubuntu jammy)
+RUN apt-get update && \
+ DEBIAN_FRONTEND=noninteractive \
+ apt-get -qqy install doxygen graphviz wget zip unzip python3 python3-pip python3-venv python3-dev hdf5-tools pylint curl
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install yapf==0.22.0 numpy flatbuffers
+
+# Install google test (source)
+RUN apt-get update && apt-get -qqy install libgtest-dev
+
+# TODO: Install gbs & sdb
+# gbs &amp; sdb do not support ubuntu jammy yet
+
+# Setup user to match host user, and give superuser permissions
+ARG USER_ID=1000
+ARG GROUP_ID=${USER_ID}
+RUN apt-get update && apt-get -qqy install sudo
+RUN addgroup --gid ${GROUP_ID} ubuntu && adduser --disabled-password --gecos '' --uid ${USER_ID} --gid ${GROUP_ID} ubuntu && usermod -aG sudo ubuntu
+RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
+RUN echo 'ubuntu ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
+
+# Clean archives (to reduce image size)
+RUN apt-get clean -y
+
+# Set user to the one we just created
+USER ${USER_ID}
diff --git a/infra/doxygen/Doxyfile b/infra/doxygen/Doxyfile
index 0dc6fdfff..af2adfcc8 100644
--- a/infra/doxygen/Doxyfile
+++ b/infra/doxygen/Doxyfile
@@ -32,7 +32,7 @@ DOXYFILE_ENCODING = UTF-8
# title of most generated pages and in a few other places.
# The default value is: My Project.
-PROJECT_NAME = nnas
+PROJECT_NAME = "ONE - On-device Neural Engine"
# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
# could be handy for archiving the generated documentation or if some version
@@ -252,7 +252,7 @@ TCL_SUBST =
# members will be omitted, etc.
# The default value is: NO.
-OPTIMIZE_OUTPUT_FOR_C = NO
+OPTIMIZE_OUTPUT_FOR_C = YES
# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
# Python sources only. Doxygen will then generate output that is more tailored
@@ -623,13 +623,13 @@ STRICT_PROTO_MATCHING = NO
# list. This list is created by putting \todo commands in the documentation.
# The default value is: YES.
-GENERATE_TODOLIST = YES
+GENERATE_TODOLIST = NO
# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
# list. This list is created by putting \test commands in the documentation.
# The default value is: YES.
-GENERATE_TESTLIST = YES
+GENERATE_TESTLIST = NO
# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
# list. This list is created by putting \bug commands in the documentation.
@@ -642,7 +642,7 @@ GENERATE_BUGLIST = YES
# the documentation.
# The default value is: YES.
-GENERATE_DEPRECATEDLIST= YES
+GENERATE_DEPRECATEDLIST= NO
# The ENABLED_SECTIONS tag can be used to enable conditional documentation
# sections, marked by \if <section_label> ... \endif and \cond <section_label>
@@ -790,7 +790,14 @@ WARN_LOGFILE =
# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
# Note: If this tag is empty the current directory is searched.
-INPUT =
+INPUT = README.md \
+ docs/howto/ \
+ docs/overview/ \
+ docs/runtime/ \
+ compute/ \
+ compiler/ \
+ onert-micro/ \
+ runtime/
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
@@ -873,23 +880,14 @@ RECURSIVE = YES
# Note that relative paths are relative to the directory from which doxygen is
# run.
-EXCLUDE = Product/ \
- build/ \
- doxygen/ \
- report/ \
- externals/ \
- packaging/ \
- runtimes/contrib/ \
- runtimes/pure_arm_compute/ \
- tests/ \
- tools/
+EXCLUDE =
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded
# from the input.
# The default value is: NO.
-EXCLUDE_SYMLINKS = NO
+EXCLUDE_SYMLINKS = YES
# If the value of the INPUT tag contains directories, you can use the
# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
@@ -898,7 +896,17 @@ EXCLUDE_SYMLINKS = NO
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories for example use the pattern */test/*
-EXCLUDE_PATTERNS =
+EXCLUDE_PATTERNS = *.test.* \
+ */test/* \
+ */tests/* \
+ */unittest/* \
+ *_generated.* \
+ */3rdparty/* \
+ */contrib/* \
+ */compiler/*/*.md \
+ */compute/*/*.md \
+ */runtime/*/*.md
+
# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
# (namespaces, classes, functions, etc.) that should be excluded from the
@@ -991,7 +999,7 @@ FILTER_SOURCE_PATTERNS =
# (index.html). This can be useful if you have a project on for instance GitHub
# and want to reuse the introduction page also for the doxygen output.
-USE_MDFILE_AS_MAINPAGE = docs/nnfw/roadmap.md
+USE_MDFILE_AS_MAINPAGE = README.md
#---------------------------------------------------------------------------
# Configuration options related to source browsing
@@ -1010,7 +1018,7 @@ SOURCE_BROWSER = YES
# classes and enums directly into the documentation.
# The default value is: NO.
-INLINE_SOURCES = NO
+INLINE_SOURCES = YES
# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
# special comment blocks from generated source code fragments. Normal C, C++ and
@@ -1023,13 +1031,13 @@ STRIP_CODE_COMMENTS = YES
# function all documented functions referencing it will be listed.
# The default value is: NO.
-REFERENCED_BY_RELATION = NO
+REFERENCED_BY_RELATION = YES
# If the REFERENCES_RELATION tag is set to YES then for each documented function
# all documented entities called/used by that function will be listed.
# The default value is: NO.
-REFERENCES_RELATION = NO
+REFERENCES_RELATION = YES
# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
# to YES then the hyperlinks from functions in REFERENCES_RELATION and
@@ -2265,7 +2273,7 @@ DOT_FONTPATH =
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
-CLASS_GRAPH = YES
+CLASS_GRAPH = NO
# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
# graph for each documented class showing the direct and indirect implementation
@@ -2310,7 +2318,7 @@ UML_LIMIT_NUM_FIELDS = 10
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
-TEMPLATE_RELATIONS = NO
+TEMPLATE_RELATIONS = YES
# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
# YES then doxygen will generate a graph for each documented file showing the
@@ -2319,7 +2327,7 @@ TEMPLATE_RELATIONS = NO
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
-INCLUDE_GRAPH = YES
+INCLUDE_GRAPH = NO
# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
# set to YES then doxygen will generate a graph for each documented file showing
@@ -2328,7 +2336,7 @@ INCLUDE_GRAPH = YES
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
-INCLUDED_BY_GRAPH = YES
+INCLUDED_BY_GRAPH = NO
# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
# dependency graph for every global function or class method.
@@ -2340,7 +2348,7 @@ INCLUDED_BY_GRAPH = YES
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
-CALL_GRAPH = YES
+CALL_GRAPH = NO
# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
# dependency graph for every global function or class method.
@@ -2352,7 +2360,7 @@ CALL_GRAPH = YES
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
-CALLER_GRAPH = YES
+CALLER_GRAPH = NO
# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will graphical
# hierarchy of all classes instead of a textual one.
@@ -2401,7 +2409,7 @@ INTERACTIVE_SVG = NO
# found. If left blank, it is assumed the dot tool can be found in the path.
# This tag requires that the tag HAVE_DOT is set to YES.
-DOT_PATH = /usr/local/bin/dot
+DOT_PATH =
# The DOTFILE_DIRS tag can be used to specify one or more directories that
# contain dot files that are included in the documentation (see the \dotfile
@@ -2450,7 +2458,7 @@ PLANTUML_INCLUDE_PATH =
# Minimum value: 0, maximum value: 10000, default value: 50.
# This tag requires that the tag HAVE_DOT is set to YES.
-DOT_GRAPH_MAX_NODES = 50
+DOT_GRAPH_MAX_NODES = 500
# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
# generated by dot. A depth value of 3 means that only nodes reachable from the
diff --git a/infra/nncc/CMakeLists.txt b/infra/nncc/CMakeLists.txt
index d416db2fd..bd53c33b1 100644
--- a/infra/nncc/CMakeLists.txt
+++ b/infra/nncc/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.1)
+cmake_minimum_required(VERSION 3.10)
project(nncc)
@@ -11,11 +11,6 @@ set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)
set(CMAKE_INSTALL_RPATH "$ORIGIN/../lib:$ORIGIN/")
set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
-# This feature works with CMake 3.5.2 or later. However, using previous versions does not produce
-# an error. We are still officially using CMake 3.1.0, but put this code for the sake of semantic
-# support in various development tools.
-# Todo: Someday, CMake needs to be updated to 3.7.2 or later to take advantage of improvements
-# such as `cmake-server`.
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
set(NNAS_PROJECT_SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/../.." CACHE
@@ -40,12 +35,19 @@ macro(nnas_include PREFIX)
endmacro(nnas_include)
macro(nnas_find_package PREFIX)
- find_package(${PREFIX} CONFIG NO_DEFAULT_PATH
- PATHS ${NNAS_PROJECT_SOURCE_DIR}/infra/cmake/packages
- ${ARGN}
- )
+ find_package(${PREFIX}
+ CONFIG NO_DEFAULT_PATH
+ PATHS ${NNAS_PROJECT_SOURCE_DIR}/infra/cmake/packages
+ ${ARGN})
endmacro(nnas_find_package)
+macro(nnas_find_package_folder PREFIX FIND_FOLDER)
+ find_package(${PREFIX}
+ CONFIG NO_DEFAULT_PATH
+ PATHS ${NNAS_PROJECT_SOURCE_DIR}/infra/cmake/packages ${FIND_FOLDER}
+ ${ARGN})
+endmacro(nnas_find_package_folder)
+
# nncc_find_resource(NAME) will update the following variables
#
# NAME_FOUND
@@ -81,30 +83,12 @@ message(STATUS "Use '${CMAKE_BUILD_TYPE}' configuration")
#
set(THREADS_PREFER_PTHREAD_FLAG TRUE)
-###
-### Configuration
-###
-option(DOWNLOAD_PROTOBUF "Download Protocol Buffer source" ON)
-option(BUILD_PROTOBUF "Locally build Protocol Buffer from the downloaded source" ON)
-option(DOWNLOAD_EIGEN "Download Eigen source" ON)
-option(DOWNLOAD_FARMHASH "Download farmhash source" ON)
-option(DOWNLOAD_GEMMLOWP "Download GEMM low precesion library source" ON)
-option(DOWNLOAD_RUY "Download ruy source" ON)
-option(DOWNLOAD_NEON2SSE "Download NEON2SSE library source" ON)
-option(DOWNLOAD_GFLAGS "Download GFlags source" OFF)
-option(DOWNLOAD_FLATBUFFERS "Download FlatBuffers source" ON)
-option(BUILD_FLATBUFFERS "Locally build Flatbuffers from the downloaded source" ON)
-option(DOWNLOAD_TENSORFLOW "Download TensorFlow source" ON)
-option(DOWNLOAD_CAFFE "Download Caffe source" ON)
-option(DOWNLOAD_PYTORCH "Download Pytorch source" ON)
-option(DOWNLOAD_ONNX "Download ONNX source" ON)
-option(DOWNLOAD_ABSEIL "Download Abseil-cpp source" ON)
-option(DOWNLOAD_PYBIND11 "Download Pybind11 source" ON)
-
-option(DOWNLOAD_GTEST "Download Google Test source" ON)
-option(BUILD_GTEST "Build Google Test from the downloaded source" ON)
-option(DOWNLOAD_HDF5 "Download HDF5 source" ON)
-option(BUILD_HDF5 "Build HDF5 from the downloaded source" ON)
+# identify platform: HOST_PLATFORM, TARGET_PLATFORM and related
+# note: this should be placed before flags and options setting
+nnas_include(IdentifyPlatform)
+
+# Configuration flags
+include("cmake/CfgOptionFlags.cmake")
nnas_find_package(GTest QUIET)
@@ -123,11 +107,9 @@ if(${ENABLE_TEST})
include(CTest)
endif(${ENABLE_TEST})
-option(ENABLE_STRICT_BUILD "Treat warning as error" OFF)
-
-# This option might be turned ON for Windows native build.
-# Check our ProtobufConfig.cmake for its usage.
-option(USE_PROTOBUF_LEGACY_IMPORT "Use legacy MODULE mode import rather than CONFIG mode" OFF)
+# apply compilation flags
+# NOTE this should be after all option
+include("cmake/ApplyCompileFlags.cmake")
###
### Target
diff --git a/infra/nncc/Makefile.arm32 b/infra/nncc/Makefile.arm32
new file mode 100644
index 000000000..9ba57ddb2
--- /dev/null
+++ b/infra/nncc/Makefile.arm32
@@ -0,0 +1,152 @@
+#
+# NOTE this is provided as experimental Makefile to ARM32 cross building
+# some modules of compiler.
+#
+
+BUILD_TYPE?=Debug
+BUILD_JOBS?=1
+
+CURRENT_DIR=$(shell pwd)
+BUILDFOLDER=build
+ARM32_FOLDER=arm32
+ROOTFS_ARM?=$(CURRENT_DIR)/tools/cross/rootfs/arm
+NNCC_CFG_OPTION_EXTRA?=
+
+TYPE_FOLDER=$(shell echo $(BUILD_TYPE) | tr A-Z a-z)
+
+BUILD_ARM32_FOLDER=$(BUILDFOLDER)/$(ARM32_FOLDER).$(TYPE_FOLDER)
+BUILD_ARM32_HOST=$(BUILDFOLDER)/$(ARM32_FOLDER).$(TYPE_FOLDER).host
+
+ARM32_INSTALL_FOLDER=$(CURRENT_DIR)/$(BUILDFOLDER)/$(ARM32_FOLDER).$(TYPE_FOLDER).install
+ARM32_INSTALL_HOST=$(CURRENT_DIR)/$(BUILDFOLDER)/$(ARM32_FOLDER).$(TYPE_FOLDER).host.install
+
+# ARM32 build
+ARM32_BUILD_ITEMS:=angkor;cwrap;pepper-str;pepper-strcast;pp
+ARM32_BUILD_ITEMS+=;pepper-csv2vec;crew
+ARM32_BUILD_ITEMS+=;oops;pepper-assert
+ARM32_BUILD_ITEMS+=;hermes;hermes-std
+ARM32_BUILD_ITEMS+=;loco;locop;logo-core;logo
+ARM32_BUILD_ITEMS+=;safemain;mio-circle05;mio-tflite280;mio-circle06;mio-tflite2121
+ARM32_BUILD_ITEMS+=;dio-hdf5
+ARM32_BUILD_ITEMS+=;luci-compute
+ARM32_BUILD_ITEMS+=;foder;circle-verify;souschef;arser;vconone
+ARM32_BUILD_ITEMS+=;luci
+ARM32_BUILD_ITEMS+=;luci-interpreter
+ARM32_BUILD_ITEMS+=;tflite2circle
+ARM32_BUILD_ITEMS+=;tflchef;circlechef
+ARM32_BUILD_ITEMS+=;circle2circle;record-minmax;circle-quantizer
+ARM32_BUILD_ITEMS+=;luci-eval-driver;luci-value-test
+
+ARM32_TOOLCHAIN_FILE=cmake/buildtool/cross/toolchain_armv7l-linux.cmake
+
+ARM32_HOST_ITEMS:=angkor;cwrap;pepper-str;pepper-strcast;pp
+ARM32_HOST_ITEMS+=;pepper-csv2vec
+ARM32_HOST_ITEMS+=;oops
+ARM32_HOST_ITEMS+=;hermes;hermes-std
+ARM32_HOST_ITEMS+=;loco;locop;logo-core;logo
+ARM32_HOST_ITEMS+=;safemain;mio-circle05;mio-tflite280;mio-circle06;mio-tflite2121
+ARM32_HOST_ITEMS+=;luci-compute
+ARM32_HOST_ITEMS+=;foder;circle-verify;souschef;arser;vconone
+ARM32_HOST_ITEMS+=;luci
+ARM32_HOST_ITEMS+=;luci-interpreter
+ARM32_HOST_ITEMS+=;tflite2circle
+ARM32_HOST_ITEMS+=;tflchef;circlechef
+ARM32_HOST_ITEMS+=;circle-tensordump
+ARM32_HOST_ITEMS+=;circle2circle
+ARM32_HOST_ITEMS+=;common-artifacts
+ARM32_HOST_ITEMS+=;luci-eval-driver;luci-value-test
+
+
+_EMPTY_:=
+_SPACE_:=$(_EMPTY_) $(_EMPTY_)
+ARM32_BUILD_WHITELIST=$(subst $(_SPACE_),,$(ARM32_BUILD_ITEMS))
+ARM32_HOST_WHITELIST=$(subst $(_SPACE_),,$(ARM32_HOST_ITEMS))
+
+NNCC_CFG_OPTION+= -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_COVERAGE=OFF -DEXTERNALS_BUILD_THREADS=$(BUILD_JOBS)
+
+NNCC_CFG_STRICT= -DENABLE_STRICT_BUILD=ON
+
+INT_TARGETS:=int_configure_arm32 int_configure_arm32_host \
+ int_build_arm32 int_build_arm32_host int_test_arm32_host int_test
+
+NNCC_ARM32_DEBUG= -DBUILD_WHITELIST="$(ARM32_BUILD_WHITELIST)"
+NNCC_ARM32_DEBUG_HOST= -DBUILD_WHITELIST="$(ARM32_HOST_WHITELIST)"
+
+DEF_TARGETS:=all
+
+VAL_TARGETS:=cfg debug test_prep test
+
+.PHONY: $(INT_TARGETS) $(DEF_TARGETS) $(VAL_TARGETS)
+
+.DEFAULT_GOAL: help
+
+help:
+ @echo "cfg : debug configure"
+ @echo "debug : debug build"
+ @echo "test_prep: debug test preparation"
+ @echo "test : debug test in target"
+
+###############################################################################
+# do not call int_xxxx directly as they depend on environment variables
+
+#
+# configures
+#
+
+int_configure_arm32_host:
+ NNCC_WORKSPACE=$(BUILD_ARM32_HOST) ./nncc configure \
+ $(NNCC_CFG_OPTION) \
+ $(NNCC_ARM32_DEBUG_HOST) $(NNCC_CFG_STRICT) \
+ -DCMAKE_INSTALL_PREFIX="$(ARM32_INSTALL_HOST)" \
+ -DENABLE_TEST=ON
+
+int_configure_arm32:
+ ROOTFS_DIR=$(ROOTFS_ARM) TARGET_ARCH=armv7l \
+ BUILD_HOST_EXEC=$(CURRENT_DIR)/$(BUILD_ARM32_HOST) \
+ NNCC_WORKSPACE=$(BUILD_ARM32_FOLDER) ./nncc configure \
+ $(NNCC_CFG_OPTION) $(NNCC_CFG_OPTION_EXTRA) \
+ $(NNCC_ARM32_DEBUG) $(NNCC_CFG_STRICT) \
+ -DCMAKE_TOOLCHAIN_FILE=$(ARM32_TOOLCHAIN_FILE) \
+ -DCMAKE_INSTALL_PREFIX="$(ARM32_INSTALL_FOLDER)" \
+ -DBUILD_ARM32_NEON=ON \
+ -DENABLE_TEST=ON
+
+# TODO remove BUILD_ARM32_NEON=ON as default is ON, after a while.
+# explicitly added to prevent using cached 'BUILD_ARM32_NEON=OFF'
+
+#
+# builds
+#
+int_build_arm32_host:
+ NNCC_WORKSPACE=$(BUILD_ARM32_HOST) ./nncc build -j$(BUILD_JOBS)
+
+int_build_arm32:
+ ROOTFS_DIR=$(ROOTFS_ARM) TARGET_ARCH=armv7l \
+ BUILD_HOST_EXEC=$(CURRENT_DIR)/$(BUILD_ARM32_HOST) \
+ NNCC_WORKSPACE=$(BUILD_ARM32_FOLDER) ./nncc build -j$(BUILD_JOBS)
+
+#
+# host test; run test in host to generate random input and expected outputs
+#
+int_test_arm32_host:
+ NNCC_WORKSPACE=$(BUILD_ARM32_HOST) ./nncc test
+
+#
+# tests: run in ARM32 Ubuntu 18.04 device
+#
+int_test:
+ NNCC_WORKSPACE=$(BUILD_ARM32_FOLDER) ./nncc test
+
+################################################################################
+
+all: int_configure_arm32_host int_build_arm32_host int_configure_arm32 int_build_arm32
+
+cfg: int_configure_arm32_host int_build_arm32_host int_configure_arm32
+
+debug: int_build_arm32
+
+# NOTE before run test in ARM32, run test in host is required to prepare test data
+test_prep: int_test_arm32_host
+
+# NOTE run test in ARM32 Ubuntu 18.04 device
+test: int_test
diff --git a/infra/nncc/cmake/ApplyCompileFlags.cmake b/infra/nncc/cmake/ApplyCompileFlags.cmake
new file mode 100644
index 000000000..0cc5f9cd1
--- /dev/null
+++ b/infra/nncc/cmake/ApplyCompileFlags.cmake
@@ -0,0 +1,35 @@
+#
+# Platform independent compile flag setting
+#
+# flags for build type: debug, release
+set(CMAKE_C_FLAGS_DEBUG "-O0 -g -DDEBUG")
+set(CMAKE_CXX_FLAGS_DEBUG "-O0 -g -DDEBUG")
+set(CMAKE_C_FLAGS_RELEASE "-O3 -DNDEBUG")
+set(CMAKE_CXX_FLAGS_RELEASE "-O3 -DNDEBUG")
+
+#
+# Platform specific compile flag setting
+#
+if(EXISTS "${CMAKE_CURRENT_LIST_DIR}/buildtool/config/config_${TARGET_PLATFORM}.cmake")
+ include("${CMAKE_CURRENT_LIST_DIR}/buildtool/config/config_${TARGET_PLATFORM}.cmake")
+endif()
+
+#
+# Apply compile flags
+# note: this should be placed after cmake/buildtool/config/config_xxx.cmake files
+#
+# add common flags
+foreach(FLAG ${FLAGS_COMMON})
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${FLAG}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${FLAG}")
+endforeach()
+
+# add c flags
+foreach(FLAG ${FLAGS_CONLY})
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${FLAG}")
+endforeach()
+
+# add cxx flags
+foreach(FLAG ${FLAGS_CXXONLY})
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${FLAG}")
+endforeach()
diff --git a/infra/nncc/cmake/CfgOptionFlags.cmake b/infra/nncc/cmake/CfgOptionFlags.cmake
new file mode 100644
index 000000000..773a1f7d0
--- /dev/null
+++ b/infra/nncc/cmake/CfgOptionFlags.cmake
@@ -0,0 +1,58 @@
+#
+# Platform specific configuration
+# note: this should be placed before default setting for option setting priority
+# (platform specific setting have higher priority)
+#
+include("cmake/options/options_${TARGET_PLATFORM}.cmake")
+
+###
+### Configuration
+###
+option(DOWNLOAD_PROTOBUF "Download Protocol Buffer source" ON)
+option(BUILD_PROTOBUF "Locally build Protocol Buffer from the downloaded source" ON)
+option(DOWNLOAD_EIGEN "Download Eigen source" ON)
+option(DOWNLOAD_FARMHASH "Download farmhash source" ON)
+option(DOWNLOAD_GEMMLOWP "Download GEMM low precesion library source" ON)
+option(DOWNLOAD_RUY "Download ruy source" ON)
+option(DOWNLOAD_NEON2SSE "Download NEON2SSE library source" ON)
+option(DOWNLOAD_GFLAGS "Download GFlags source" OFF)
+option(DOWNLOAD_FLATBUFFERS "Download FlatBuffers source" ON)
+option(BUILD_FLATBUFFERS "Locally build Flatbuffers from the downloaded source" ON)
+option(DOWNLOAD_TENSORFLOW "Download TensorFlow source" ON)
+option(DOWNLOAD_CAFFE "Download Caffe source" ON)
+option(DOWNLOAD_PYTORCH "Download Pytorch source" ON)
+option(DOWNLOAD_ONNX "Download ONNX source" ON)
+option(DOWNLOAD_ABSEIL "Download Abseil-cpp source" ON)
+option(DOWNLOAD_OPENCL_HEADERS "Download OpenCl Header source" ON)
+option(DOWNLOAD_PYBIND11 "Download Pybind11 source" ON)
+option(DOWNLOAD_JSONCPP "Download Jsoncpp source" ON)
+
+option(DOWNLOAD_GTEST "Download Google Test source" ON)
+option(BUILD_GTEST "Build Google Test from the downloaded source" ON)
+option(DOWNLOAD_HDF5 "Download HDF5 source" ON)
+option(BUILD_HDF5 "Build HDF5 from the downloaded source" ON)
+
+option(ENABLE_STRICT_BUILD "Treat warning as error" OFF)
+
+# This option might be turned ON for Windows native build.
+# Check our ProtobufConfig.cmake for its usage.
+option(USE_PROTOBUF_LEGACY_IMPORT "Use legacy MODULE mode import rather than CONFIG mode" OFF)
+
+# This option might be turned ON for MCU builds of luci related components.
+# It specify which library type to use for build:
+# if set ON - luci libraries are static, otherwise - shared.
+option(STATIC_LUCI "Build luci as a static libraries" OFF)
+
+# Disable PIC(Position-Independent Code) option for luci-interpreter related components.
+# This option might be turned ON for MCU builds.
+#
+# Enabled PIC requires additional efforts for correct linkage, such as
+# implementation of trampoline functions and support of various address tables.
+# PIC is used for dynamic libraries, MCU builds of interpreter
+# do not benefit from it, so we prefer to disable PIC.
+option(NNCC_LIBRARY_NO_PIC "Disable PIC option for libraries" OFF)
+
+# one-cmds PyTorch importer is an experimental feature, it is not used in default configuration.
+# This option enables installation of one-import-pytorch utility and
+# generation of related testsuite.
+option(ENABLE_ONE_IMPORT_PYTORCH "Enable deploy of one-cmds pytoch importer and related tests" OFF)
diff --git a/infra/nncc/cmake/buildtool/config/arm-none-eabi-gcc.cmake b/infra/nncc/cmake/buildtool/config/arm-none-eabi-gcc.cmake
new file mode 100644
index 000000000..544be030a
--- /dev/null
+++ b/infra/nncc/cmake/buildtool/config/arm-none-eabi-gcc.cmake
@@ -0,0 +1,66 @@
+set(CMAKE_SYSTEM_NAME Generic)
+
+set(CMAKE_SYSTEM_PROCESSOR "${CPU_ARCH}")
+set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
+set(CMAKE_C_COMPILER "${C_COMPILER}")
+set(CMAKE_CXX_COMPILER "${CXX_COMPILER}")
+set(CMAKE_ASM_COMPILER "${ASM_COMPILER}")
+set(CMAKE_OBJCOPY "${OBJCOPY}")
+
+set(TARGET_CPU "cortex-m4" CACHE STRING "Target CPU")
+
+# Convert TARGET_CPU=Cortex-M33+nofp+nodsp into
+# - CMAKE_SYSTEM_PROCESSOR=cortex-m33
+# - TARGET_CPU_FEATURES=no-fp;no-dsp
+string(REPLACE "+" ";" TARGET_CPU_FEATURES ${TARGET_CPU})
+list(POP_FRONT TARGET_CPU_FEATURES CMAKE_SYSTEM_PROCESSOR)
+string(TOLOWER ${CMAKE_SYSTEM_PROCESSOR} CMAKE_SYSTEM_PROCESSOR)
+
+set(CMAKE_EXECUTABLE_SUFFIX ".elf")
+set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
+set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
+set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
+set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
+
+# Select C/C++ version
+set(CMAKE_C_STANDARD 99)
+set(CMAKE_CXX_STANDARD 14)
+
+# Compile options
+add_compile_options(
+ -mcpu=${TARGET_CPU}
+ -mthumb
+ "$<$<CONFIG:DEBUG>:-gdwarf-3>"
+ "$<$<COMPILE_LANGUAGE:CXX>:-funwind-tables;-frtti;-fexceptions>")
+
+# Compile defines
+add_compile_definitions(
+ "$<$<NOT:$<CONFIG:DEBUG>>:NDEBUG>")
+
+# Link options
+add_link_options(
+ -mcpu=${TARGET_CPU}
+ -mthumb
+ --specs=nosys.specs)
+
+# Set floating point unit
+if("${TARGET_CPU}" MATCHES "\\+fp")
+ set(FLOAT hard)
+elseif("${TARGET_CPU}" MATCHES "\\+nofp")
+ set(FLOAT soft)
+elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "cortex-m33" OR
+ "${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "cortex-m55")
+ set(FLOAT hard)
+else()
+ set(FLOAT soft)
+endif()
+
+if (FLOAT)
+ add_compile_options(-mfloat-abi=${FLOAT})
+ add_link_options(-mfloat-abi=${FLOAT})
+endif()
+
+# Compilation warnings
+add_compile_options(
+ -Wno-all
+)
diff --git a/infra/nncc/cmake/buildtool/config/config_aarch64-linux.cmake b/infra/nncc/cmake/buildtool/config/config_aarch64-linux.cmake
new file mode 100644
index 000000000..fcae94f28
--- /dev/null
+++ b/infra/nncc/cmake/buildtool/config/config_aarch64-linux.cmake
@@ -0,0 +1,13 @@
+#
+# aarch64 linux compile options
+#
+
+message(STATUS "Building for aarch64 Linux")
+
+# include linux common
+include("${CMAKE_CURRENT_LIST_DIR}/config_linux.cmake")
+
+# addition for arm-linux
+set(FLAGS_COMMON ${FLAGS_COMMON}
+ "-march=armv8-a"
+ )
diff --git a/infra/nncc/cmake/buildtool/config/config_aarch64-tizen.cmake b/infra/nncc/cmake/buildtool/config/config_aarch64-tizen.cmake
new file mode 100644
index 000000000..0f304ecf3
--- /dev/null
+++ b/infra/nncc/cmake/buildtool/config/config_aarch64-tizen.cmake
@@ -0,0 +1,17 @@
+#
+# aarch64 tizen compile options
+#
+
+message(STATUS "Building for AARCH64 Tizen")
+
+# Build flag for tizen
+set(CMAKE_C_FLAGS_DEBUG "-O -g -DDEBUG")
+set(CMAKE_CXX_FLAGS_DEBUG "-O -g -DDEBUG")
+
+# TODO : add and use option_tizen if something uncommon comes up
+# include linux common
+include("cmake/buildtool/config/config_linux.cmake")
+
+# addition for aarch64-tizen
+set(FLAGS_COMMON ${FLAGS_COMMON}
+ )
diff --git a/infra/nncc/cmake/buildtool/config/config_armv7hl-tizen.cmake b/infra/nncc/cmake/buildtool/config/config_armv7hl-tizen.cmake
new file mode 100644
index 000000000..fc6876a23
--- /dev/null
+++ b/infra/nncc/cmake/buildtool/config/config_armv7hl-tizen.cmake
@@ -0,0 +1,29 @@
+#
+# armv7hl tizen compile options
+#
+
+message(STATUS "Building for ARMv7hl(hardfp) Tizen")
+
+# Build flag for tizen
+set(CMAKE_C_FLAGS_DEBUG "-O -g -DDEBUG")
+set(CMAKE_CXX_FLAGS_DEBUG "-O -g -DDEBUG")
+
+# TODO : add and use option_tizen if something uncommon comes up
+# include linux common
+include("cmake/buildtool/config/config_linux.cmake")
+
+# addition for arm-linux
+set(FLAGS_COMMON ${FLAGS_COMMON}
+ "-mtune=cortex-a8"
+ "-mfloat-abi=hard"
+ "-funsafe-math-optimizations"
+ )
+
+if(BUILD_ARM32_NEON)
+ set(FLAGS_COMMON ${FLAGS_COMMON}
+ "-mfpu=neon-vfpv4"
+ "-ftree-vectorize"
+ )
+else(BUILD_ARM32_NEON)
+ message(STATUS "ARMv7l: NEON is disabled")
+endif(BUILD_ARM32_NEON)
diff --git a/infra/nncc/cmake/buildtool/config/config_armv7l-linux.cmake b/infra/nncc/cmake/buildtool/config/config_armv7l-linux.cmake
new file mode 100644
index 000000000..87704db33
--- /dev/null
+++ b/infra/nncc/cmake/buildtool/config/config_armv7l-linux.cmake
@@ -0,0 +1,25 @@
+#
+# armv7l linux compile options
+#
+
+message(STATUS "Building for ARMv7l Linux")
+
+# include linux common
+include("${CMAKE_CURRENT_LIST_DIR}/config_linux.cmake")
+
+# addition for arm-linux
+set(FLAGS_COMMON ${FLAGS_COMMON}
+ "-march=armv7-a"
+ "-mtune=cortex-a8"
+ "-mfloat-abi=hard"
+ "-mfp16-format=ieee"
+ )
+
+if(BUILD_ARM32_NEON)
+ set(FLAGS_COMMON ${FLAGS_COMMON}
+ "-mfpu=vfpv3-d16"
+ "-ftree-vectorize"
+ )
+else(BUILD_ARM32_NEON)
+ message(STATUS "ARMv7l: NEON is disabled")
+endif(BUILD_ARM32_NEON)
diff --git a/infra/nncc/cmake/buildtool/config/config_armv7l-tizen.cmake b/infra/nncc/cmake/buildtool/config/config_armv7l-tizen.cmake
new file mode 100644
index 000000000..b1ffe65c1
--- /dev/null
+++ b/infra/nncc/cmake/buildtool/config/config_armv7l-tizen.cmake
@@ -0,0 +1,29 @@
+#
+# armv7l tizen compile options
+#
+
+message(STATUS "Building for ARMv7l(softfp) Tizen")
+
+# Build flag for tizen
+set(CMAKE_C_FLAGS_DEBUG "-O -g -DDEBUG")
+set(CMAKE_CXX_FLAGS_DEBUG "-O -g -DDEBUG")
+
+# TODO : add and use option_tizen if something uncommon comes up
+# include linux common
+include("cmake/buildtool/config/config_linux.cmake")
+
+# addition for arm-linux
+set(FLAGS_COMMON ${FLAGS_COMMON}
+ "-mtune=cortex-a8"
+ "-mfloat-abi=softfp"
+ "-funsafe-math-optimizations"
+ )
+
+if(BUILD_ARM32_NEON)
+ set(FLAGS_COMMON ${FLAGS_COMMON}
+ "-mfpu=neon-vfpv4"
+ "-ftree-vectorize"
+ )
+else(BUILD_ARM32_NEON)
+ message(STATUS "ARMv7l: NEON is disabled")
+endif(BUILD_ARM32_NEON)
diff --git a/infra/nncc/cmake/buildtool/config/config_i686-tizen.cmake b/infra/nncc/cmake/buildtool/config/config_i686-tizen.cmake
new file mode 100644
index 000000000..3929e07fd
--- /dev/null
+++ b/infra/nncc/cmake/buildtool/config/config_i686-tizen.cmake
@@ -0,0 +1,17 @@
+#
+# i686 tizen compile options
+#
+
+message(STATUS "Building for i686 Tizen")
+
+# Build flag for tizen
+set(CMAKE_C_FLAGS_DEBUG "-O -g -DDEBUG")
+set(CMAKE_CXX_FLAGS_DEBUG "-O -g -DDEBUG")
+
+# TODO : add and use option_tizen if something uncommon comes up
+# include linux common
+include("cmake/buildtool/config/config_linux.cmake")
+
+# addition for i686-tizen
+set(FLAGS_COMMON ${FLAGS_COMMON}
+ )
diff --git a/infra/nncc/cmake/buildtool/config/config_linux.cmake b/infra/nncc/cmake/buildtool/config/config_linux.cmake
new file mode 100644
index 000000000..d7b17cfef
--- /dev/null
+++ b/infra/nncc/cmake/buildtool/config/config_linux.cmake
@@ -0,0 +1,11 @@
+#
+# linux common compile options
+#
+
+# Disable annoying ABI compatibility warning.
+if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0)
+ list(APPEND FLAGS_CXXONLY "-Wno-psabi")
+endif()
+
+# lib pthread as a variable (pthread must be disabled on android)
+set(LIB_PTHREAD pthread)
diff --git a/infra/nncc/cmake/buildtool/config/config_x86_64-tizen.cmake b/infra/nncc/cmake/buildtool/config/config_x86_64-tizen.cmake
new file mode 100644
index 000000000..0f304ecf3
--- /dev/null
+++ b/infra/nncc/cmake/buildtool/config/config_x86_64-tizen.cmake
@@ -0,0 +1,17 @@
+#
+# x86_64 tizen compile options
+#
+
+message(STATUS "Building for AARCH64 Tizen")
+
+# Build flag for tizen
+set(CMAKE_C_FLAGS_DEBUG "-O -g -DDEBUG")
+set(CMAKE_CXX_FLAGS_DEBUG "-O -g -DDEBUG")
+
+# TODO : add and use option_tizen if something uncommon comes up
+# include linux common
+include("cmake/buildtool/config/config_linux.cmake")
+
+# addition for x86_64-tizen
+set(FLAGS_COMMON ${FLAGS_COMMON}
+ )
diff --git a/infra/nncc/cmake/buildtool/cross/toolchain_armv7l-linux.cmake b/infra/nncc/cmake/buildtool/cross/toolchain_armv7l-linux.cmake
new file mode 100644
index 000000000..4956d91f9
--- /dev/null
+++ b/infra/nncc/cmake/buildtool/cross/toolchain_armv7l-linux.cmake
@@ -0,0 +1,38 @@
+#
+# config for arm-linux
+#
+include(CMakeForceCompiler)
+
+set(CMAKE_SYSTEM_NAME Linux)
+set(CMAKE_SYSTEM_PROCESSOR armv7l)
+
+set(CMAKE_C_COMPILER arm-linux-gnueabihf-gcc)
+set(CMAKE_CXX_COMPILER arm-linux-gnueabihf-g++)
+
+# where is the target environment
+set(NNAS_PROJECT_SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/../../../../..")
+set(ROOTFS_ARM "${NNAS_PROJECT_SOURCE_DIR}/tools/cross/rootfs/arm")
+include("${NNAS_PROJECT_SOURCE_DIR}/infra/cmake/modules/OptionTools.cmake")
+
+envoption(ROOTFS_DIR ${ROOTFS_ARM})
+if(NOT EXISTS "${ROOTFS_DIR}/lib/arm-linux-gnueabihf")
+ message(FATAL_ERROR "Please prepare RootFS for ARM")
+endif()
+
+set(CMAKE_SYSROOT ${ROOTFS_DIR})
+set(CMAKE_SHARED_LINKER_FLAGS
+ "${CMAKE_SHARED_LINKER_FLAGS} --sysroot=${ROOTFS_DIR}"
+ CACHE INTERNAL "" FORCE)
+set(CMAKE_EXE_LINKER_FLAGS
+ "${CMAKE_EXE_LINKER_FLAGS} --sysroot=${ROOTFS_DIR}"
+ CACHE INTERNAL "" FORCE)
+
+# search for programs in the build host directories
+set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
+
+# for libraries and headers in the target directories
+set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
+set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
+
+# Set cache variable to ignore try-run error by find_package(Threads REQUIRED) on cross build
+set(THREADS_PTHREAD_ARG "2" CACHE STRING "Result from TRY_RUN" FORCE)
diff --git a/infra/nncc/cmake/options/options_aarch64-darwin.cmake b/infra/nncc/cmake/options/options_aarch64-darwin.cmake
new file mode 100644
index 000000000..89398bdf4
--- /dev/null
+++ b/infra/nncc/cmake/options/options_aarch64-darwin.cmake
@@ -0,0 +1,4 @@
+#
+# aarch64 darwin cmake options
+#
+
diff --git a/infra/nncc/cmake/options/options_aarch64-linux.cmake b/infra/nncc/cmake/options/options_aarch64-linux.cmake
new file mode 100644
index 000000000..becd574b1
--- /dev/null
+++ b/infra/nncc/cmake/options/options_aarch64-linux.cmake
@@ -0,0 +1,4 @@
+#
+# aarch64 linux cmake options
+#
+
diff --git a/infra/nncc/cmake/options/options_aarch64-tizen.cmake b/infra/nncc/cmake/options/options_aarch64-tizen.cmake
new file mode 100644
index 000000000..be97cb314
--- /dev/null
+++ b/infra/nncc/cmake/options/options_aarch64-tizen.cmake
@@ -0,0 +1,4 @@
+#
+# aarch64 tizen cmake options
+#
+
diff --git a/infra/nncc/cmake/options/options_armv7em-generic.cmake b/infra/nncc/cmake/options/options_armv7em-generic.cmake
new file mode 100644
index 000000000..d671b73f1
--- /dev/null
+++ b/infra/nncc/cmake/options/options_armv7em-generic.cmake
@@ -0,0 +1,3 @@
+#
+# armv7em generic cmake options
+#
diff --git a/infra/nncc/cmake/options/options_armv7hl-tizen.cmake b/infra/nncc/cmake/options/options_armv7hl-tizen.cmake
new file mode 100644
index 000000000..e787ecef8
--- /dev/null
+++ b/infra/nncc/cmake/options/options_armv7hl-tizen.cmake
@@ -0,0 +1,5 @@
+#
+# armv7hl tizen cmake options
+#
+
+option(BUILD_ARM32_NEON "Use NEON for ARM32 build" ON)
diff --git a/infra/nncc/cmake/options/options_armv7l-linux.cmake b/infra/nncc/cmake/options/options_armv7l-linux.cmake
new file mode 100644
index 000000000..d1cc367ee
--- /dev/null
+++ b/infra/nncc/cmake/options/options_armv7l-linux.cmake
@@ -0,0 +1,5 @@
+#
+# armv7l linux cmake options
+#
+
+option(BUILD_ARM32_NEON "Use NEON for ARM32 cross build" ON)
diff --git a/infra/nncc/cmake/options/options_armv7l-tizen.cmake b/infra/nncc/cmake/options/options_armv7l-tizen.cmake
new file mode 100644
index 000000000..9a96f403a
--- /dev/null
+++ b/infra/nncc/cmake/options/options_armv7l-tizen.cmake
@@ -0,0 +1,5 @@
+#
+# armv7l tizen cmake options
+#
+
+option(BUILD_ARM32_NEON "Use NEON for ARM32 build" ON)
diff --git a/infra/nncc/cmake/options/options_i686-tizen.cmake b/infra/nncc/cmake/options/options_i686-tizen.cmake
new file mode 100644
index 000000000..028efca97
--- /dev/null
+++ b/infra/nncc/cmake/options/options_i686-tizen.cmake
@@ -0,0 +1,3 @@
+#
+# i686 tizen cmake options
+#
diff --git a/infra/nncc/cmake/options/options_riscv64-tizen.cmake b/infra/nncc/cmake/options/options_riscv64-tizen.cmake
new file mode 100644
index 000000000..d26d03473
--- /dev/null
+++ b/infra/nncc/cmake/options/options_riscv64-tizen.cmake
@@ -0,0 +1,3 @@
+#
+# riscv64 tizen cmake options
+#
diff --git a/infra/nncc/cmake/options/options_x86_64-darwin.cmake b/infra/nncc/cmake/options/options_x86_64-darwin.cmake
new file mode 100644
index 000000000..1a29135b4
--- /dev/null
+++ b/infra/nncc/cmake/options/options_x86_64-darwin.cmake
@@ -0,0 +1,4 @@
+#
+# x86_64 darwin cmake options
+#
+
diff --git a/infra/nncc/cmake/options/options_x86_64-linux.cmake b/infra/nncc/cmake/options/options_x86_64-linux.cmake
new file mode 100644
index 000000000..0fb72f18b
--- /dev/null
+++ b/infra/nncc/cmake/options/options_x86_64-linux.cmake
@@ -0,0 +1,3 @@
+#
+# x86_64 linux cmake options
+#
diff --git a/infra/nncc/cmake/options/options_x86_64-tizen.cmake b/infra/nncc/cmake/options/options_x86_64-tizen.cmake
new file mode 100644
index 000000000..a29a0afc2
--- /dev/null
+++ b/infra/nncc/cmake/options/options_x86_64-tizen.cmake
@@ -0,0 +1,3 @@
+#
+# x86_64 tizen cmake options
+#
diff --git a/infra/nncc/command/utcount b/infra/nncc/command/utcount
index d06c5c9de..65aea8bae 100644
--- a/infra/nncc/command/utcount
+++ b/infra/nncc/command/utcount
@@ -9,15 +9,17 @@ if [[ ! -d "${BUILD_WORKSPACE_PATH}" ]]; then
exit 255
fi
-BUILD_ITEMS="angkor cwrap pepper-str pepper-strcast pp stdex \
+BUILD_ITEMS="angkor cwrap pepper-str pepper-strcast pp \
oops pepper-assert \
hermes hermes-std \
loco locop locomotiv logo-core logo \
-foder souschef arser vconone \
-safemain mio-circle mio-tflite \
+foder souschef arser vconone crew \
+safemain mio-circle mio-tflite mio-tflite260 \
tflite2circle \
luci \
luci-interpreter \
+luci-eval-driver \
+luci-pass-value-test \
luci-value-test \
record-minmax \
circle2circle circle-quantizer"
diff --git a/infra/nncc/config/docker.configuration b/infra/nncc/config/docker.configuration
index 7078585a2..2765c3642 100644
--- a/infra/nncc/config/docker.configuration
+++ b/infra/nncc/config/docker.configuration
@@ -1,4 +1,4 @@
-DOCKER_IMAGE_NAME=${DOCKER_IMAGE_NAME:-nnas}
+DOCKER_IMAGE_NAME=${DOCKER_IMAGE_NAME:-nnfw/one-devtools}
echo "Using docker image ${DOCKER_IMAGE_NAME}"
if [ -z "`docker images ${DOCKER_IMAGE_NAME}`" ]; then
@@ -11,7 +11,7 @@ DOCKER_PATH="$NNCC_PROJECT_PATH"
export GIT_SSL_NO_VERIFY=1
-DOCKER_VOLUMES=" -v $HOST_PATH:$DOCKER_PATH"
+DOCKER_VOLUMES+=" -v $HOST_PATH:$DOCKER_PATH"
DOCKER_ENV_VARS+=" -e http_proxy"
DOCKER_ENV_VARS+=" -e no_proxy"
diff --git a/infra/nnfw/CMakeLists.txt b/infra/nnfw/CMakeLists.txt
index 5caf37fbf..857c15bf5 100644
--- a/infra/nnfw/CMakeLists.txt
+++ b/infra/nnfw/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.5.1)
+cmake_minimum_required(VERSION 3.16.3)
project(nnfw)
@@ -6,9 +6,12 @@ enable_testing()
set(CMAKE_SKIP_BUILD_RPATH FALSE)
set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)
-set(CMAKE_INSTALL_RPATH "$ORIGIN/../lib:$ORIGIN/")
+set(CMAKE_INSTALL_RPATH "$ORIGIN/../lib:$ORIGIN/../lib/nnfw/odc:$ORIGIN/")
set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
+### CMAKE_BUILD_TYPE_LC: Build type lower case
+string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LC)
+
set(NNAS_PROJECT_SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/../.." CACHE
INTERNAL "Where to find nnas top-level source directory"
)
@@ -52,14 +55,15 @@ macro(nnas_find_package PREFIX)
)
endmacro(nnas_find_package)
+# C++14 feature requires 5 or later
+# Using std::unordered_map shows build fail under 6.2
+if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 6.2)
+ message(FATAL "Runtime build requires GNU Compiler version 6.2 or later.")
+endif()
+
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_EXTENSIONS OFF)
-# This feature works with CMake 3.5.2 or later. However, using previous versions does not produce
-# an error. We are still officially using CMake 3.5.1, but put this code for the sake of semantic
-# support in various development tools.
-# Todo: Someday, CMake needs to be updated to 3.7.2 or later to take advantage of improvements
-# such as `cmake-server`.
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
# identify platform: HOST_PLATFORM, TARGET_PLATFORM and related
diff --git a/infra/nnfw/cmake/ApplyCompileFlags.cmake b/infra/nnfw/cmake/ApplyCompileFlags.cmake
index b042b0c42..b1c7ff568 100644
--- a/infra/nnfw/cmake/ApplyCompileFlags.cmake
+++ b/infra/nnfw/cmake/ApplyCompileFlags.cmake
@@ -31,3 +31,13 @@ endforeach()
foreach(FLAG ${FLAGS_CXXONLY})
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${FLAG}")
endforeach()
+
+# lib pthread as a variable (finding pthread build option must be disabled on android)
+# Define here to use on external lib build
+set(LIB_PTHREAD lib_pthread)
+add_library(${LIB_PTHREAD} INTERFACE)
+if(NOT TARGET_OS STREQUAL "android")
+ # Get compile option (ex. "-pthread" on linux GNU build tool)
+ find_package(Threads)
+ target_link_libraries(${LIB_PTHREAD} INTERFACE Threads::Threads)
+endif()
diff --git a/infra/nnfw/cmake/CfgOptionFlags.cmake b/infra/nnfw/cmake/CfgOptionFlags.cmake
index b3d058164..8d37cf27c 100644
--- a/infra/nnfw/cmake/CfgOptionFlags.cmake
+++ b/infra/nnfw/cmake/CfgOptionFlags.cmake
@@ -15,29 +15,28 @@ option(ENABLE_COVERAGE "Build for coverage test" OFF)
option(BUILD_EXT_MULTITHREAD "Build external build using multi thread" ON)
option(BUILD_ONERT "Build onert" ON)
option(BUILD_LOGGING "Build logging runtime" ON)
-CMAKE_DEPENDENT_OPTION(BUILD_RUNTIME_NNAPI_TEST "Build Runtime NN API Generated Test"
- # Set BUILD_RUNTIME_NNAPI_TEST as ON
- # if CMAKE_COMPILER_IS_GNUCC AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 6.2
- ON "CMAKE_COMPILER_IS_GNUCC;NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 6.2"
- # Otherwise set BUILD_RUNTIME_NNAPI_TEST as OFF
- OFF)
+option(BUILD_RUNTIME_NNAPI_TEST "Build Runtime NN API Generated Test" ON)
option(BUILD_RUNTIME_NNFW_API_TEST "Build Runtime NNFW API Tests" ON)
option(BUILD_TFLITE_RUN "Build tflite-run" ON)
option(BUILD_TFLITE_VANILLA_RUN "Build tflite-vanilla-run" OFF)
-option(BUILD_TFLITE_BENCHMARK_MODEL "Build tflite benchmark model" OFF)
-option(BUILD_NNAPI_TEST "Build nnapi_test" ON)
-option(BUILD_NNPACKAGE_RUN "Build nnpackge_run" ON)
+option(BUILD_ONERT_RUN "Build onert_run" ON)
+option(BUILD_ONERT_TRAIN "Build onert_train" ON)
option(BUILD_TFLITE_LOADER "Build TensorFlow Lite loader" ON)
option(BUILD_CIRCLE_LOADER "Build circle loader" ON)
-option(BUILD_TFLITE_LOADER_TEST_TOOL "Build tflite loader testing tool" ON)
+option(BUILD_TRIX_LOADER "Build trix loader" ON)
+option(BUILD_TFLITE_COMPARATOR_TEST_TOOL "Build tflite loader testing tool" ON)
option(BUILD_WITH_HDF5 "Build test tool with HDF5 library" ON)
option(GENERATE_RUNTIME_NNAPI_TESTS "Generate NNAPI operation gtest" ON)
option(ENVVAR_ONERT_CONFIG "Use environment variable for onert configuration" ON)
option(INSTALL_TEST_SCRIPTS "Install test scripts" ON)
+option(BUILD_GPU_CL "Build gpu_cl backend" OFF)
+option(BUILD_NPUD "Build NPU daemon" OFF)
+option(ENVVAR_NPUD_CONFIG "Use environment variable for npud configuration" OFF)
+option(BUILD_MINMAX_H5DUMPER "Build minmax h5dumper" ON)
+option(ENABLE_ONERT_TRAIN "Enable onert training feature" ON)
#
# Default build configuration for contrib
#
-option(BUILD_ANDROID_TFLITE "Enable android support for TensorFlow Lite" OFF)
option(BUILD_ANDROID_BENCHMARK_APP "Enable Android Benchmark App" OFF)
option(BUILD_BENCHMARK_ACL "Build ARM Compute Library Benchmarks" OFF)
option(BUILD_DETECTION_APP "Build detection example app" OFF)
@@ -53,13 +52,13 @@ option(BUILD_MLAPSE "Build mlapse benchmark toolkit" OFF)
#
option(BUILD_KBENCHMARK "Build kernel benchmark tool" OFF)
option(BUILD_OPENCL_TOOL "Build OpenCL tool" OFF)
-option(BUILD_NNAPI_QUICKCHECK "Build NN API Quickcheck tools" OFF)
option(BUILD_TFLITE_ACCURACY "Build tflite accuracy tool" OFF)
#
# Default external libraries source download and build configuration
#
option(DOWNLOAD_TENSORFLOW "Download Tensorflow source" ON)
option(DOWNLOAD_ABSEIL "Download Abseil source" ON)
+option(DOWNLOAD_OPENCL_HEADERS "Download Opencl_headers source" OFF)
option(DOWNLOAD_EIGEN "Download Eigen source" ON)
option(DOWNLOAD_FARMHASH "Download farmhash source" ON)
option(DOWNLOAD_GEMMLOWP "Download GEMM low precesion library source" ON)
@@ -69,13 +68,28 @@ option(DOWNLOAD_ARMCOMPUTE "Download ARM Compute source" ON)
option(DOWNLOAD_NONIUS "Download nonius source" ON)
option(DOWNLOAD_BOOST "Download boost source" OFF)
option(DOWNLOAD_RUY "Download ruy source" ON)
+option(DOWNLOAD_CPUINFO "Download cpuinfo source" ON)
+option(DOWNLOAD_OOURAFFT "Download Ooura FFT source" ON)
+option(DOWNLOAD_GTEST "Download Google Test source and build Google Test" ON)
option(BUILD_BOOST "Build boost source" OFF)
option(BUILD_TENSORFLOW_LITE "Build TensorFlow Lite from the downloaded source" ON)
-option(BUILD_TENSORFLOW_LITE_2_3_0 "Build TensorFlow Lite 2.3.0 from the downloaded source" OFF)
-option(BUILD_GTEST "Download and build Google Test" ON)
+option(BUILD_TENSORFLOW_LITE_GPU "Build TensorFlow Lite GPU delegate from the downloaded source" OFF)
option(BUILD_ARMCOMPUTE "Build ARM Compute from the downloaded source" ON)
+option(DEBUG_ARMCOMPUTE "Build ARM Compute as debug type" OFF)
option(BUILD_RUY "Build ruy library from the downloaded source" ON)
+option(BUILD_CPUINFO "Build cpuinfo library from the downloaded source" ON)
option(PROFILE_RUY "Enable ruy library profiling" OFF)
+option(DOWNLOAD_XNNPACK "Download xnnpack source" ON)
+option(BUILD_XNNPACK "Build xnnpack library from the downloaded source" ON)
+option(DOWNLOAD_PTHREADPOOL "Download pthreadpool source" ON)
+option(BUILD_PTHREADPOOL "Build pthreadpool library from the source" ON)
+option(DOWNLOAD_PSIMD "Download psimd source" ON)
+option(BUILD_PSIMD "Build psimd library from the source" ON)
+option(DOWNLOAD_FP16 "Download fp16 source" ON)
+option(BUILD_FP16 "Build fp16 library from the source" ON)
+option(DOWNLOAD_FXDIV "Download fxdiv source" ON)
+option(BUILD_FXDIV "Build fxdiv library from the source" ON)
+
#
## Default sample build configuration
diff --git a/infra/nnfw/cmake/buildtool/config/config_aarch64-android.cmake b/infra/nnfw/cmake/buildtool/config/config_aarch64-android.cmake
index e0c81dee7..fb63b3c47 100644
--- a/infra/nnfw/cmake/buildtool/config/config_aarch64-android.cmake
+++ b/infra/nnfw/cmake/buildtool/config/config_aarch64-android.cmake
@@ -1,8 +1,5 @@
include("cmake/buildtool/config/config_linux.cmake")
-# On Android, pthread is contained in bionic(libc)
-set(LIB_PTHREAD "")
-
# SIMD for aarch64
set(FLAGS_COMMON ${FLAGS_COMMON}
"-ftree-vectorize"
diff --git a/infra/nnfw/cmake/buildtool/config/config_aarch64-tizen.cmake b/infra/nnfw/cmake/buildtool/config/config_aarch64-tizen.cmake
index 0f304ecf3..33e376517 100644
--- a/infra/nnfw/cmake/buildtool/config/config_aarch64-tizen.cmake
+++ b/infra/nnfw/cmake/buildtool/config/config_aarch64-tizen.cmake
@@ -15,3 +15,5 @@ include("cmake/buildtool/config/config_linux.cmake")
# addition for aarch64-tizen
set(FLAGS_COMMON ${FLAGS_COMMON}
)
+
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-incompatible-pointer-types")
diff --git a/infra/nnfw/cmake/buildtool/config/config_armv7hl-tizen.cmake b/infra/nnfw/cmake/buildtool/config/config_armv7hl-tizen.cmake
new file mode 100644
index 000000000..dec1b4afb
--- /dev/null
+++ b/infra/nnfw/cmake/buildtool/config/config_armv7hl-tizen.cmake
@@ -0,0 +1,22 @@
+#
+# armv7hl tizen compile options
+#
+
+message(STATUS "Building for ARMv7hl(hardfp) Tizen")
+
+# Build flag for tizen
+set(CMAKE_C_FLAGS_DEBUG "-O -g -DDEBUG")
+set(CMAKE_CXX_FLAGS_DEBUG "-O -g -DDEBUG")
+
+# TODO : add and use option_tizen if something uncommon comes up
+# include linux common
+include("cmake/buildtool/config/config_linux.cmake")
+
+# addition for arm-linux
+set(FLAGS_COMMON ${FLAGS_COMMON}
+ "-mtune=cortex-a8"
+ "-mfloat-abi=hard"
+ "-mfpu=neon-vfpv4"
+ "-funsafe-math-optimizations"
+ "-ftree-vectorize"
+ )
diff --git a/infra/nnfw/cmake/buildtool/config/config_i686-tizen.cmake b/infra/nnfw/cmake/buildtool/config/config_i686-tizen.cmake
new file mode 100644
index 000000000..3929e07fd
--- /dev/null
+++ b/infra/nnfw/cmake/buildtool/config/config_i686-tizen.cmake
@@ -0,0 +1,17 @@
+#
+# i686 tizen compile options
+#
+
+message(STATUS "Building for i686 Tizen")
+
+# Build flag for tizen
+set(CMAKE_C_FLAGS_DEBUG "-O -g -DDEBUG")
+set(CMAKE_CXX_FLAGS_DEBUG "-O -g -DDEBUG")
+
+# TODO : add and use option_tizen if something uncommon comes up
+# include linux common
+include("cmake/buildtool/config/config_linux.cmake")
+
+# addition for i686-tizen
+set(FLAGS_COMMON ${FLAGS_COMMON}
+ )
diff --git a/infra/nnfw/cmake/buildtool/config/config_linux.cmake b/infra/nnfw/cmake/buildtool/config/config_linux.cmake
index 86dd0f217..681d165d2 100644
--- a/infra/nnfw/cmake/buildtool/config/config_linux.cmake
+++ b/infra/nnfw/cmake/buildtool/config/config_linux.cmake
@@ -2,20 +2,16 @@
# linux common compile options
#
-# remove warning from arm cl
+# Remove warning: ignoring attributes on template argument (ACL, Eigen, etc)
# https://github.com/ARM-software/ComputeLibrary/issues/330
-set(GCC_VERSION_DISABLE_WARNING 6.0)
-if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER GCC_VERSION_DISABLE_WARNING)
- message(STATUS "GCC version higher than ${GCC_VERSION_DISABLE_WARNING}")
- set(FLAGS_CXXONLY ${FLAGS_CXXONLY}
- "-Wno-ignored-attributes"
- )
-endif()
+set(FLAGS_CXXONLY ${FLAGS_CXXONLY} "-Wno-ignored-attributes")
# Disable annoying ABI compatibility warning.
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0)
list(APPEND FLAGS_CXXONLY "-Wno-psabi")
endif()
-# lib pthread as a variable (pthread must be disabled on android)
-set(LIB_PTHREAD pthread)
+# Build fail on memcpy (ex. compute/cker/include/cker/Shape.h:211:16)
+if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 12.0)
+ list(APPEND FLAGS_CXXONLY "-Wno-error=stringop-overflow -Wno-error=array-bounds")
+endif()
diff --git a/infra/nnfw/cmake/buildtool/config/config_riscv64-tizen.cmake b/infra/nnfw/cmake/buildtool/config/config_riscv64-tizen.cmake
new file mode 100644
index 000000000..2345da47c
--- /dev/null
+++ b/infra/nnfw/cmake/buildtool/config/config_riscv64-tizen.cmake
@@ -0,0 +1,17 @@
+#
+# riscv64 tizen compile options
+#
+
+message(STATUS "Building for RISC-V64 Tizen")
+
+# Build flag for tizen
+set(CMAKE_C_FLAGS_DEBUG "-O -g -DDEBUG")
+set(CMAKE_CXX_FLAGS_DEBUG "-O -g -DDEBUG")
+
+# TODO : add and use option_tizen if something uncommon comes up
+# include linux common
+include("cmake/buildtool/config/config_linux.cmake")
+
+# addition for riscv64-tizen
+set(FLAGS_COMMON ${FLAGS_COMMON}
+ )
diff --git a/infra/nnfw/cmake/buildtool/config/config_x86_64-darwin.cmake b/infra/nnfw/cmake/buildtool/config/config_x86_64-darwin.cmake
index dbd45fc03..52d6c6b2b 100644
--- a/infra/nnfw/cmake/buildtool/config/config_x86_64-darwin.cmake
+++ b/infra/nnfw/cmake/buildtool/config/config_x86_64-darwin.cmake
@@ -7,6 +7,3 @@ message(STATUS "Building for x86-64 Darwin")
set(FLAGS_COMMON ${FLAGS_COMMON}
"-msse4"
)
-
-# lib pthread as a variable (pthread must be disabled on android)
-set(LIB_PTHREAD pthread)
diff --git a/infra/nnfw/cmake/buildtool/config/config_x86_64-tizen.cmake b/infra/nnfw/cmake/buildtool/config/config_x86_64-tizen.cmake
new file mode 100644
index 000000000..0f304ecf3
--- /dev/null
+++ b/infra/nnfw/cmake/buildtool/config/config_x86_64-tizen.cmake
@@ -0,0 +1,17 @@
+#
+# x86_64 tizen compile options
+#
+
+message(STATUS "Building for AARCH64 Tizen")
+
+# Build flag for tizen
+set(CMAKE_C_FLAGS_DEBUG "-O -g -DDEBUG")
+set(CMAKE_CXX_FLAGS_DEBUG "-O -g -DDEBUG")
+
+# TODO : add and use option_tizen if something uncommon comes up
+# include linux common
+include("cmake/buildtool/config/config_linux.cmake")
+
+# addition for x86_64-tizen
+set(FLAGS_COMMON ${FLAGS_COMMON}
+ )
diff --git a/infra/nnfw/cmake/buildtool/cross/toolchain_aarch64-linux.cmake b/infra/nnfw/cmake/buildtool/cross/toolchain_aarch64-linux.cmake
index 3356aa72d..07b26a937 100644
--- a/infra/nnfw/cmake/buildtool/cross/toolchain_aarch64-linux.cmake
+++ b/infra/nnfw/cmake/buildtool/cross/toolchain_aarch64-linux.cmake
@@ -21,12 +21,6 @@ endif()
set(CMAKE_SYSROOT ${ROOTFS_DIR})
set(CMAKE_FIND_ROOT_PATH ${ROOTFS_DIR})
-set(CMAKE_SHARED_LINKER_FLAGS
- "${CMAKE_SHARED_LINKER_FLAGS} --sysroot=${ROOTFS_DIR}"
- CACHE INTERNAL "" FORCE)
-set(CMAKE_EXE_LINKER_FLAGS
- "${CMAKE_EXE_LINKER_FLAGS} --sysroot=${ROOTFS_DIR}"
- CACHE INTERNAL "" FORCE)
# search for programs in the build host directories
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
diff --git a/infra/nnfw/cmake/buildtool/cross/toolchain_aarch64-tizen.cmake b/infra/nnfw/cmake/buildtool/cross/toolchain_aarch64-tizen.cmake
index 4d5d7ac56..cab7325dd 100644
--- a/infra/nnfw/cmake/buildtool/cross/toolchain_aarch64-tizen.cmake
+++ b/infra/nnfw/cmake/buildtool/cross/toolchain_aarch64-tizen.cmake
@@ -23,12 +23,6 @@ endif()
set(CMAKE_SYSROOT ${ROOTFS_DIR})
set(CMAKE_FIND_ROOT_PATH ${ROOTFS_DIR})
-set(CMAKE_SHARED_LINKER_FLAGS
- "${CMAKE_SHARED_LINKER_FLAGS} --sysroot=${ROOTFS_DIR}"
- CACHE INTERNAL "" FORCE)
-set(CMAKE_EXE_LINKER_FLAGS
- "${CMAKE_EXE_LINKER_FLAGS} --sysroot=${ROOTFS_DIR}"
- CACHE INTERNAL "" FORCE)
# search for programs in the build host directories
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
diff --git a/infra/nnfw/cmake/buildtool/cross/toolchain_armv7l-linux.cmake b/infra/nnfw/cmake/buildtool/cross/toolchain_armv7l-linux.cmake
index 8f2cb6735..c69259f85 100644
--- a/infra/nnfw/cmake/buildtool/cross/toolchain_armv7l-linux.cmake
+++ b/infra/nnfw/cmake/buildtool/cross/toolchain_armv7l-linux.cmake
@@ -21,12 +21,6 @@ endif()
set(CMAKE_SYSROOT ${ROOTFS_DIR})
set(CMAKE_FIND_ROOT_PATH ${ROOTFS_DIR})
-set(CMAKE_SHARED_LINKER_FLAGS
- "${CMAKE_SHARED_LINKER_FLAGS} --sysroot=${ROOTFS_DIR}"
- CACHE INTERNAL "" FORCE)
-set(CMAKE_EXE_LINKER_FLAGS
- "${CMAKE_EXE_LINKER_FLAGS} --sysroot=${ROOTFS_DIR}"
- CACHE INTERNAL "" FORCE)
# search for programs in the build host directories
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
diff --git a/infra/nnfw/cmake/buildtool/cross/toolchain_armv7l-tizen.cmake b/infra/nnfw/cmake/buildtool/cross/toolchain_armv7l-tizen.cmake
deleted file mode 100644
index 72513cdc1..000000000
--- a/infra/nnfw/cmake/buildtool/cross/toolchain_armv7l-tizen.cmake
+++ /dev/null
@@ -1,66 +0,0 @@
-#
-# config for arm-linux
-#
-include(CMakeForceCompiler)
-
-set(CMAKE_SYSTEM_NAME Linux)
-set(CMAKE_SYSTEM_PROCESSOR armv7l)
-
-set(CMAKE_C_COMPILER arm-linux-gnueabi-gcc)
-set(CMAKE_CXX_COMPILER arm-linux-gnueabi-g++)
-
-set(TIZEN_TOOLCHAIN "armv7l-tizen-linux-gnueabi/6.2.1")
-
-# where is the target environment
-set(NNAS_PROJECT_SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/../../../../..")
-set(ROOTFS_ARM "${NNAS_PROJECT_SOURCE_DIR}/tools/cross/rootfs/armel")
-include("${NNAS_PROJECT_SOURCE_DIR}/infra/cmake/modules/OptionTools.cmake")
-
-envoption(ROOTFS_DIR ${ROOTFS_ARM})
-if(NOT EXISTS "${ROOTFS_DIR}/usr/lib/gcc/${TIZEN_TOOLCHAIN}")
- message(FATAL_ERROR "Please prepare RootFS for tizen ARM softfp")
-endif()
-
-set(CMAKE_SYSROOT ${ROOTFS_DIR})
-set(CMAKE_FIND_ROOT_PATH ${ROOTFS_DIR})
-set(CMAKE_SHARED_LINKER_FLAGS
- "${CMAKE_SHARED_LINKER_FLAGS} --sysroot=${ROOTFS_DIR}"
- CACHE INTERNAL "" FORCE)
-set(CMAKE_EXE_LINKER_FLAGS
- "${CMAKE_EXE_LINKER_FLAGS} --sysroot=${ROOTFS_DIR}"
- CACHE INTERNAL "" FORCE)
-
-# search for programs in the build host directories
-set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
-
-# for libraries and headers in the target directories
-set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
-set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
-
-# Set cache variable to ignore try-run error by find_package(Threads REQUIRED) on cross build
-set(THREADS_PTHREAD_ARG "2" CACHE STRING "Result from TRY_RUN" FORCE)
-
-
-add_compile_options(-mthumb)
-add_compile_options(-mfpu=neon-vfpv4)
-add_compile_options(-mfloat-abi=softfp)
-add_compile_options(--sysroot=${ROOTFS_DIR})
-
-set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} --sysroot=${ROOTFS_DIR}")
-
-set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --sysroot=${ROOTFS_DIR}")
-
-include_directories(SYSTEM ${ROOTFS_DIR}/usr/lib/gcc/${TIZEN_TOOLCHAIN}/include/c++/)
-include_directories(SYSTEM ${ROOTFS_DIR}/usr/lib/gcc/${TIZEN_TOOLCHAIN}/include/c++/armv7l-tizen-linux-gnueabi)
-add_compile_options(-Wno-deprecated-declarations) # compile-time option
-add_compile_options(-D__extern_always_inline=inline) # compile-time option
-
-set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -B${ROOTFS_DIR}/usr/lib/gcc/${TIZEN_TOOLCHAIN}")
-set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -L${ROOTFS_DIR}/lib")
-set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -L${ROOTFS_DIR}/usr/lib")
-set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -L${ROOTFS_DIR}/usr/lib/gcc/${TIZEN_TOOLCHAIN}")
-
-set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -B${ROOTFS_DIR}/usr/lib/gcc/${TIZEN_TOOLCHAIN}")
-set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -L${ROOTFS_DIR}/lib")
-set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -L${ROOTFS_DIR}/usr/lib")
-set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -L${ROOTFS_DIR}/usr/lib/gcc/${TIZEN_TOOLCHAIN}")
diff --git a/infra/nnfw/cmake/options/options_aarch64-android.cmake b/infra/nnfw/cmake/options/options_aarch64-android.cmake
index d720b202a..5de2be333 100644
--- a/infra/nnfw/cmake/options/options_aarch64-android.cmake
+++ b/infra/nnfw/cmake/options/options_aarch64-android.cmake
@@ -1,18 +1,18 @@
# aarch64 android cmake options
#
-option(BUILD_ARMCOMPUTE "Build ARM Compute from the downloaded source" OFF)
-# NOTE BUILD_ANDROID_TFLITE(JNI lib) is disabled due to BuiltinOpResolver issue.
-# tensorflow-lite does not build BuiltinOpResolver but JNI lib need it
-# Related Issue : #1403
-option(BUILD_ANDROID_TFLITE "Enable android support for TensorFlow Lite" ON)
option(BUILD_ANDROID_BENCHMARK_APP "Enable Android Benchmark App" ON)
option(DOWNLOAD_NEON2SSE "Download NEON2SSE library source" OFF)
# Need boost library
option(DOWNLOAD_BOOST "Download boost source" ON)
option(BUILD_BOOST "Build boost source" ON)
-option(BUILD_RUNTIME_NNAPI_TEST "Build Runtime NN API Generated Test" OFF)
-option(BUILD_NNAPI_TEST "Build nnapi_test" OFF)
-option(BUILD_NNPACKAGE_RUN "Build nnpackge_run" ON)
-option(BUILD_TFLITE_RUN "Build tflite-run" OFF)
-option(BUILD_TFLITE_LOADER_TEST_TOOL "Build tflite loader testing tool" OFF)
option(BUILD_LOGGING "Build logging runtime" OFF)
+
+option(DOWNLOAD_OPENGL_HEADERS "Download Opengl_headers source" ON)
+option(DOWNLOAD_EGL_HEADERS "Download Egl_headers source" ON)
+option(DOWNLOAD_VULKAN "Download vulkan source" ON)
+option(DOWNLOAD_OPENCL_HEADERS "Download Opencl_headers source" ON)
+option(DOWNLOAD_PYBIND11 "Download Pybind11 source" ON)
+option(BUILD_GPU_CL "Build gpu_cl backend" ON)
+option(BUILD_TENSORFLOW_LITE_GPU "Build TensorFlow Lite GPU delegate from the downloaded source" ON)
+
+option(BUILD_MINMAX_H5DUMPER "Build minmax h5dumper" OFF)
diff --git a/infra/nnfw/cmake/options/options_aarch64-tizen.cmake b/infra/nnfw/cmake/options/options_aarch64-tizen.cmake
index 57d4c1061..cccd77f98 100644
--- a/infra/nnfw/cmake/options/options_aarch64-tizen.cmake
+++ b/infra/nnfw/cmake/options/options_aarch64-tizen.cmake
@@ -3,8 +3,17 @@
#
option(BUILD_ARMCOMPUTE "Build ARM Compute from the downloaded source" OFF)
option(BUILD_TENSORFLOW_LITE "Build TensorFlow Lite from the downloaded source" OFF)
+option(DOWNLOAD_ARMCOMPUTE "Download ARM Compute source" OFF)
option(DOWNLOAD_NEON2SSE "Download NEON2SSE library source" OFF)
+option(DOWNLOAD_GTEST "Download Google Test source and build Google Test" OFF)
option(BUILD_LOGGING "Build logging runtime" OFF)
option(GENERATE_RUNTIME_NNAPI_TESTS "Generate NNAPI operation gtest" OFF)
option(ENVVAR_ONERT_CONFIG "Use environment variable for onert configuration" OFF)
+
+option(BUILD_NPUD "Build NPU daemon" ON)
+# Do not allow to use CONFIG option on Tizen
+option(ENVVAR_NPUD_CONFIG "Use environment variable for npud configuration" OFF)
+
+option(BUILD_MINMAX_H5DUMPER "Build minmax h5dumper" OFF)
+option(ENABLE_ONERT_TRAIN "Enable onert training feature" OFF)
diff --git a/infra/nnfw/cmake/options/options_armv7hl-tizen.cmake b/infra/nnfw/cmake/options/options_armv7hl-tizen.cmake
new file mode 100644
index 000000000..07dc0404e
--- /dev/null
+++ b/infra/nnfw/cmake/options/options_armv7hl-tizen.cmake
@@ -0,0 +1,27 @@
+#
+# armv7hl tizen cmake options
+#
+option(BUILD_ARMCOMPUTE "Build ARM Compute from the downloaded source" OFF)
+option(BUILD_TENSORFLOW_LITE "Build TensorFlow Lite from the downloaded source" OFF)
+option(DOWNLOAD_ARMCOMPUTE "Download ARM Compute source" OFF)
+option(DOWNLOAD_NEON2SSE "Download NEON2SSE library source" OFF)
+option(DOWNLOAD_GTEST "Download Google Test source and build Google Test" OFF)
+
+option(BUILD_LOGGING "Build logging runtime" OFF)
+option(GENERATE_RUNTIME_NNAPI_TESTS "Generate NNAPI operation gtest" OFF)
+option(ENVVAR_ONERT_CONFIG "Use environment variable for onert configuration" OFF)
+
+option(DOWNLOAD_OPENCL_HEADERS "Download Opencl_headers source" ON)
+option(DOWNLOAD_OPENGL_HEADERS "Download Opengl_headers source" ON)
+option(DOWNLOAD_EGL_HEADERS "Download Egl_headers source" ON)
+option(DOWNLOAD_VULKAN "Download vulkan source" ON)
+
+option(BUILD_GPU_CL "Build gpu_cl backend" ON)
+option(BUILD_TENSORFLOW_LITE_GPU "Build TensorFlow Lite GPU delegate from the downloaded source" ON)
+
+option(BUILD_NPUD "Build NPU daemon" ON)
+# Do not allow to use CONFIG option on Tizen
+option(ENVVAR_NPUD_CONFIG "Use environment variable for npud configuration" OFF)
+
+option(BUILD_MINMAX_H5DUMPER "Build minmax h5dumper" OFF)
+option(ENABLE_ONERT_TRAIN "Enable onert training feature" OFF)
diff --git a/infra/nnfw/cmake/options/options_armv7l-linux.cmake b/infra/nnfw/cmake/options/options_armv7l-linux.cmake
index e10e573c4..c73a2befa 100644
--- a/infra/nnfw/cmake/options/options_armv7l-linux.cmake
+++ b/infra/nnfw/cmake/options/options_armv7l-linux.cmake
@@ -3,3 +3,10 @@
#
option(DOWNLOAD_NEON2SSE "Download NEON2SSE library source" OFF)
option(BUILD_OPENCL_TOOL "Build OpenCL tool" ON)
+
+option(DOWNLOAD_OPENGL_HEADERS "Download Opengl_headers source" ON)
+option(DOWNLOAD_EGL_HEADERS "Download Egl_headers source" ON)
+option(DOWNLOAD_VULKAN "Download vulkan source" ON)
+option(DOWNLOAD_OPENCL_HEADERS "Download Opencl_headers source" ON)
+option(BUILD_GPU_CL "Build gpu_cl backend" ON)
+option(BUILD_TENSORFLOW_LITE_GPU "Build TensorFlow Lite GPU delegate from the downloaded source" ON)
diff --git a/infra/nnfw/cmake/options/options_armv7l-tizen.cmake b/infra/nnfw/cmake/options/options_armv7l-tizen.cmake
index c27a7ad01..4fdcbc33f 100644
--- a/infra/nnfw/cmake/options/options_armv7l-tizen.cmake
+++ b/infra/nnfw/cmake/options/options_armv7l-tizen.cmake
@@ -3,8 +3,25 @@
#
option(BUILD_ARMCOMPUTE "Build ARM Compute from the downloaded source" OFF)
option(BUILD_TENSORFLOW_LITE "Build TensorFlow Lite from the downloaded source" OFF)
+option(DOWNLOAD_ARMCOMPUTE "Download ARM Compute source" OFF)
option(DOWNLOAD_NEON2SSE "Download NEON2SSE library source" OFF)
+option(DOWNLOAD_GTEST "Download Google Test source and build Google Test" OFF)
option(BUILD_LOGGING "Build logging runtime" OFF)
option(GENERATE_RUNTIME_NNAPI_TESTS "Generate NNAPI operation gtest" OFF)
option(ENVVAR_ONERT_CONFIG "Use environment variable for onert configuration" OFF)
+
+option(DOWNLOAD_OPENCL_HEADERS "Download Opencl_headers source" ON)
+option(DOWNLOAD_OPENGL_HEADERS "Download Opengl_headers source" ON)
+option(DOWNLOAD_EGL_HEADERS "Download Egl_headers source" ON)
+option(DOWNLOAD_VULKAN "Download vulkan source" ON)
+
+option(BUILD_GPU_CL "Build gpu_cl backend" ON)
+option(BUILD_TENSORFLOW_LITE_GPU "Build TensorFlow Lite GPU delegate from the downloaded source" ON)
+
+option(BUILD_NPUD "Build NPU daemon" ON)
+# Do not allow to use CONFIG option on Tizen
+option(ENVVAR_NPUD_CONFIG "Use environment variable for npud configuration" OFF)
+
+option(BUILD_MINMAX_H5DUMPER "Build minmax h5dumper" OFF)
+option(ENABLE_ONERT_TRAIN "Enable onert training feature" OFF)
diff --git a/infra/nnfw/cmake/options/options_i686-tizen.cmake b/infra/nnfw/cmake/options/options_i686-tizen.cmake
new file mode 100644
index 000000000..bdeb2d9ce
--- /dev/null
+++ b/infra/nnfw/cmake/options/options_i686-tizen.cmake
@@ -0,0 +1,21 @@
+#
+# i686 tizen cmake options
+#
+option(BUILD_ARMCOMPUTE "Build ARM Compute from the downloaded source" OFF)
+option(BUILD_TENSORFLOW_LITE "Build TensorFlow Lite from the downloaded source" OFF)
+option(DOWNLOAD_ARMCOMPUTE "Download ARM Compute source" OFF)
+option(DOWNLOAD_GTEST "Download Google Test source and build Google Test" OFF)
+
+option(BUILD_LOGGING "Build logging runtime" OFF)
+option(GENERATE_RUNTIME_NNAPI_TESTS "Generate NNAPI operation gtest" OFF)
+option(ENVVAR_ONERT_CONFIG "Use environment variable for onert configuration" OFF)
+
+option(BUILD_XNNPACK "Build XNNPACK" OFF)
+option(DOWNLOAD_OPENCL_HEADERS "Download opencl headers" OFF)
+
+option(BUILD_NPUD "Build NPU daemon" ON)
+# Do not allow to use CONFIG option on Tizen
+option(ENVVAR_NPUD_CONFIG "Use environment variable for npud configuration" OFF)
+
+option(BUILD_MINMAX_H5DUMPER "Build minmax h5dumper" OFF)
+option(ENABLE_ONERT_TRAIN "Enable onert training feature" OFF)
diff --git a/infra/nnfw/cmake/options/options_riscv64-tizen.cmake b/infra/nnfw/cmake/options/options_riscv64-tizen.cmake
new file mode 100644
index 000000000..c2f8c79f1
--- /dev/null
+++ b/infra/nnfw/cmake/options/options_riscv64-tizen.cmake
@@ -0,0 +1,20 @@
+#
+# riscv64 tizen cmake options
+#
+option(BUILD_ARMCOMPUTE "Build ARM Compute from the downloaded source" OFF)
+option(BUILD_TENSORFLOW_LITE "Build TensorFlow Lite from the downloaded source" OFF)
+option(DOWNLOAD_ARMCOMPUTE "Download ARM Compute source" OFF)
+option(DOWNLOAD_GTEST "Download Google Test source and build Google Test" OFF)
+
+option(BUILD_LOGGING "Build logging runtime" OFF)
+option(GENERATE_RUNTIME_NNAPI_TESTS "Generate NNAPI operation gtest" OFF)
+option(ENVVAR_ONERT_CONFIG "Use environment variable for onert configuration" OFF)
+
+option(BUILD_XNNPACK "Build XNNPACK" OFF)
+option(DOWNLOAD_OPENCL_HEADERS "Download opencl headers" OFF)
+
+option(BUILD_NPUD "Build NPU daemon" OFF)
+# Do not allow to use CONFIG option on Tizen
+option(ENVVAR_NPUD_CONFIG "Use environment variable for npud configuration" OFF)
+
+option(BUILD_MINMAX_H5DUMPER "Build minmax h5dumper" OFF)
diff --git a/infra/nnfw/cmake/options/options_x86_64-darwin.cmake b/infra/nnfw/cmake/options/options_x86_64-darwin.cmake
index 97642e6ce..135cfbf6e 100644
--- a/infra/nnfw/cmake/options/options_x86_64-darwin.cmake
+++ b/infra/nnfw/cmake/options/options_x86_64-darwin.cmake
@@ -3,3 +3,4 @@
#
option(BUILD_ARMCOMPUTE "Build ARM Compute from the downloaded source" OFF)
option(DOWNLOAD_ARMCOMPUTE "Download ARM Compute source" OFF)
+option(BUILD_XNNPACK "Build XNNPACK" OFF)
diff --git a/infra/nnfw/cmake/options/options_x86_64-linux.cmake b/infra/nnfw/cmake/options/options_x86_64-linux.cmake
index 97642e6ce..1cb72d593 100644
--- a/infra/nnfw/cmake/options/options_x86_64-linux.cmake
+++ b/infra/nnfw/cmake/options/options_x86_64-linux.cmake
@@ -2,4 +2,5 @@
# x86_64 linux cmake options
#
option(BUILD_ARMCOMPUTE "Build ARM Compute from the downloaded source" OFF)
+option(BUILD_XNNPACK "Build XNNPACK" OFF)
option(DOWNLOAD_ARMCOMPUTE "Download ARM Compute source" OFF)
diff --git a/infra/nnfw/cmake/options/options_x86_64-tizen.cmake b/infra/nnfw/cmake/options/options_x86_64-tizen.cmake
new file mode 100644
index 000000000..70da68c82
--- /dev/null
+++ b/infra/nnfw/cmake/options/options_x86_64-tizen.cmake
@@ -0,0 +1,21 @@
+#
+# x86_64 tizen cmake options
+#
+option(BUILD_ARMCOMPUTE "Build ARM Compute from the downloaded source" OFF)
+option(BUILD_TENSORFLOW_LITE "Build TensorFlow Lite from the downloaded source" OFF)
+option(DOWNLOAD_ARMCOMPUTE "Download ARM Compute source" OFF)
+option(DOWNLOAD_GTEST "Download Google Test source and build Google Test" OFF)
+
+option(BUILD_LOGGING "Build logging runtime" OFF)
+option(GENERATE_RUNTIME_NNAPI_TESTS "Generate NNAPI operation gtest" OFF)
+option(ENVVAR_ONERT_CONFIG "Use environment variable for onert configuration" OFF)
+
+option(BUILD_XNNPACK "Build XNNPACK" OFF)
+option(DOWNLOAD_OPENCL_HEADERS "Download opencl headers" OFF)
+
+option(BUILD_NPUD "Build NPU daemon" ON)
+# Do not allow to use CONFIG option on Tizen
+option(ENVVAR_NPUD_CONFIG "Use environment variable for npud configuration" OFF)
+
+option(BUILD_MINMAX_H5DUMPER "Build minmax h5dumper" OFF)
+option(ENABLE_ONERT_TRAIN "Enable onert training feature" OFF)
diff --git a/infra/nnfw/cmake/packages/ARMComputeConfig.cmake b/infra/nnfw/cmake/packages/ARMComputeConfig.cmake
index 67f6ef8a2..acc244aa1 100644
--- a/infra/nnfw/cmake/packages/ARMComputeConfig.cmake
+++ b/infra/nnfw/cmake/packages/ARMComputeConfig.cmake
@@ -1,7 +1,7 @@
function(_ARMCompute_Import)
include(FindPackageHandleStandardArgs)
- list(APPEND ARMCompute_LIB_SEARCH_PATHS ${ARMCompute_PREFIX})
+ list(APPEND ARMCompute_LIB_SEARCH_PATHS ${ARMCompute_PREFIX}/lib)
find_path(INCLUDE_DIR NAMES arm_compute/core/ITensor.h PATHS ${ARMCompute_INCLUDE_SEARCH_PATHS})
@@ -11,14 +11,25 @@ function(_ARMCompute_Import)
message(STATUS "Search acl in ${ARMCompute_LIB_SEARCH_PATHS}")
- if(NOT INCLUDE_DIR)
+ # ARMCompute v21.02 moves some headers into "src/".
+ # And we cannot build armcompute-ex library without these headers.
+ # So we need to download and use source code if our build root doesn't have headers in "src/" (tizen's devel package includes these headers).
+ # TODO Don't use headers in "src/"
+ find_path(HEADER_SRC_DIR NAMES src/core/CL/ICLKernel.h PATHS ${ARMCompute_INCLUDE_SEARCH_PATHS})
+ if(NOT INCLUDE_DIR OR NOT HEADER_SRC_DIR)
nnas_find_package(ARMComputeSource QUIET)
if (NOT ARMComputeSource_FOUND)
set(ARMCompute_FOUND FALSE PARENT_SCOPE)
return()
endif()
- set(INCLUDE_DIR ${ARMComputeSource_DIR} ${ARMComputeSource_DIR}/include)
- endif(NOT INCLUDE_DIR)
+
+ # Clean if INCLUDE_DIR is NOT_FOUND
+ if(NOT INCLUDE_DIR)
+ unset(INCLUDE_DIR)
+ endif(NOT INCLUDE_DIR)
+
+ list(APPEND INCLUDE_DIR ${ARMComputeSource_DIR} ${ARMComputeSource_DIR}/include)
+ endif(NOT INCLUDE_DIR OR NOT HEADER_SRC_DIR)
if(NOT CORE_LIBRARY)
set(ARMCompute_FOUND FALSE PARENT_SCOPE)
@@ -62,34 +73,19 @@ function(_ARMCompute_Import)
set(ARMCompute_FOUND TRUE PARENT_SCOPE)
endfunction(_ARMCompute_Import)
-### Check whether library exists
-function(_ARMCompute_Check VAR LIBDIR)
- set(FOUND TRUE)
-
- if(NOT EXISTS "${LIBDIR}/libarm_compute_core.so")
- set(FOUND FALSE)
- endif()
-
- if(NOT EXISTS "${LIBDIR}/libarm_compute.so")
- set(FOUND FALSE)
- endif()
-
- if(NOT EXISTS "${LIBDIR}/libarm_compute_graph.so")
- set(FOUND FALSE)
- endif()
-
- set(${VAR} ${FOUND} PARENT_SCOPE)
-endfunction(_ARMCompute_Check)
-
# Let's build and install ARMCompute libraries
-# NOTE This function silently returns on error
-function(_ARMCompute_Build ARMCompute_INSTALL_PREFIX)
- ### Check whether library exists
- _ARMCompute_Check(ARMCompute_FOUND ${ARMCompute_INSTALL_PREFIX})
-
- if(ARMCompute_FOUND)
- return()
- endif(ARMCompute_FOUND)
+function(_ARMCompute_Build ARMComputeInstall_DIR)
+ set(PKG_NAME "ARMCOMPUTE")
+ set(PKG_IDENTIFIER "21.02")
+ set(INSTALL_STAMP_PATH "${ARMComputeInstall_DIR}/${PKG_NAME}.stamp")
+ set(ARMComputeBuild_DIR "${CMAKE_BINARY_DIR}/externals/armcompute")
+
+ if(EXISTS ${INSTALL_STAMP_PATH})
+ file(READ ${INSTALL_STAMP_PATH} READ_IDENTIFIER)
+ if("${READ_IDENTIFIER}" STREQUAL "${PKG_IDENTIFIER}")
+ return()
+ endif("${READ_IDENTIFIER}" STREQUAL "${PKG_IDENTIFIER}")
+ endif(EXISTS ${INSTALL_STAMP_PATH})
### Let's build with SCONS
nnas_find_package(ARMComputeSource QUIET)
@@ -105,13 +101,16 @@ function(_ARMCompute_Build ARMCompute_INSTALL_PREFIX)
return()
endif(NOT SCONS_PATH)
- if(CMAKE_BUILD_TYPE)
- string(TOLOWER "${CMAKE_BUILD_TYPE}" SCON_BUILD_TYPE)
- else(CMAKE_BUILD_TYPE)
+ if(DEBUG_ARMCOMPUTE)
+ set(SCON_BUILD_TYPE "debug")
+ else(DEBUG_ARMCOMPUTE)
set(SCON_BUILD_TYPE "release")
- endif(CMAKE_BUILD_TYPE)
+ endif(DEBUG_ARMCOMPUTE)
#### Architecture-specific configurations
+
+ #### BUILD_DIR is in source tree to reduce CI build overhead
+ #### TODO Change BUILD_DIR to ${ARMComputeBuild_DIR}
if(TARGET_ARCH STREQUAL "armv7l")
set(BUILD_ARCH "armv7a")
set(BUILD_DIR "${BUILD_ARCH}-${TARGET_OS}.${SCON_BUILD_TYPE}")
@@ -137,12 +136,19 @@ function(_ARMCompute_Build ARMCompute_INSTALL_PREFIX)
list(APPEND SCONS_OPTIONS "Werror=0")
list(APPEND SCONS_OPTIONS "os=${TARGET_OS}")
- if(DEFINED ACL_BUILD_THREADS)
- set(N ${ACL_BUILD_THREADS})
- else(DEFINED ACL_BUILD_THREADS)
+ #### Disable test build
+ list(APPEND SCONS_OPTIONS "benchmark_tests=0")
+ list(APPEND SCONS_OPTIONS "validation_tests=0")
+ list(APPEND SCONS_OPTIONS "benchmark_examples=0")
+ list(APPEND SCONS_OPTIONS "validate_examples=0")
+ list(APPEND SCONS_OPTIONS "reference_openmp=0")
+
+ if(DEFINED EXTERNALS_BUILD_THREADS)
+ set(N ${EXTERNALS_BUILD_THREADS})
+ else(DEFINED EXTERNALS_BUILD_THREADS)
include(ProcessorCount)
ProcessorCount(N)
- endif(DEFINED ACL_BUILD_THREADS)
+ endif(DEFINED EXTERNALS_BUILD_THREADS)
if((NOT N EQUAL 0) AND BUILD_EXT_MULTITHREAD)
list(APPEND SCONS_OPTIONS -j${N})
@@ -155,26 +161,34 @@ function(_ARMCompute_Build ARMCompute_INSTALL_PREFIX)
list(APPEND SCONS_OPTIONS "build_dir=${BUILD_DIR}")
endif(DEFINED BUILD_DIR)
+ list(APPEND SCONS_OPTIONS "install_dir=${ARMComputeInstall_DIR}")
+
+ set(SCONS_CC "gcc")
+ set(SCONS_CXX "g++")
+ if(ANDROID)
+ list(APPEND SCONS_OPTIONS "toolchain_prefix=${ANDROID_TOOLCHAIN_PREFIX}")
+ list(APPEND SCONS_OPTIONS "compiler_prefix=${ANDROID_TOOLCHAIN_ROOT}/bin/aarch64-linux-android${ANDROID_API_LEVEL}-")
+ set(SCONS_CC "clang")
+ set(SCONS_CXX "clang++")
+ endif(ANDROID)
+
message(STATUS "Build ARMCompute with ${SCONS_PATH} ('${SCONS_OPTIONS}'")
# Build ARMCompute libraries with SCONS
- # NOTE ARMCompute SConstruct unconditioanlly appends "arm-linux-gnueabihf-" prefix for linux
- execute_process(COMMAND /usr/bin/env CC=gcc CXX=g++ "${SCONS_PATH}" ${SCONS_OPTIONS}
+ # NOTE ARMCompute build process don't allow logging by using OUTPUT_FILE and ERROR_FILE option
+ execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory "${ARMComputeInstall_DIR}")
+ execute_process(COMMAND /usr/bin/env CC=${SCONS_CC} CXX=${SCONS_CXX} "${SCONS_PATH}" ${SCONS_OPTIONS}
WORKING_DIRECTORY ${ARMComputeSource_DIR}
- RESULT_VARIABLE ARMCompute_BUILD)
+ RESULT_VARIABLE BUILD_EXITCODE)
- # Install ARMCompute libraries to overlay
- execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory "${ARMCompute_INSTALL_PREFIX}"
- WORKING_DIRECTORY ${ARMComputeSource_DIR}
- RESULT_VARIABLE ARMCompute_BUILD)
- execute_process(COMMAND ${CMAKE_COMMAND} -E copy "build/${BUILD_DIR}/libarm_compute_core.so" "${ARMCompute_INSTALL_PREFIX}"
- COMMAND ${CMAKE_COMMAND} -E copy "build/${BUILD_DIR}/libarm_compute.so" "${ARMCompute_INSTALL_PREFIX}"
- COMMAND ${CMAKE_COMMAND} -E copy "build/${BUILD_DIR}/libarm_compute_graph.so" "${ARMCompute_INSTALL_PREFIX}"
- WORKING_DIRECTORY ${ARMComputeSource_DIR}
- RESULT_VARIABLE ARMCompute_BUILD)
+ if(NOT BUILD_EXITCODE EQUAL 0)
+ message(FATAL_ERROR "${PKG_NAME} Package: Build and install failed (check '${BUILD_LOG_PATH}' for details)")
+ endif(NOT BUILD_EXITCODE EQUAL 0)
+
+ file(WRITE "${INSTALL_STAMP_PATH}" "${PKG_IDENTIFIER}")
endfunction(_ARMCompute_Build)
-set(ARMCompute_PREFIX ${EXT_OVERLAY_DIR}/lib)
+set(ARMCompute_PREFIX ${EXT_OVERLAY_DIR})
if(BUILD_ARMCOMPUTE)
_ARMCompute_Build("${ARMCompute_PREFIX}")
endif(BUILD_ARMCOMPUTE)
diff --git a/infra/nnfw/cmake/packages/BoostConfig.cmake b/infra/nnfw/cmake/packages/BoostConfig.cmake
index 4f60e9107..f2759f8e1 100644
--- a/infra/nnfw/cmake/packages/BoostConfig.cmake
+++ b/infra/nnfw/cmake/packages/BoostConfig.cmake
@@ -16,6 +16,18 @@ function(_Boost_Build Boost_PREFIX)
set(BoostBuild_DIR ${CMAKE_BINARY_DIR}/externals/boost)
set(BoostInstall_DIR ${Boost_PREFIX})
+ set(INSTALL_STAMP_PATH "${BoostInstall_DIR}/BOOST.stamp")
+ set(BUILD_LOG_PATH "${BoostBuild_DIR}/BOOST.log")
+ set(PKG_NAME "BOOST")
+ set(PKG_IDENTIFIER "1.58.0")
+
+ if(EXISTS ${INSTALL_STAMP_PATH})
+ file(READ ${INSTALL_STAMP_PATH} READ_IDENTIFIER)
+ if("${READ_IDENTIFIER}" STREQUAL "${PKG_IDENTIFIER}")
+ return()
+ endif("${READ_IDENTIFIER}" STREQUAL "${PKG_IDENTIFIER}")
+ endif(EXISTS ${INSTALL_STAMP_PATH})
+
unset(Boost_Options)
list(APPEND Boost_Options --build-dir=${BoostBuild_DIR})
@@ -25,6 +37,17 @@ function(_Boost_Build Boost_PREFIX)
list(APPEND Boost_Options --with-system)
list(APPEND Boost_Options --with-filesystem)
+ if(DEFINED EXTERNALS_BUILD_THREADS)
+ set(N ${EXTERNALS_BUILD_THREADS})
+ else(DEFINED EXTERNALS_BUILD_THREADS)
+ include(ProcessorCount)
+ ProcessorCount(N)
+ endif(DEFINED EXTERNALS_BUILD_THREADS)
+
+ if((NOT N EQUAL 0) AND BUILD_EXT_MULTITHREAD)
+ list(APPEND Boost_Options -j${N})
+ endif()
+
set(JAM_FILENAME ${BoostBuild_DIR}/user-config.jam)
if(ANDROID)
@@ -41,7 +64,15 @@ function(_Boost_Build Boost_PREFIX)
# Install Boost libraries
execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory "${BoostInstall_DIR}")
execute_process(COMMAND /usr/bin/env BOOST_BUILD_PATH="${BoostBuild_DIR}" ${BoostSource_DIR}/b2 install ${Boost_Options}
- WORKING_DIRECTORY ${BoostSource_DIR})
+ WORKING_DIRECTORY ${BoostSource_DIR}
+ OUTPUT_FILE ${BUILD_LOG_PATH}
+ RESULT_VARIABLE BUILD_EXITCODE)
+
+ if(NOT BUILD_EXITCODE EQUAL 0)
+ message(FATAL_ERROR "${PKG_NAME} Package: Build and install failed (check '${BUILD_LOG_PATH}' for details)")
+ endif(NOT BUILD_EXITCODE EQUAL 0)
+
+ file(WRITE "${INSTALL_STAMP_PATH}" "${PKG_IDENTIFIER}")
endfunction(_Boost_Build)
diff --git a/infra/nnfw/cmake/packages/CpuInfoConfig.cmake b/infra/nnfw/cmake/packages/CpuInfoConfig.cmake
new file mode 100644
index 000000000..dddec8988
--- /dev/null
+++ b/infra/nnfw/cmake/packages/CpuInfoConfig.cmake
@@ -0,0 +1,39 @@
+function(_CpuInfo_Build)
+ nnas_find_package(CpuInfoSource QUIET)
+
+ # NOTE This line prevents multiple definitions of cpuinfo target
+ if(TARGET cpuinfo)
+ set(CpuInfoSource_DIR ${CpuInfoSource_DIR} PARENT_SCOPE)
+ set(CpuInfo_FOUND TRUE PARENT_SCOPE)
+ return()
+ endif(TARGET cpuinfo)
+
+ if(NOT CpuInfoSource_FOUND)
+ message(STATUS "CPUINFO: Source not found")
+ set(CpuInfo_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT CpuInfoSource_FOUND)
+
+ nnas_include(ExternalProjectTools)
+
+ # Set build option
+ # - Static (position independent)
+ # - No logging
+ # - Library only (CPUINFO_RUNTIME_TYPE is not used)
+ set(CPUINFO_LIBRARY_TYPE "static" CACHE STRING "")
+ set(CPUINFO_LOG_LEVEL "none" CACHE STRING "")
+ set(CPUINFO_BUILD_TOOLS OFF CACHE BOOL "")
+ set(CPUINFO_BUILD_BENCHMARKS OFF CACHE BOOL "")
+ set(CPUINFO_BUILD_UNIT_TESTS OFF CACHE BOOL "")
+ set(CPUINFO_BUILD_MOCK_TESTS OFF CACHE BOOL "")
+ add_extdirectory("${CpuInfoSource_DIR}" cpuinfo EXCLUDE_FROM_ALL)
+ set_target_properties(cpuinfo PROPERTIES POSITION_INDEPENDENT_CODE ON)
+ set(CpuInfoSource_DIR ${CpuInfoSource_DIR} PARENT_SCOPE)
+ set(CpuInfo_FOUND TRUE PARENT_SCOPE)
+endfunction(_CpuInfo_Build)
+
+if(BUILD_CPUINFO)
+ _CpuInfo_Build()
+else(BUILD_CPUINFO)
+ set(CpuInfo_FOUND FALSE)
+endif(BUILD_CPUINFO)
diff --git a/infra/nnfw/cmake/packages/EigenConfig.cmake b/infra/nnfw/cmake/packages/EigenConfig.cmake
index e71830a16..15378827e 100644
--- a/infra/nnfw/cmake/packages/EigenConfig.cmake
+++ b/infra/nnfw/cmake/packages/EigenConfig.cmake
@@ -1,5 +1,5 @@
function(_Eigen_import)
- nnas_find_package(TensorFlowEigenSource EXACT 2.3.0 QUIET)
+ nnas_find_package(TensorFlowEigenSource EXACT 2.8.0 QUIET)
if(NOT TensorFlowEigenSource_FOUND)
set(Eigen_FOUND FALSE PARENT_SCOPE)
diff --git a/infra/nnfw/cmake/packages/FarmhashSourceConfig.cmake b/infra/nnfw/cmake/packages/FarmhashSourceConfig.cmake
deleted file mode 100644
index ab53f97b2..000000000
--- a/infra/nnfw/cmake/packages/FarmhashSourceConfig.cmake
+++ /dev/null
@@ -1,19 +0,0 @@
-function(_FarmhashSource_import)
- if(NOT ${DOWNLOAD_FARMHASH})
- set(FarmhashSource_FOUND FALSE PARENT_SCOPE)
- return()
- endif(NOT ${DOWNLOAD_FARMHASH})
-
- nnas_include(ExternalSourceTools)
- nnas_include(OptionTools)
-
- # NOTE TensorFlow 1.12 downloads farmhash from the following URL
- envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
- set(FARMHASH_URL ${EXTERNAL_DOWNLOAD_SERVER}/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz)
- ExternalSource_Download("farmhash" ${FARMHASH_URL})
-
- set(FarmhashSource_DIR ${farmhash_SOURCE_DIR} PARENT_SCOPE)
- set(FarmhashSource_FOUND ${farmhash_SOURCE_GET} PARENT_SCOPE)
-endfunction(_FarmhashSource_import)
-
-_FarmhashSource_import()
diff --git a/infra/nnfw/cmake/packages/FlatBuffersConfig.cmake b/infra/nnfw/cmake/packages/FlatBuffersConfig.cmake
index 13ad1113a..032724ae2 100644
--- a/infra/nnfw/cmake/packages/FlatBuffersConfig.cmake
+++ b/infra/nnfw/cmake/packages/FlatBuffersConfig.cmake
@@ -6,8 +6,8 @@ function(_FlatBuffers_import)
return()
endif(Flatbuffers_FOUND)
- # NOTE Tizen uses 1.11
- nnas_find_package(FlatBuffersSource EXACT 1.11 QUIET)
+ # NOTE Tizen uses 2.0
+ nnas_find_package(FlatBuffersSource EXACT 2.0 QUIET)
if(NOT FlatBuffersSource_FOUND)
set(FlatBuffers_FOUND FALSE PARENT_SCOPE)
diff --git a/infra/nnfw/cmake/packages/Fp16Config.cmake b/infra/nnfw/cmake/packages/Fp16Config.cmake
new file mode 100644
index 000000000..6c31613c0
--- /dev/null
+++ b/infra/nnfw/cmake/packages/Fp16Config.cmake
@@ -0,0 +1,30 @@
+function(_Fp16_Build)
+ nnas_find_package(Fp16Source QUIET)
+
+ # NOTE This line prevents multiple definitions of target
+ if(TARGET fp16)
+ set(Fp16Source_DIR ${Fp16Source_DIR} PARENT_SCOPE)
+ set(Fp16_FOUND TRUE PARENT_SCOPE)
+ return()
+ endif(TARGET fp16)
+
+ if(NOT Fp16Source_FOUND)
+ message(STATUS "FP16: Source not found")
+ set(Fp16_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT Fp16Source_FOUND)
+
+ set(FP16_BUILD_TESTS OFF CACHE BOOL "Build FP16 unit tests")
+ set(FP16_BUILD_BENCHMARKS OFF CACHE BOOL "Build FP16 micro-benchmarks")
+ nnas_find_package(PsimdSource)
+ set(PSIMD_SOURCE_DIR ${PsimdSource_DIR} CACHE STRING "String to disable download PSIMD on fp16")
+ add_extdirectory("${Fp16Source_DIR}" FP16 EXCLUDE_FROM_ALL)
+ set(Fp16Source_DIR ${Fp16Source_DIR} PARENT_SCOPE)
+ set(Fp16_FOUND TRUE PARENT_SCOPE)
+endfunction(_Fp16_Build)
+
+if(BUILD_FP16)
+ _Fp16_Build()
+else()
+ set(Fp16_FOUND FALSE)
+endif()
diff --git a/infra/nnfw/cmake/packages/FxdivConfig.cmake b/infra/nnfw/cmake/packages/FxdivConfig.cmake
new file mode 100644
index 000000000..6f268aec8
--- /dev/null
+++ b/infra/nnfw/cmake/packages/FxdivConfig.cmake
@@ -0,0 +1,29 @@
+function(_Fxdiv_Build)
+ nnas_find_package(FxdivSource QUIET)
+
+ # NOTE This line prevents multiple definitions of target
+ if(TARGET fxdiv)
+ set(FxdivSource_DIR ${FxdivSource_DIR} PARENT_SCOPE)
+ set(Fxdiv_FOUND TRUE PARENT_SCOPE)
+ return()
+ endif(TARGET fxdiv)
+
+ if(NOT FxdivSource_FOUND)
+ message(STATUS "FXDIV: Source not found")
+ set(Fxdiv_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT FxdivSource_FOUND)
+
+ set(FXDIV_BUILD_TESTS OFF CACHE BOOL "Build FXdiv unit tests")
+ set(FXDIV_BUILD_BENCHMARKS OFF CACHE BOOL "Build FXdiv micro-benchmarks")
+
+ add_extdirectory("${FxdivSource_DIR}" FXDIV EXCLUDE_FROM_ALL)
+ set(FxdivSource_DIR ${FxdivSource_DIR} PARENT_SCOPE)
+ set(Fxdiv_FOUND TRUE PARENT_SCOPE)
+endfunction(_Fxdiv_Build)
+
+if(BUILD_FXDIV)
+ _Fxdiv_Build()
+else()
+ set(Fxdiv_FOUND FALSE)
+endif()
diff --git a/infra/nnfw/cmake/packages/GEMMLowpConfig.cmake b/infra/nnfw/cmake/packages/GEMMLowpConfig.cmake
index ddfcc787e..b321961ca 100644
--- a/infra/nnfw/cmake/packages/GEMMLowpConfig.cmake
+++ b/infra/nnfw/cmake/packages/GEMMLowpConfig.cmake
@@ -1,5 +1,5 @@
function(_GEMMLowp_import)
- nnfw_find_package(GEMMLowpSource QUIET)
+ nnas_find_package(GEMMLowpSource QUIET)
if(NOT GEMMLowpSource_FOUND)
set(GEMMLowp_FOUND FALSE PARENT_SCOPE)
diff --git a/infra/nnfw/cmake/packages/GEMMLowpSourceConfig.cmake b/infra/nnfw/cmake/packages/GEMMLowpSourceConfig.cmake
deleted file mode 100644
index 97c8e0597..000000000
--- a/infra/nnfw/cmake/packages/GEMMLowpSourceConfig.cmake
+++ /dev/null
@@ -1,19 +0,0 @@
-function(_GEMMLowpSource_import)
- if(NOT ${DOWNLOAD_GEMMLOWP})
- set(GEMMLowpSource_FOUND FALSE PARENT_SCOPE)
- return()
- endif(NOT ${DOWNLOAD_GEMMLOWP})
-
- nnas_include(ExternalSourceTools)
- nnas_include(OptionTools)
-
- # NOTE TensorFlow 1.12 uses the following URL
- envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
- set(GEMMLOWP_URL ${EXTERNAL_DOWNLOAD_SERVER}/google/gemmlowp/archive/38ebac7b059e84692f53e5938f97a9943c120d98.tar.gz)
- ExternalSource_Download("gemmlowp" ${GEMMLOWP_URL})
-
- set(GEMMLowpSource_DIR ${gemmlowp_SOURCE_DIR} PARENT_SCOPE)
- set(GEMMLowpSource_FOUND ${gemmlowp_SOURCE_GET} PARENT_SCOPE)
-endfunction(_GEMMLowpSource_import)
-
-_GEMMLowpSource_import()
diff --git a/infra/nnfw/cmake/packages/GLib2.0Config.cmake b/infra/nnfw/cmake/packages/GLib2.0Config.cmake
new file mode 100644
index 000000000..d4c6bf241
--- /dev/null
+++ b/infra/nnfw/cmake/packages/GLib2.0Config.cmake
@@ -0,0 +1,41 @@
+function(_GLIB_2_0_import)
+ find_library(GLIB_LIBRARIES
+ NAMES glib-2.0)
+
+ get_filename_component(GLIB_LIBRARY_DIR ${GLIB_LIBRARIES} DIRECTORY)
+ find_path(GLIBCONFIG_INCLUDE_DIR
+ NAMES glibconfig.h
+ PATHS ${GLIB_LIBRARY_DIR}
+ PATH_SUFFIXES glib-2.0/include
+ NO_CMAKE_FIND_ROOT_PATH)
+
+ find_path(GLIB_INCLUDE_DIR
+ NAMES glib.h
+ PATH_SUFFIXES glib-2.0)
+
+ set(GLIB_FOUND TRUE)
+
+ if(NOT GLIB_LIBRARIES)
+ set(GLIB_FOUND FALSE)
+ endif(NOT GLIB_LIBRARIES)
+
+ if(NOT GLIBCONFIG_INCLUDE_DIR)
+ set(GLIB_FOUND FALSE)
+ endif(NOT GLIBCONFIG_INCLUDE_DIR)
+
+ if(NOT GLIB_INCLUDE_DIR)
+ set(GLIB_FOUND FALSE)
+ endif(NOT GLIB_INCLUDE_DIR)
+
+ set(GLIB_INCLUDE_DIRS ${GLIB_INCLUDE_DIR} ${GLIBCONFIG_INCLUDE_DIR})
+
+ if(NOT GLIB_FOUND)
+ message(STATUS "Failed to find GLib 2.0")
+ endif(NOT GLIB_FOUND)
+
+ set(GLIB2.0_FOUND ${GLIB_FOUND} PARENT_SCOPE)
+ set(GLIB2.0_INCLUDE_DIRS ${GLIB_INCLUDE_DIRS} PARENT_SCOPE)
+ set(GLIB2.0_LIBRARIES ${GLIB_LIBRARIES} PARENT_SCOPE)
+endfunction(_GLIB_2_0_import)
+
+_GLIB_2_0_import()
diff --git a/infra/nnfw/cmake/packages/GObject2.0Config.cmake b/infra/nnfw/cmake/packages/GObject2.0Config.cmake
new file mode 100644
index 000000000..f1bfb3aba
--- /dev/null
+++ b/infra/nnfw/cmake/packages/GObject2.0Config.cmake
@@ -0,0 +1,30 @@
+function(_GOBJECT_2_0_import)
+ nnfw_find_package(GLib2.0 REQUIRED)
+
+ find_library(GOBJECT_LIBRARIES
+ NAMES gobject-2.0)
+
+ # The gobject-2.0 requires glib-2.0 and access the header file based on
+ # the glib-2.0 include directory.
+ set(GOBJECT_INCLUDE_DIRS ${GLIB2.0_INCLUDE_DIRS})
+
+ set(GOBJECT_FOUND TRUE)
+
+ if(NOT GOBJECT_LIBRARIES)
+ set(GOBJECT_FOUND FALSE)
+ endif(NOT GOBJECT_LIBRARIES)
+
+ if(NOT GOBJECT_INCLUDE_DIRS)
+ set(GOBJECT_FOUND FALSE)
+ endif(NOT GOBJECT_INCLUDE_DIRS)
+
+ if(NOT GOBJECT_FOUND)
+ message(STATUS "Failed to find gobject-2.0")
+ endif(NOT GOBJECT_FOUND)
+
+ set(GOBJECT2.0_FOUND ${GOBJECT_FOUND} PARENT_SCOPE)
+ set(GOBJECT2.0_INCLUDE_DIRS ${GOBJECT_INCLUDE_DIRS} PARENT_SCOPE)
+ set(GOBJECT2.0_LIBRARIES ${GOBJECT_LIBRARIES} PARENT_SCOPE)
+endfunction(_GOBJECT_2_0_import)
+
+_GOBJECT_2_0_import()
diff --git a/infra/nnfw/cmake/packages/GTestConfig.cmake b/infra/nnfw/cmake/packages/GTestConfig.cmake
index f3aadf998..d0f7b1845 100644
--- a/infra/nnfw/cmake/packages/GTestConfig.cmake
+++ b/infra/nnfw/cmake/packages/GTestConfig.cmake
@@ -1,26 +1,26 @@
-if(${BUILD_GTEST})
- nnas_include(ExternalSourceTools)
- nnas_include(ExternalProjectTools)
- nnas_include(OptionTools)
+if(${DOWNLOAD_GTEST})
+ nnas_find_package(GTestSource QUIET)
- envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
- set(GTEST_URL ${EXTERNAL_DOWNLOAD_SERVER}/google/googletest/archive/release-1.8.0.tar.gz)
- ExternalSource_Download("gtest" ${GTEST_URL})
-
- # gtest_SOURCE_DIR is used in gtest subdirectorty's cmake
- set(sourcedir_gtest ${gtest_SOURCE_DIR})
- unset(gtest_SOURCE_DIR)
+ if(NOT GTestSource_FOUND)
+ set(GTest_FOUND FALSE)
+ return()
+ endif(NOT GTestSource_FOUND)
if(NOT TARGET gtest_main)
- add_extdirectory(${sourcedir_gtest} gtest EXCLUDE_FROM_ALL)
+ nnas_include(ExternalProjectTools)
+ add_extdirectory(${GTestSource_DIR} gtest EXCLUDE_FROM_ALL)
endif(NOT TARGET gtest_main)
set(GTest_FOUND TRUE)
return()
-endif(${BUILD_GTEST})
+endif(${DOWNLOAD_GTEST})
### Find and use pre-installed Google Test
-find_package(GTest)
+if(NOT GTest_FOUND)
+ # Reset package config directory cache to prevent recursive find
+ unset(GTest_DIR CACHE)
+ find_package(GTest)
+endif(NOT GTest_FOUND)
find_package(Threads)
if(${GTEST_FOUND} AND TARGET Threads::Threads)
@@ -44,7 +44,7 @@ if(${GTEST_FOUND} AND TARGET Threads::Threads)
add_library(gmock INTERFACE)
target_include_directories(gmock INTERFACE ${GMOCK_INCLUDE_DIR})
target_link_libraries(gmock INTERFACE ${GMOCK_LIBRARIES} Threads::Threads)
- endif(GMOCK_LIBRARIES)
+ endif(GMOCK_LIBRARIES AND GMOCK_INCLUDE_DIR)
endif(NOT TARGET gmock)
if(NOT TARGET gmock_main)
diff --git a/infra/nnfw/cmake/packages/Gio2.0Config.cmake b/infra/nnfw/cmake/packages/Gio2.0Config.cmake
new file mode 100644
index 000000000..26d36072f
--- /dev/null
+++ b/infra/nnfw/cmake/packages/Gio2.0Config.cmake
@@ -0,0 +1,32 @@
+function(_GIO_2_0_import)
+ nnfw_find_package(GLib2.0 REQUIRED)
+ nnfw_find_package(GObject2.0 REQUIRED)
+
+ find_library(GIO_LIBRARIES
+ NAMES gio-2.0)
+
+ # The gio-2.0 requires glib-2.0 and access the header file based on
+ # the glib-2.0 include directory.
+ set(GIO_INCLUDE_DIRS ${GLIB2.0_INCLUDE_DIRS} ${GOBJECT2.0_INCLUDE_DIRS})
+ set(GIO_LIBRARIES ${GIO_LIBRARIES} ${GOBJECT2.0_LIBRARIES})
+
+ set(GIO_FOUND TRUE)
+
+ if(NOT GIO_LIBRARIES)
+ set(GIO_FOUND FALSE)
+ endif(NOT GIO_LIBRARIES)
+
+ if(NOT GIO_INCLUDE_DIRS)
+ set(GIO_FOUND FALSE)
+ endif(NOT GIO_INCLUDE_DIRS)
+
+ if(NOT GIO_FOUND)
+ message(STATUS "Failed to find gio-2.0")
+ endif(NOT GIO_FOUND)
+
+ set(GIO2.0_FOUND ${GIO_FOUND} PARENT_SCOPE)
+ set(GIO2.0_INCLUDE_DIRS ${GIO_INCLUDE_DIRS} PARENT_SCOPE)
+ set(GIO2.0_LIBRARIES ${GIO_LIBRARIES} PARENT_SCOPE)
+endfunction(_GIO_2_0_import)
+
+_GIO_2_0_import()
diff --git a/infra/nnfw/cmake/packages/Giounix2.0Config.cmake b/infra/nnfw/cmake/packages/Giounix2.0Config.cmake
new file mode 100644
index 000000000..69f5e0e04
--- /dev/null
+++ b/infra/nnfw/cmake/packages/Giounix2.0Config.cmake
@@ -0,0 +1,30 @@
+function(_GIO_UNIX_2_0_import)
+ nnfw_find_package(Gio2.0 REQUIRED)
+
+ find_path(GIO_UNIX_INCLUDE_DIR
+ NAMES gio/gunixfdlist.h
+ PATH_SUFFIXES glib-2.0)
+
+ # The gio-unix-2.0 requires gio-2.0 and link the gio-2.0 library.
+ set(GIO_UNIX_LIBRARIES ${GIO2.0_LIBRARIES})
+
+ set(GIO_UNIX_FOUND TRUE)
+
+ if(NOT GIO_UNIX_LIBRARIES)
+ set(GIO_UNIX_FOUND FALSE)
+ endif(NOT GIO_UNIX_LIBRARIES)
+
+ if(NOT GIO_UNIX_INCLUDE_DIR)
+ set(GIO_UNIX_FOUND FALSE)
+ endif(NOT GIO_UNIX_INCLUDE_DIR)
+
+ if(NOT GIO_UNIX_FOUND)
+ message(STATUS "Failed to find gio-unix-2.0")
+ endif(NOT GIO_UNIX_FOUND)
+
+ set(GIO_UNIX_2.0_FOUND ${GIO_UNIX_FOUND} PARENT_SCOPE)
+ set(GIO_UNIX_2.0_INCLUDE_DIRS ${GIO_UNIX_INCLUDE_DIR} PARENT_SCOPE)
+ set(GIO_UNIX_2.0_LIBRARIES ${GIO_UNIX_LIBRARIES} PARENT_SCOPE)
+endfunction(_GIO_UNIX_2_0_import)
+
+_GIO_UNIX_2_0_import()
diff --git a/infra/nnfw/cmake/packages/LuciConfig.cmake b/infra/nnfw/cmake/packages/LuciConfig.cmake
new file mode 100644
index 000000000..426556b3a
--- /dev/null
+++ b/infra/nnfw/cmake/packages/LuciConfig.cmake
@@ -0,0 +1,43 @@
+# Assume that luci and related libraries and headers are installed on overlay directory
+
+set(Luci_FOUND FALSE)
+
+find_path(LUCI_HEADERS
+ NAMES loco.h luci/IR/CircleNode.h
+ PATHS ${EXT_OVERLAY_DIR}/include)
+
+macro(_load_library LUCI_NAME)
+ add_library(luci::${LUCI_NAME} SHARED IMPORTED)
+ find_library(LUCI_LIB_PATH_${LUCI_NAME} NAMES luci_${LUCI_NAME} PATHS ${EXT_OVERLAY_DIR}/lib)
+ if (NOT LUCI_LIB_PATH_${LUCI_NAME})
+ return()
+ endif()
+ set_target_properties(luci::${LUCI_NAME} PROPERTIES
+ IMPORTED_LOCATION ${LUCI_LIB_PATH_${LUCI_NAME}}
+ INTERFACE_INCLUDE_DIRECTORIES ${LUCI_HEADERS})
+endmacro()
+
+_load_library(env)
+_load_library(export)
+_load_library(import)
+_load_library(lang)
+_load_library(logex)
+_load_library(log)
+_load_library(partition)
+_load_library(pass)
+_load_library(plan)
+_load_library(profile)
+_load_library(service)
+
+# Need luci::loco to avoid "DSO missing from command line" link error
+# TODO Find better way to do this
+add_library(luci::loco SHARED IMPORTED)
+find_library(LOCO_LIB_PATH NAMES loco PATHS ${EXT_OVERLAY_DIR}/lib)
+if (NOT LOCO_LIB_PATH)
+ return()
+endif()
+set_target_properties(luci::loco PROPERTIES
+ IMPORTED_LOCATION ${LOCO_LIB_PATH}
+ INTERFACE_INCLUDE_DIRECTORIES ${LUCI_HEADERS})
+
+set(Luci_FOUND TRUE)
diff --git a/infra/nnfw/cmake/packages/NEON2SSESourceConfig.cmake b/infra/nnfw/cmake/packages/NEON2SSESourceConfig.cmake
deleted file mode 100644
index 7bae616e7..000000000
--- a/infra/nnfw/cmake/packages/NEON2SSESourceConfig.cmake
+++ /dev/null
@@ -1,19 +0,0 @@
-function(_NEON2SSESource_import)
- if(NOT ${DOWNLOAD_NEON2SSE})
- set(NEON2SSESource_FOUND FALSE PARENT_SCOPE)
- return()
- endif(NOT ${DOWNLOAD_NEON2SSE})
-
- nnas_include(ExternalSourceTools)
- nnas_include(OptionTools)
-
- # NOTE TensorFlow 1.12 downloads NEON2SSE from the following URL
- envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
- set(NEON2SSE_URL ${EXTERNAL_DOWNLOAD_SERVER}/intel/ARM_NEON_2_x86_SSE/archive/0f77d9d182265259b135dad949230ecbf1a2633d.tar.gz)
- ExternalSource_Download("neon_2_sse" ${NEON2SSE_URL})
-
- set(NEON2SSESource_DIR ${neon_2_sse_SOURCE_DIR} PARENT_SCOPE)
- set(NEON2SSESource_FOUND ${neon_2_sse_SOURCE_GET} PARENT_SCOPE)
-endfunction(_NEON2SSESource_import)
-
-_NEON2SSESource_import()
diff --git a/infra/nnfw/cmake/packages/PsimdConfig.cmake b/infra/nnfw/cmake/packages/PsimdConfig.cmake
new file mode 100644
index 000000000..a3587b6cf
--- /dev/null
+++ b/infra/nnfw/cmake/packages/PsimdConfig.cmake
@@ -0,0 +1,26 @@
+function(_Psimd_Build)
+ nnas_find_package(PsimdSource QUIET)
+
+ # NOTE This line prevents multiple definitions of target
+ if(TARGET psimd)
+ set(PsimdSource_DIR ${PsimdSource_DIR} PARENT_SCOPE)
+ set(Psimd_FOUND TRUE PARENT_SCOPE)
+ return()
+ endif(TARGET psimd)
+
+ if(NOT PsimdSource_FOUND)
+ message(STATUS "PSIMD: Source not found")
+ set(Psimd_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT PsimdSource_FOUND)
+
+ add_extdirectory("${PsimdSource_DIR}" PSIMD EXCLUDE_FROM_ALL)
+ set(PsimdSource_DIR ${PsimdSource_DIR} PARENT_SCOPE)
+ set(Psimd_FOUND TRUE PARENT_SCOPE)
+endfunction(_Psimd_Build)
+
+if(BUILD_PSIMD)
+ _Psimd_Build()
+else()
+ set(Psimd_FOUND FALSE)
+endif()
diff --git a/infra/nnfw/cmake/packages/PthreadpoolConfig.cmake b/infra/nnfw/cmake/packages/PthreadpoolConfig.cmake
new file mode 100644
index 000000000..6283826f6
--- /dev/null
+++ b/infra/nnfw/cmake/packages/PthreadpoolConfig.cmake
@@ -0,0 +1,35 @@
+function(_Pthreadpool_Build)
+ nnas_find_package(PthreadpoolSource QUIET)
+
+ # NOTE This line prevents multiple definitions of target
+ if(TARGET pthreadpool)
+ set(PthreadpoolSource_DIR ${PthreadpoolSource_DIR} PARENT_SCOPE)
+ set(Pthreadpool_FOUND TRUE PARENT_SCOPE)
+ return()
+ endif(TARGET pthreadpool)
+
+ if(NOT PthreadpoolSource_FOUND)
+ message(STATUS "PTHREADPOOL: Source not found")
+ set(Pthreadpool_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT PthreadpoolSource_FOUND)
+
+  set(PTHREADPOOL_BUILD_TESTS OFF CACHE BOOL "Build pthreadpool unit tests")
+  set(PTHREADPOOL_BUILD_BENCHMARKS OFF CACHE BOOL "Build pthreadpool micro-benchmarks")
+
+ nnas_find_package(FxdivSource)
+ set(FXDIV_SOURCE_DIR ${FxdivSource_DIR} CACHE STRING "String to disable download FXDIV")
+
+ add_extdirectory("${PthreadpoolSource_DIR}" PTHREADPOOL EXCLUDE_FROM_ALL)
+ set_target_properties(pthreadpool PROPERTIES POSITION_INDEPENDENT_CODE ON)
+ # Suppress warnings generated by pthreadpool
+ set_target_properties(pthreadpool PROPERTIES COMPILE_FLAGS "-Wno-deprecated-declarations")
+ set(PthreadpoolSource_DIR ${PthreadpoolSource_DIR} PARENT_SCOPE)
+ set(Pthreadpool_FOUND TRUE PARENT_SCOPE)
+endfunction(_Pthreadpool_Build)
+
+if(BUILD_PTHREADPOOL)
+ _Pthreadpool_Build()
+else()
+ set(Pthreadpool_FOUND FALSE)
+endif()
diff --git a/infra/nnfw/cmake/packages/Ruy/CMakeLists.txt b/infra/nnfw/cmake/packages/Ruy/CMakeLists.txt
index f4d9f8881..a1c4656e3 100644
--- a/infra/nnfw/cmake/packages/Ruy/CMakeLists.txt
+++ b/infra/nnfw/cmake/packages/Ruy/CMakeLists.txt
@@ -1,4 +1,4 @@
-set(RUY_BASE ${RuySource_DIR}/ruy)
+set(RUY_BASE ${TensorFlowRuySource_DIR}/ruy)
#
# Ruy library
@@ -13,6 +13,7 @@ list(REMOVE_ITEM RUY_SRCS "${RUY_BASE}/example.cc")
list(REMOVE_ITEM RUY_SRCS "${RUY_BASE}/example_advanced.cc")
list(REMOVE_ITEM RUY_SRCS "${RUY_BASE}/tune_tool.cc")
list(REMOVE_ITEM RUY_SRCS "${RUY_BASE}/pmu.cc")
+list(REMOVE_ITEM RUY_SRCS "${RUY_BASE}/create_trmul_params.cc")
list(APPEND RUY_INSTRUMENTATION_SRCS "${RUY_BASE}/profiler/instrumentation.cc")
@@ -21,12 +22,16 @@ if(PROFILE_RUY)
list(APPEND RUY_PROFILER_SRCS "${RUY_BASE}/profiler/treeview.cc")
endif(PROFILE_RUY)
-list(APPEND RUY_INCLUDES "${RuySource_DIR}")
+list(APPEND RUY_INCLUDES "${TensorFlowRuySource_DIR}")
add_library(ruy STATIC ${RUY_SRCS})
target_include_directories(ruy SYSTEM PUBLIC ${RUY_INCLUDES})
target_compile_options(ruy PRIVATE -O3)
+target_include_directories(ruy PRIVATE ${CpuInfoSource_DIR})
+target_link_libraries(ruy PRIVATE cpuinfo)
+target_compile_definitions(ruy PRIVATE RUY_HAVE_CPUINFO)
+
add_library(ruy_instrumentation ${RUY_INSTRUMENTATION_SRCS})
target_include_directories(ruy_instrumentation SYSTEM PUBLIC ${RUY_INCLUDES})
target_compile_options(ruy_instrumentation PRIVATE -O3)
diff --git a/infra/nnfw/cmake/packages/RuyConfig.cmake b/infra/nnfw/cmake/packages/RuyConfig.cmake
index 278e33cb3..6f5f4b71e 100644
--- a/infra/nnfw/cmake/packages/RuyConfig.cmake
+++ b/infra/nnfw/cmake/packages/RuyConfig.cmake
@@ -1,22 +1,42 @@
-function(_Ruy_import)
+function(_Ruy_Build)
# NOTE This line prevents multiple definitions of ruy target
if(TARGET ruy)
- set(Ruy_FOUND TRUE)
+ set(Ruy_FOUND TRUE PARENT_SCOPE)
return()
endif(TARGET ruy)
- nnfw_find_package(RuySource QUIET)
+ nnas_find_package(TensorFlowRuySource EXACT 2.8 QUIET)
+ nnfw_find_package(CpuInfo QUIET)
- if(NOT RuySource_FOUND)
+ if(NOT TensorFlowRuySource_FOUND)
+ message(STATUS "RUY: Source not found")
set(Ruy_FOUND FALSE PARENT_SCOPE)
return()
- endif(NOT RuySource_FOUND)
+ endif(NOT TensorFlowRuySource_FOUND)
- if(BUILD_RUY)
- add_extdirectory("${CMAKE_CURRENT_LIST_DIR}/Ruy" ruy)
- endif(BUILD_RUY)
+ if (NOT CpuInfo_FOUND)
+ message(STATUS "RUY: CPUINFO not found")
+ set(Ruy_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT CpuInfo_FOUND)
+
+  # Ruy's own CMakeLists requires cmake >= 3.14
+  # Once cmake >= 3.14 is available, enable the commented-out code below
+ #if(PROFILE_RUY)
+ # # Will be used on ruy build
+ # set(RUY_PROFILER ON)
+ #endif(PROFILE_RUY)
+ #add_extdirectory("${RuySource_DIR}" Ruy)
+ #
+ ## Ignore warning from ruy
+ #target_compile_options(ruy INTERFACE -Wno-comment)
+ add_extdirectory("${CMAKE_CURRENT_LIST_DIR}/Ruy" ruy)
set(Ruy_FOUND TRUE PARENT_SCOPE)
-endfunction(_Ruy_import)
+endfunction(_Ruy_Build)
-_Ruy_import()
+if(BUILD_RUY)
+ _Ruy_Build()
+else(BUILD_RUY)
+  set(Ruy_FOUND FALSE)
+endif(BUILD_RUY)
diff --git a/infra/nnfw/cmake/packages/RuySourceConfig.cmake b/infra/nnfw/cmake/packages/RuySourceConfig.cmake
deleted file mode 100644
index 08170fb4f..000000000
--- a/infra/nnfw/cmake/packages/RuySourceConfig.cmake
+++ /dev/null
@@ -1,19 +0,0 @@
-function(_RuySource_import)
- if(NOT ${DOWNLOAD_RUY})
- set(RuySource_DIR FALSE PARENT_SCOPE)
- return()
- endif(NOT ${DOWNLOAD_RUY})
-
- nnas_include(ExternalSourceTools)
- nnas_include(OptionTools)
-
- # NOTE Downloads source from latest ruy library (2020-04-10)
- envoption(EXTERNAL_DOWNLOAD_SERVER "https://github.com")
- set(RUY_URL ${EXTERNAL_DOWNLOAD_SERVER}/google/ruy/archive/2e2658f964638ab7aa562d4b48b76007d44e38f0.tar.gz)
- ExternalSource_Download("ruy" ${RUY_URL})
-
- set(RuySource_DIR ${ruy_SOURCE_DIR} PARENT_SCOPE)
- set(RuySource_FOUND ${ruy_SOURCE_GET} PARENT_SCOPE)
-endfunction(_RuySource_import)
-
-_RuySource_import()
diff --git a/infra/nnfw/cmake/packages/TRIXEngineConfig.cmake b/infra/nnfw/cmake/packages/TRIXEngineConfig.cmake
new file mode 100644
index 000000000..dfc10ebf2
--- /dev/null
+++ b/infra/nnfw/cmake/packages/TRIXEngineConfig.cmake
@@ -0,0 +1,42 @@
+# Looking for pre-installed TRIX engine package
+set(TRIX_ENGINE_PREFIX "/usr" CACHE PATH "Where to find TRIX engine header and library")
+
+function(_TRIXEngine_import)
+ # Find the header & lib
+ find_library(TRIXEngine_LIB
+ NAMES npu-engine
+ PATHS "${TRIX_ENGINE_PREFIX}/lib"
+ )
+
+ find_path(TRIXEngine_INCLUDE_DIR
+ NAMES libnpuhost.h
+ PATHS "${TRIX_ENGINE_PREFIX}/include/npu-engine"
+ )
+
+ set(TRIXEngine_FOUND TRUE)
+
+ if(NOT TRIXEngine_LIB)
+ set(TRIXEngine_FOUND FALSE)
+ endif(NOT TRIXEngine_LIB)
+
+ if(NOT TRIXEngine_INCLUDE_DIR)
+ set(TRIXEngine_FOUND FALSE)
+ endif(NOT TRIXEngine_INCLUDE_DIR)
+
+ if(NOT TRIXEngine_FOUND)
+ message(STATUS "Failed to find TRIX Engine")
+ else(NOT TRIXEngine_FOUND)
+
+ # Add target
+ if(NOT TARGET trix_engine)
+ add_library(trix_engine INTERFACE)
+ target_link_libraries(trix_engine INTERFACE ${TRIXEngine_LIB})
+ target_include_directories(trix_engine INTERFACE ${TRIXEngine_INCLUDE_DIR})
+ endif(NOT TARGET trix_engine)
+ endif(NOT TRIXEngine_FOUND)
+
+ set(TRIXEngine_FOUND ${TRIXEngine_FOUND} PARENT_SCOPE)
+ set(TRIXEngine_INCLUDE_DIRS ${TRIXEngine_INCLUDE_DIR} PARENT_SCOPE)
+endfunction(_TRIXEngine_import)
+
+_TRIXEngine_import()
diff --git a/infra/nnfw/cmake/packages/TRIXEngineConfigVersion.cmake b/infra/nnfw/cmake/packages/TRIXEngineConfigVersion.cmake
new file mode 100644
index 000000000..0e0a0436e
--- /dev/null
+++ b/infra/nnfw/cmake/packages/TRIXEngineConfigVersion.cmake
@@ -0,0 +1,104 @@
+# This script need to set:
+#
+# VARIABLE | description
+# --- | ---
+# PACKAGE_VERSION | full provided version string
+# PACKAGE_VERSION_EXACT | true if version is exact match
+# PACKAGE_VERSION_COMPATIBLE | true if version is compatible
+# PACKAGE_VERSION_UNSUITABLE | true if unsuitable as any version
+#
+# Reference: https://cmake.org/cmake/help/v3.10/command/find_package.html
+
+set(TRIX_ENGINE_PREFIX "/usr" CACHE PATH "Where to find TRIX engine header and library")
+
+if(NOT PACKAGE_FIND_VERSION)
+ message(FATAL_ERROR "Please pass version requirement to use TRIX Engine dependency")
+endif()
+
+# Find the header & lib from TRIX_ENGINE_PREFIX
+find_library(TRIXEngine_LIB
+ NAMES npu-engine
+ HINTS "${TRIX_ENGINE_PREFIX}/lib"
+)
+find_path(TRIXEngine_INCLUDE_DIR
+ NAMES libnpuhost.h
+ HINTS "${TRIX_ENGINE_PREFIX}/include/npu-engine"
+)
+
+if(NOT TRIXEngine_INCLUDE_DIR OR NOT TRIXEngine_LIB)
+ set(PACKAGE_VERSION_EXACT FALSE)
+ set(PACKAGE_VERSION_COMPATIBLE FALSE)
+ set(PACKAGE_VERSION_UNSUITABLE TRUE)
+ return()
+endif(NOT TRIXEngine_INCLUDE_DIR OR NOT TRIXEngine_LIB)
+
+# TODO Assert TRIX_ENGINE_PREFIX is directory
+
+# TODO Can we run this only once per configure?
+try_run(MAJOR_VER MAJOR_COMPILABLE "${CMAKE_BINARY_DIR}/TRIXEngineConfigVersion.major"
+ SOURCES "${CMAKE_CURRENT_LIST_DIR}/TRIXEngineConfigVersion.major.cpp"
+ CMAKE_FLAGS
+ "-DINCLUDE_DIRECTORIES=${TRIXEngine_INCLUDE_DIR}"
+ "-DLINK_LIBRARIES=${TRIXEngine_LIB}"
+)
+
+if(NOT MAJOR_COMPILABLE)
+ # This means VERSION < 2.2.7
+  # `getVersion` API was introduced in TRIX Engine 2.2.7
+ if(PACKAGE_FIND_VERSION VERSION_GREATER_EQUAL 2.2.7)
+ set(PACKAGE_VERSION_EXACT FALSE)
+ set(PACKAGE_VERSION_COMPATIBLE FALSE)
+ set(PACKAGE_VERSION_UNSUITABLE TRUE)
+ return()
+ else()
+ # TODO How to support this case?
+ message(FATAL_ERROR "TRIX Engine version is too low (< 2.2.7)")
+ endif()
+endif(NOT MAJOR_COMPILABLE)
+
+try_run(MINOR_VER MINOR_COMPILABLE "${CMAKE_BINARY_DIR}/TRIXEngineConfigVersion.minor"
+ SOURCES "${CMAKE_CURRENT_LIST_DIR}/TRIXEngineConfigVersion.minor.cpp"
+ CMAKE_FLAGS
+ "-DINCLUDE_DIRECTORIES=${TRIXEngine_INCLUDE_DIR}"
+ "-DLINK_LIBRARIES=${TRIXEngine_LIB}"
+)
+
+try_run(EXTRA_VER EXTRA_COMPILABLE "${CMAKE_BINARY_DIR}/TRIXEngineConfigVersion.extra"
+ SOURCES "${CMAKE_CURRENT_LIST_DIR}/TRIXEngineConfigVersion.extra.cpp"
+ CMAKE_FLAGS
+ "-DINCLUDE_DIRECTORIES=${TRIXEngine_INCLUDE_DIR}"
+ "-DLINK_LIBRARIES=${TRIXEngine_LIB}"
+)
+
+macro(assert)
+ # if(NOT ${ARGV}) makes error when ARGV starts with 'NOT'
+ if(${ARGV})
+ # Do nothing
+ else(${ARGV})
+ message(FATAL_ERROR "Internal error ${ARGV}")
+ endif(${ARGV})
+endmacro(assert)
+
+assert(MAJOR_COMPILABLE)
+assert(MINOR_COMPILABLE)
+assert(EXTRA_COMPILABLE)
+assert(NOT MAJOR_VER STREQUAL FAILED_TO_RUN)
+assert(NOT MINOR_VER STREQUAL FAILED_TO_RUN)
+assert(NOT EXTRA_VER STREQUAL FAILED_TO_RUN)
+
+set(PACKAGE_VERSION ${MAJOR_VER}.${MINOR_VER}.${EXTRA_VER})
+
+if(PACKAGE_VERSION VERSION_EQUAL PACKAGE_FIND_VERSION)
+ set(PACKAGE_VERSION_EXACT TRUE)
+else()
+ set(PACKAGE_VERSION_EXACT FALSE)
+endif()
+
+# Assume TRIX Engine is backward compatible
+if(PACKAGE_VERSION VERSION_GREATER_EQUAL PACKAGE_FIND_VERSION)
+ set(PACKAGE_VERSION_COMPATIBLE TRUE)
+else()
+ set(PACKAGE_VERSION_COMPATIBLE FALSE)
+endif()
+
+set(PACKAGE_VERSION_UNSUITABLE FALSE)
diff --git a/infra/nnfw/cmake/packages/TRIXEngineConfigVersion.extra.cpp b/infra/nnfw/cmake/packages/TRIXEngineConfigVersion.extra.cpp
new file mode 100644
index 000000000..05fe70ddb
--- /dev/null
+++ b/infra/nnfw/cmake/packages/TRIXEngineConfigVersion.extra.cpp
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2022 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <libnpuhost.h>
+
+int main(void)
+{
+ uint32_t ret = 0;
+ getVersion(nullptr, nullptr, &ret);
+ return ret;
+}
diff --git a/infra/nnfw/cmake/packages/TRIXEngineConfigVersion.major.cpp b/infra/nnfw/cmake/packages/TRIXEngineConfigVersion.major.cpp
new file mode 100644
index 000000000..a3de06d65
--- /dev/null
+++ b/infra/nnfw/cmake/packages/TRIXEngineConfigVersion.major.cpp
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2022 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <libnpuhost.h>
+
+int main(void)
+{
+ uint32_t ret = 0;
+ getVersion(&ret, nullptr, nullptr);
+ return ret;
+}
diff --git a/infra/nnfw/cmake/packages/TRIXEngineConfigVersion.minor.cpp b/infra/nnfw/cmake/packages/TRIXEngineConfigVersion.minor.cpp
new file mode 100644
index 000000000..1193a5c18
--- /dev/null
+++ b/infra/nnfw/cmake/packages/TRIXEngineConfigVersion.minor.cpp
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2022 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <libnpuhost.h>
+
+int main(void)
+{
+ uint32_t ret = 0;
+ getVersion(nullptr, &ret, nullptr);
+ return ret;
+}
diff --git a/infra/nnfw/cmake/packages/TensorFlowEigen-1.13.1/TensorFlowEigenConfig.cmake b/infra/nnfw/cmake/packages/TensorFlowEigen-1.13.1/TensorFlowEigenConfig.cmake
deleted file mode 100644
index 253b290bd..000000000
--- a/infra/nnfw/cmake/packages/TensorFlowEigen-1.13.1/TensorFlowEigenConfig.cmake
+++ /dev/null
@@ -1,19 +0,0 @@
-function(_Eigen_import)
- nnas_find_package(EigenSource QUIET)
-
- if(NOT EigenSource_FOUND)
- set(TensorFlowEigen_1_13_1_FOUND FALSE PARENT_SCOPE)
- return()
- endif(NOT EigenSource_FOUND)
-
- if(NOT TARGET eigen-tf-1.13.1)
- add_library(eigen-tf-1.13.1 INTERFACE)
- target_include_directories(eigen-tf-1.13.1 SYSTEM INTERFACE "${EigenSource_DIR}")
- # Add EIGEN_MPL2_ONLY to remove license issue posibility
- target_compile_definitions(eigen-tf-1.13.1 INTERFACE EIGEN_MPL2_ONLY)
- endif(NOT TARGET eigen-tf-1.13.1)
-
- set(TensorFlowEigen_1_13_1_FOUND TRUE PARENT_SCOPE)
-endfunction(_Eigen_import)
-
-_Eigen_import()
diff --git a/infra/nnfw/cmake/packages/TensorFlowEigen-1.13.1/TensorFlowEigenConfigVersion.cmake b/infra/nnfw/cmake/packages/TensorFlowEigen-1.13.1/TensorFlowEigenConfigVersion.cmake
deleted file mode 100644
index ed79ecd91..000000000
--- a/infra/nnfw/cmake/packages/TensorFlowEigen-1.13.1/TensorFlowEigenConfigVersion.cmake
+++ /dev/null
@@ -1,9 +0,0 @@
-set(PACKAGE_VERSION "1.13.1")
-set(PACKAGE_VERSION_EXACT FALSE)
-set(PACKAGE_VERSION_COMPATIBLE FALSE)
-set(PACKAGE_VERSION_UNSUITABLE TRUE)
-
-if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
- set(PACKAGE_VERSION_EXACT TRUE)
- set(PACKAGE_VERSION_UNSUITABLE FALSE)
-endif(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
diff --git a/infra/nnfw/cmake/packages/TensorFlowGpuConfig.cmake b/infra/nnfw/cmake/packages/TensorFlowGpuConfig.cmake
new file mode 100644
index 000000000..5d20dd3c4
--- /dev/null
+++ b/infra/nnfw/cmake/packages/TensorFlowGpuConfig.cmake
@@ -0,0 +1,51 @@
+# TensorFlowGpuConfig.cmake
+macro(return_unless VAR)
+if(NOT ${VAR})
+ message("TensorFlowGpu: ${VAR} NOT TRUE")
+ set(TensorFlowGpu_FOUND FALSE PARENT_SCOPE)
+ return()
+endif(NOT ${VAR})
+endmacro(return_unless)
+
+function(_Build_TfliteGpuDelegate_)
+  nnas_find_package(TensorFlowSource EXACT 2.8.0 QUIET)
+  return_unless(TensorFlowSource_FOUND)
+
+  nnas_find_package(TensorFlowGEMMLowpSource EXACT 2.8.0 QUIET)
+  return_unless(TensorFlowGEMMLowpSource_FOUND)
+
+  nnas_find_package(TensorFlowEigenSource EXACT 2.8.0 QUIET)
+  return_unless(TensorFlowEigenSource_FOUND)
+
+  nnas_find_package(AbseilSource REQUIRED)
+  return_unless(AbseilSource_FOUND)
+
+  nnas_find_package(Farmhash REQUIRED)
+  return_unless(Farmhash_FOUND)
+
+  nnas_find_package(Fp16Source REQUIRED)
+  return_unless(Fp16Source_FOUND)
+
+  nnas_find_package(VulkanSource QUIET)
+  return_unless(VulkanSource_FOUND)
+
+  nnas_find_package(Opengl_HeadersSource QUIET)
+  return_unless(Opengl_HeadersSource_FOUND)
+
+  nnas_find_package(Egl_HeadersSource QUIET)
+  return_unless(Egl_HeadersSource_FOUND)
+
+  if(NOT TARGET TensorFlowGpu)
+    nnas_include(ExternalProjectTools)
+    add_extdirectory("${CMAKE_CURRENT_LIST_DIR}/TensorFlowLiteGpu" TensorFlowLiteGpu)
+  endif()
+  set(TensorFlowSource_DIR ${TensorFlowSource_DIR} PARENT_SCOPE)
+  set(TensorFlowGpu_DIR ${TensorFlowGpu_DIR} PARENT_SCOPE)
+endfunction(_Build_TfliteGpuDelegate_)
+
+if(BUILD_TENSORFLOW_LITE_GPU)
+  _Build_TfliteGpuDelegate_()
+  set(TensorFlowGpu_FOUND TRUE) # FIXME overwrites the FALSE set by a failed _Build_TfliteGpuDelegate_
+else(BUILD_TENSORFLOW_LITE_GPU)
+  set(TensorFlowGpu_FOUND FALSE)
+endif(BUILD_TENSORFLOW_LITE_GPU)
diff --git a/infra/nnfw/cmake/packages/TensorFlowLite-1.13.1/TensorFlowLite/CMakeLists.txt b/infra/nnfw/cmake/packages/TensorFlowLite-1.13.1/TensorFlowLite/CMakeLists.txt
deleted file mode 100644
index 2c9618d68..000000000
--- a/infra/nnfw/cmake/packages/TensorFlowLite-1.13.1/TensorFlowLite/CMakeLists.txt
+++ /dev/null
@@ -1,62 +0,0 @@
-set(TENSORFLOW_LITE_BASE ${TensorFlowSource_DIR}/tensorflow/lite)
-
-#
-# Tensorflow Lite library
-#
-file(GLOB TFLITE_CORE_SRCS "${TENSORFLOW_LITE_BASE}/*.c" "${TENSORFLOW_LITE_BASE}/*.cc" "${TENSORFLOW_LITE_BASE}/core/*.cc")
-file(GLOB TFLITE_CORE_TESTS "${TENSORFLOW_LITE_BASE}/*test*.cc")
-list(REMOVE_ITEM TFLITE_CORE_SRCS ${TFLITE_CORE_TESTS})
-
-file(GLOB_RECURSE TFLITE_KERNEL_SRCS "${TENSORFLOW_LITE_BASE}/kernels/*.cc")
-file(GLOB_RECURSE TFLITE_KERNEL_TESTS "${TENSORFLOW_LITE_BASE}/kernels/*test*.cc")
-list(REMOVE_ITEM TFLITE_KERNEL_SRCS ${TFLITE_KERNEL_TESTS})
-
-file(GLOB TFLITE_LIB_SRCS "${TENSORFLOW_LITE_BASE}/c/*.c" "${TENSORFLOW_LITE_BASE}/c/*.cc")
-file(GLOB TFLITE_LIB_TESTS "${TENSORFLOW_LITE_BASE}/c/*test*.cc")
-list(REMOVE_ITEM TFLITE_LIB_SRCS ${TFLITE_LIB_TESTS})
-
-file(GLOB TFLITE_API_SRCS "${TENSORFLOW_LITE_BASE}/core/api/*.c" "${TENSORFLOW_LITE_BASE}/core/api/*.cc")
-file(GLOB TFLITE_API_TESTS "${TENSORFLOW_LITE_BASE}/core/api/*test*.cc")
-list(REMOVE_ITEM TFLITE_API_SRCS ${TFLITE_API_TESTS})
-
-file(GLOB TFLITE_PROFILING_SRCS "${TENSORFLOW_LITE_BASE}/profiling/*.cc")
-file(GLOB TFLITE_PROFILING_TESTS "${TENSORFLOW_LITE_BASE}/profiling/*test*.cc")
-list(REMOVE_ITEM TFLITE_PROFILING_SRCS ${TFLITE_PROFILING_TESTS})
-
-# We will use our own BuiltinOpResolver
-list(REMOVE_ITEM TFLITE_KERNEL_SRCS "${TENSORFLOW_LITE_BASE}/kernels/register.cc")
-# We will use our own summarizer
-list(REMOVE_ITEM TFLITE_PROFILING_SRCS "${TENSORFLOW_LITE_BASE}/profiling/profile_summarizer.cc")
-list(APPEND TFLITE_SRCS ${TFLITE_CORE_SRCS})
-list(APPEND TFLITE_SRCS ${TFLITE_KERNEL_SRCS})
-list(APPEND TFLITE_SRCS ${TFLITE_LIB_SRCS})
-list(APPEND TFLITE_SRCS ${TFLITE_API_SRCS})
-list(APPEND TFLITE_SRCS ${TFLITE_PROFILING_SRCS})
-
-list(APPEND TFLITE_SRCS "${FarmhashSource_DIR}/src/farmhash.cc")
-
-list(APPEND TFLITE_INCLUDES "${TensorFlowSource_DIR}")
-list(APPEND TFLITE_INCLUDES "${AbseilSource_DIR}")
-list(APPEND TFLITE_INCLUDES "${GEMMLowpSource_DIR}")
-list(APPEND TFLITE_INCLUDES "${FarmhashSource_DIR}/src")
-
-if(NEON2SSESource_FOUND)
- list(APPEND TFLITE_INCLUDES "${NEON2SSESource_DIR}")
-endif(NEON2SSESource_FOUND)
-
-# This kernels are not used on nnfw
-## spectrogram
-list(REMOVE_ITEM TFLITE_SRCS "${TENSORFLOW_LITE_BASE}/kernels/audio_spectrogram.cc")
-list(REMOVE_ITEM TFLITE_SRCS "${TENSORFLOW_LITE_BASE}/kernels/audio_spectrogram_test.cc")
-list(REMOVE_ITEM TFLITE_SRCS "${TENSORFLOW_LITE_BASE}/kernels/internal/spectrogram.cc")
-
-add_library(tensorflow-lite STATIC ${TFLITE_SRCS})
-target_include_directories(tensorflow-lite SYSTEM PUBLIC ${TFLITE_INCLUDES})
-target_compile_definitions(tensorflow-lite PUBLIC "GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK")
-set_property(TARGET tensorflow-lite PROPERTY POSITION_INDEPENDENT_CODE ON)
-target_link_libraries(tensorflow-lite eigen-tf-1.13.1 flatbuffers::flatbuffers ${LIB_PTHREAD} dl)
-
-if(ANDROID)
- target_link_libraries(tensorflow-lite log)
- target_include_directories(tensorflow-lite PUBLIC "${NDK_DIR}/..")
-endif()
diff --git a/infra/nnfw/cmake/packages/TensorFlowLite-1.13.1/TensorFlowLiteConfig.cmake b/infra/nnfw/cmake/packages/TensorFlowLite-1.13.1/TensorFlowLiteConfig.cmake
deleted file mode 100644
index 4cd7610e6..000000000
--- a/infra/nnfw/cmake/packages/TensorFlowLite-1.13.1/TensorFlowLiteConfig.cmake
+++ /dev/null
@@ -1,73 +0,0 @@
-# NOTE This line prevents multiple definitions of tensorflow-lite target
-if(TARGET tensorflow-lite)
- set(TensorFlowLite_FOUND TRUE)
- return()
-endif(TARGET tensorflow-lite)
-
-if(BUILD_TENSORFLOW_LITE)
- macro(return_unless VAR)
- if(NOT ${VAR})
- set(TensorFlowLite_FOUND PARENT_SCOPE)
- return()
- endif(NOT ${VAR})
- endmacro(return_unless)
-
- # Required packages
- nnas_find_package(AbseilSource QUIET)
- return_unless(AbseilSource_FOUND)
- nnfw_find_package(TensorFlowEigen EXACT 1.13.1 QUIET)
- return_unless(TensorFlowEigen_1_13_1_FOUND)
- nnfw_find_package(FarmhashSource QUIET)
- return_unless(FarmhashSource_FOUND)
- nnfw_find_package(FlatBuffers QUIET)
- return_unless(FlatBuffers_FOUND)
- nnfw_find_package(GEMMLowpSource QUIET)
- return_unless(GEMMLowpSource_FOUND)
- nnas_find_package(TensorFlowSource EXACT 1.13.1 QUIET)
- return_unless(TensorFlowSource_FOUND)
-
- # Optional packages
- nnfw_find_package(NEON2SSESource QUIET)
-
- nnas_include(ExternalProjectTools)
- add_extdirectory("${CMAKE_CURRENT_LIST_DIR}/TensorFlowLite" tflite)
-
- set(TensorFlowLite_FOUND TRUE)
- return()
-endif(BUILD_TENSORFLOW_LITE)
-
-# Use pre-built TensorFlow Lite
-find_path(TFLITE_INCLUDE_DIR NAMES tensorflow/lite/interpreter.h)
-find_library(TFLITE_LIB NAMES tensorflow-lite)
-
-if(NOT TFLITE_INCLUDE_DIR)
- set(TensorFlowLite_FOUND FALSE)
- return()
-endif(NOT TFLITE_INCLUDE_DIR)
-
-if(NOT TFLITE_LIB)
- set(TensorFlowLite_FOUND FALSE)
- return()
-endif(NOT TFLITE_LIB)
-
-message(STATUS "Found TensorFlow Lite: TRUE (include: ${TFLITE_INCLUDE_DIR}, lib: ${TFLITE_LIB}")
-
-# TODO Use IMPORTED target
-add_library(tensorflow-lite INTERFACE)
-target_include_directories(tensorflow-lite SYSTEM INTERFACE ${TFLITE_INCLUDE_DIR})
-target_link_libraries(tensorflow-lite INTERFACE ${TFLITE_LIB})
-find_package(Flatbuffers)
-if(Flatbuffers_FOUND)
- target_link_libraries(tensorflow-lite INTERFACE flatbuffers::flatbuffers)
-endif(Flatbuffers_FOUND)
-
-# Prefer -pthread to -lpthread
-set(THREADS_PREFER_PTHREAD_FLAG TRUE)
-set(CMAKE_THREAD_PREFER_PTHREAD TRUE)
-find_package(Threads QUIET)
-
-if(Threads_FOUND)
- target_link_libraries(tensorflow-lite INTERFACE ${CMAKE_THREAD_LIBS_INIT})
-endif(Threads_FOUND)
-
-set(TensorFlowLite_FOUND TRUE)
diff --git a/infra/nnfw/cmake/packages/TensorFlowLite-1.13.1/TensorFlowLiteConfigVersion.cmake b/infra/nnfw/cmake/packages/TensorFlowLite-1.13.1/TensorFlowLiteConfigVersion.cmake
deleted file mode 100644
index ed79ecd91..000000000
--- a/infra/nnfw/cmake/packages/TensorFlowLite-1.13.1/TensorFlowLiteConfigVersion.cmake
+++ /dev/null
@@ -1,9 +0,0 @@
-set(PACKAGE_VERSION "1.13.1")
-set(PACKAGE_VERSION_EXACT FALSE)
-set(PACKAGE_VERSION_COMPATIBLE FALSE)
-set(PACKAGE_VERSION_UNSUITABLE TRUE)
-
-if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
- set(PACKAGE_VERSION_EXACT TRUE)
- set(PACKAGE_VERSION_UNSUITABLE FALSE)
-endif(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
diff --git a/infra/nnfw/cmake/packages/TensorFlowLite-2.3.0/CMakeLists.txt b/infra/nnfw/cmake/packages/TensorFlowLite-2.3.0/CMakeLists.txt
deleted file mode 100644
index 20547b92d..000000000
--- a/infra/nnfw/cmake/packages/TensorFlowLite-2.3.0/CMakeLists.txt
+++ /dev/null
@@ -1,123 +0,0 @@
-# Reference: https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/lite/tools/make/Makefile
-#
-# Tensorflow Lite library 2.3.0
-#
-set(TENSORFLOW_LITE_BASE ${TFLiteVanillaTensorFlowSource_DIR}/tensorflow/lite)
-
-file(GLOB TFLITE_CORE_SRCS "${TENSORFLOW_LITE_BASE}/*.c"
- "${TENSORFLOW_LITE_BASE}/*.cc"
- "${TENSORFLOW_LITE_BASE}/core/*.cc")
-
-file(GLOB_RECURSE TFLITE_KERNEL_SRCS "${TENSORFLOW_LITE_BASE}/kernels/*.cc")
-
-file(GLOB TFLITE_LIB_SRCS "${TENSORFLOW_LITE_BASE}/c/*.c" "${TENSORFLOW_LITE_BASE}/c/*.cc")
-
-file(GLOB TFLITE_API_SRCS "${TENSORFLOW_LITE_BASE}/core/api/*.c"
- "${TENSORFLOW_LITE_BASE}/core/api/*.cc")
-
-list(APPEND TFLITE_PROFILING_SRCS "${TENSORFLOW_LITE_BASE}/profiling/memory_info.cc")
-list(APPEND TFLITE_PROFILING_SRCS "${TENSORFLOW_LITE_BASE}/profiling/time.cc")
-
-file(GLOB TFLITE_EXPERIMENTAL_SRCS "${TENSORFLOW_LITE_BASE}/experimental/resource/*.cc")
-
-file(GLOB TFLITE_SPARSITY_SRCS "${TENSORFLOW_LITE_BASE}/tools/optimize/sparsity/*.cc")
-
-list(APPEND TFLITE_SRCS ${TFLITE_CORE_SRCS})
-list(APPEND TFLITE_SRCS ${TFLITE_KERNEL_SRCS})
-list(APPEND TFLITE_SRCS ${TFLITE_LIB_SRCS})
-list(APPEND TFLITE_SRCS ${TFLITE_API_SRCS})
-list(APPEND TFLITE_SRCS ${TFLITE_PROFILING_SRCS})
-list(APPEND TFLITE_SRCS ${TFLITE_EXPERIMENTAL_SRCS})
-list(APPEND TFLITE_SRCS ${TFLITE_SPARSITY_SRCS})
-
-# externals
-list(APPEND TFLITE_SRCS "${TFLiteVanillaFarmhashSource_DIR}/src/farmhash.cc")
-list(APPEND TFLITE_SRCS "${TFLiteVanillaFFT2DSource_DIR}/fftsg.c")
-list(APPEND TFLITE_SRCS "${TFLiteVanillaFFT2DSource_DIR}/fftsg2d.c")
-list(APPEND TFLITE_SRCS "${TFLiteVanillaFlatBuffersSource_DIR}/src/util.cpp")
-
-# externals - absl
-file(GLOB_RECURSE ABSL_SRCS "${TFLiteVanillaAbslSource_DIR}/absl/*.cc")
-file(GLOB_RECURSE ABSL_EXCLS "${TFLiteVanillaAbslSource_DIR}/absl/*test*.cc"
- "${TFLiteVanillaAbslSource_DIR}/absl/*benchmark*.cc"
- "${TFLiteVanillaAbslSource_DIR}/absl/synchronization/*.cc"
- "${TFLiteVanillaAbslSource_DIR}/absl/debugging/*.cc"
- "${TFLiteVanillaAbslSource_DIR}/absl/hash/*.cc"
- "${TFLiteVanillaAbslSource_DIR}/absl/flags/*.cc"
- "${TFLiteVanillaAbslSource_DIR}/absl/random/*.cc")
-list(REMOVE_ITEM ABSL_SRCS ${ABSL_EXCLS})
-list(APPEND TFLITE_SRCS ${ABSL_SRCS})
-
-# externals - ruy
-file(GLOB RUY_SRCS "${TFLiteVanillaRuySource_DIR}/ruy/*.cc")
-file(GLOB_RECURSE RUY_EXCLS "${TFLiteVanillaRuySource_DIR}/ruy/*test*.cc"
- "${TFLiteVanillaRuySource_DIR}/ruy/*benchmark*.cc"
- "${TFLiteVanillaRuySource_DIR}/ruy/*example*.cc")
-list(REMOVE_ITEM RUY_SRCS ${RUY_EXCLS})
-# Temporary fix for ruy compilation error.
-# TODO(b/158800055): Remove this hack once the ruy version is correctly bumped.
-list(REMOVE_ITEM RUY_SRCS "${TFLiteVanillaRuySource_DIR}/ruy/prepare_packed_matrices.cc")
-list(APPEND TFLITE_SRCS ${RUY_SRCS})
-
-
-# Build with mmap? true
-# caution: v2.3.0's Makefile has wrong code on this part. This is fixed on master branch.
-set(BUILD_WITH_MMAP TRUE)
-if(${BUILD_WITH_MMAP})
- list(REMOVE_ITEM TFLITE_SRCS "${TENSORFLOW_LITE_BASE}/mmap_allocation_disabled.cc")
-else()
- list(REMOVE_ITEM TFLITE_SRCS "${TENSORFLOW_LITE_BASE}/mmap_allocation.cc")
-endif()
-
-# Build with nnapi? true
-# caution: this nnapi delegate comes from tflite, not ours.
-set(BUILD_WITH_NNAPI TRUE)
-if(${BUILD_WITH_NNAPI})
- list(APPEND TFLITE_SRCS "${TENSORFLOW_LITE_BASE}/delegates/nnapi/nnapi_delegate.cc")
- list(APPEND TFLITE_SRCS "${TENSORFLOW_LITE_BASE}/delegates/nnapi/quant_lstm_sup.cc")
- list(APPEND TFLITE_SRCS "${TENSORFLOW_LITE_BASE}/nnapi/nnapi_implementation.cc")
- list(APPEND TFLITE_SRCS "${TENSORFLOW_LITE_BASE}/nnapi/nnapi_util.cc")
-else()
- list(APPEND TFLITE_SRCS "${TENSORFLOW_LITE_BASE}/delegates/nnapi/nnapi_delegate_disabled.cc")
- list(APPEND TFLITE_SRCS "${TENSORFLOW_LITE_BASE}/nnapi/nnapi_implementation_disabled.cc")
-endif()
-
-# ios: we don't support ios
-list(REMOVE_ITEM TFLITE_SRCS "${TENSORFLOW_LITE_BASE}/minimal_logging_ios.cc")
-
-# android
-if(NOT ANDROID)
- list(REMOVE_ITEM TFLITE_SRCS "${TENSORFLOW_LITE_BASE}/minimal_logging_android.cc")
-endif()
-
-# exclude some source files
-file(GLOB_RECURSE TFLITE_EXCLS "${TENSORFLOW_LITE_BASE}/*test*.cc"
- "${TENSORFLOW_LITE_BASE}/*benchmark*.cc"
- "${TENSORFLOW_LITE_BASE}/*example*.cc"
- "${TENSORFLOW_LITE_BASE}/*tool*.cc")
-list(REMOVE_ITEM TFLITE_SRCS ${TFLITE_EXCLS})
-
-# include headers
-list(APPEND TFLITE_INCLUDES "${TFLiteVanillaTensorFlowSource_DIR}")
-list(APPEND TFLITE_INCLUDES "${TFLiteVanillaEigenSource_DIR}")
-list(APPEND TFLITE_INCLUDES "${TFLiteVanillaAbslSource_DIR}")
-list(APPEND TFLITE_INCLUDES "${TFLiteVanillaGEMMLowpSource_DIR}")
-list(APPEND TFLITE_INCLUDES "${TFLiteVanillaNEON2SSESource_DIR}")
-list(APPEND TFLITE_INCLUDES "${TFLiteVanillaFarmhashSource_DIR}/src")
-list(APPEND TFLITE_INCLUDES "${TFLiteVanillaFlatBuffersSource_DIR}/include")
-list(APPEND TFLITE_INCLUDES "${TFLiteVanillaFP16Source_DIR}/include")
-list(APPEND TFLITE_INCLUDES "${TFLiteVanillaRuySource_DIR}")
-
-add_library(tensorflow-lite-2.3.0 STATIC ${TFLITE_SRCS})
-target_include_directories(tensorflow-lite-2.3.0 SYSTEM PUBLIC ${TFLITE_INCLUDES})
-target_compile_definitions(tensorflow-lite-2.3.0 PUBLIC "GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK -DTFLITE_WITH_RUY -DTFLITE_WITH_RUY_GEMV")
-set_property(TARGET tensorflow-lite-2.3.0 PROPERTY POSITION_INDEPENDENT_CODE ON)
-target_link_libraries(tensorflow-lite-2.3.0 eigen ${LIB_PTHREAD} dl)
-if(NOT ANDROID AND ${BUILD_WITH_NNAPI})
- target_link_libraries(tensorflow-lite-2.3.0 rt)
-endif()
-
-if(ANDROID)
- target_link_libraries(tensorflow-lite-2.3.0 log)
- target_include_directories(tensorflow-lite-2.3.0 PUBLIC "${NDK_DIR}/..")
-endif()
diff --git a/infra/nnfw/cmake/packages/TensorFlowLite-2.3.0Config.cmake b/infra/nnfw/cmake/packages/TensorFlowLite-2.3.0Config.cmake
deleted file mode 100644
index d00ca96a6..000000000
--- a/infra/nnfw/cmake/packages/TensorFlowLite-2.3.0Config.cmake
+++ /dev/null
@@ -1,100 +0,0 @@
-if(BUILD_TENSORFLOW_LITE_2_3_0)
- macro(return_unless VAR)
- if(NOT ${VAR})
- message("${VAR} NOT TRUE")
- set(TensorFlowLite_2_3_0_FOUND PARENT_SCOPE)
- return()
- endif(NOT ${VAR})
- endmacro(return_unless)
-
- nnas_include(ExternalSourceTools)
- nnas_include(OptionTools)
-
- # Below urls come from https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/lite/tools/make/Makefile
-
- set(absl_url "https://github.com/abseil/abseil-cpp/archive/df3ea785d8c30a9503321a3d35ee7d35808f190d.tar.gz")
- ExternalSource_Download("TFLiteVanilla_Absl" ${absl_url})
- set(TFLiteVanillaAbslSource_DIR "${TFLiteVanilla_Absl_SOURCE_DIR}")
- if (NOT TFLiteVanillaAbslSource_DIR STREQUAL "")
- set(TFLiteVanillaAbslSource_FOUND TRUE)
- endif()
- return_unless(TFLiteVanillaAbslSource_FOUND)
-
- set(eigen_url "https://gitlab.com/libeigen/eigen/-/archive/386d809bde475c65b7940f290efe80e6a05878c4/eigen-386d809bde475c65b7940f290efe80e6a05878c4.tar.gz")
- ExternalSource_Download("TFLiteVanilla_Eigen" ${eigen_url})
- set(TFLiteVanillaEigenSource_DIR "${TFLiteVanilla_Eigen_SOURCE_DIR}")
- if (NOT TFLiteVanillaEigenSource_DIR STREQUAL "")
- set(TFLiteVanillaEigenSource_FOUND TRUE)
- endif()
- return_unless(TFLiteVanillaEigenSource_FOUND)
-
- set(farmhash_url "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz")
- ExternalSource_Download("TFLiteVanilla_Farmhash" ${farmhash_url})
- set(TFLiteVanillaFarmhashSource_DIR "${TFLiteVanilla_Farmhash_SOURCE_DIR}")
- if (NOT TFLiteVanillaFarmhashSource_DIR STREQUAL "")
- set(TFLiteVanillaFarmhashSource_FOUND TRUE)
- endif()
- return_unless(TFLiteVanillaFarmhashSource_FOUND)
-
- set(fft2d_url "https://storage.googleapis.com/mirror.tensorflow.org/github.com/petewarden/OouraFFT/archive/v1.0.tar.gz")
- ExternalSource_Download("TFLiteVanilla_FFT2D" ${fft2d_url})
- set(TFLiteVanillaFFT2DSource_DIR "${TFLiteVanilla_FFT2D_SOURCE_DIR}")
- if (NOT TFLiteVanillaFFT2DSource_DIR STREQUAL "")
- set(TFLiteVanillaFFT2DSource_FOUND TRUE)
- endif()
- return_unless(TFLiteVanillaFFT2DSource_FOUND)
-
- set(flatbuffers_url "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/flatbuffers/archive/v1.12.0.tar.gz")
- ExternalSource_Download("TFLiteVanilla_FlatBuffers" ${flatbuffers_url})
- set(TFLiteVanillaFlatBuffersSource_DIR "${TFLiteVanilla_FlatBuffers_SOURCE_DIR}")
- if (NOT TFLiteVanillaFlatBuffersSource_DIR STREQUAL "")
- set(TFLiteVanillaFlatBuffersSource_FOUND TRUE)
- endif()
- return_unless(TFLiteVanillaFlatBuffersSource_FOUND)
-
- set(fp16_url "https://github.com/Maratyszcza/FP16/archive/4dfe081cf6bcd15db339cf2680b9281b8451eeb3.zip")
- ExternalSource_Download("TFLiteVanilla_FP16" ${fp16_url})
- set(TFLiteVanillaFP16Source_DIR "${TFLiteVanilla_FP16_SOURCE_DIR}")
- if (NOT TFLiteVanillaFP16Source_DIR STREQUAL "")
- set(TFLiteVanillaFP16Source_FOUND TRUE)
- endif()
- return_unless(TFLiteVanillaFP16Source_FOUND)
-
- set(gemmlowp_url "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/gemmlowp/archive/fda83bdc38b118cc6b56753bd540caa49e570745.zip")
- ExternalSource_Download("TFLiteVanilla_GEMMLowp" ${gemmlowp_url})
- set(TFLiteVanillaGEMMLowpSource_DIR "${TFLiteVanilla_GEMMLowp_SOURCE_DIR}")
- if (NOT TFLiteVanillaGEMMLowpSource_DIR STREQUAL "")
- set(TFLiteVanillaGEMMLowpSource_FOUND TRUE)
- endif()
- return_unless(TFLiteVanillaGEMMLowpSource_FOUND)
-
- set(neon2sse_url "https://github.com/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz")
- ExternalSource_Download("TFLiteVanilla_NEON2SSE" ${neon2sse_url})
- set(TFLiteVanillaNEON2SSESource_DIR "${TFLiteVanilla_NEON2SSE_SOURCE_DIR}")
- if (NOT TFLiteVanillaNEON2SSESource_DIR STREQUAL "")
- set(TFLiteVanillaNEON2SSESource_FOUND TRUE)
- endif()
- return_unless(TFLiteVanillaNEON2SSESource_FOUND)
-
- set(tensorflow_url "https://github.com/tensorflow/tensorflow/archive/v2.3.0.tar.gz")
- ExternalSource_Download("TFLiteVanilla_TensorFlow" ${tensorflow_url})
- set(TFLiteVanillaTensorFlowSource_DIR "${TFLiteVanilla_TensorFlow_SOURCE_DIR}")
- if (NOT TFLiteVanillaTensorFlowSource_DIR STREQUAL "")
- set(TFLiteVanillaTensorFlowSource_FOUND TRUE)
- endif()
- return_unless(TFLiteVanillaTensorFlowSource_FOUND)
-
- set(ruy_url "https://github.com/google/ruy/archive/34ea9f4993955fa1ff4eb58e504421806b7f2e8f.zip")
- ExternalSource_Download("TFLiteVanilla_Ruy" ${ruy_url})
- set(TFLiteVanillaRuySource_DIR "${TFLiteVanilla_Ruy_SOURCE_DIR}")
- if (NOT TFLiteVanillaRuySource_DIR STREQUAL "")
- set(TFLiteVanillaRuySource_FOUND TRUE)
- endif()
- return_unless(TFLiteVanillaRuySource_FOUND)
-
- nnas_include(ExternalProjectTools)
- add_extdirectory("${CMAKE_CURRENT_LIST_DIR}/TensorFlowLite-2.3.0" tflite-2.3.0)
-
- set(TensorFlowLite_2_3_0_FOUND TRUE)
- return()
-endif()
diff --git a/infra/nnfw/cmake/packages/TensorFlowLite-2.8.0/TensorFlowLite/CMakeLists.txt b/infra/nnfw/cmake/packages/TensorFlowLite-2.8.0/TensorFlowLite/CMakeLists.txt
new file mode 100644
index 000000000..cbc10d279
--- /dev/null
+++ b/infra/nnfw/cmake/packages/TensorFlowLite-2.8.0/TensorFlowLite/CMakeLists.txt
@@ -0,0 +1,185 @@
+# Reference: https://github.com/tensorflow/tensorflow/blob/v2.8.0/tensorflow/lite/CMakeLists.txt
+#
+# Tensorflow Lite library 2.8.0
+#
+set(TFLITE_SOURCE_DIR ${TensorFlowSource_DIR}/tensorflow/lite)
+
+# Generate TensorFlow Lite FlatBuffer code.
+# We used to have an actual compilation logic with flatc but decided to use
+# schema_generated.h since flatc doesn't work with cross compilation.
+set(TFLITE_FLATBUFFERS_SCHEMA_DIR "${TFLITE_SOURCE_DIR}/schema")
+
+macro(populate_source_vars SOURCE_DIR SOURCES_VAR)
+ cmake_parse_arguments(ARGS "RECURSE" "" "FILTER" ${ARGN})
+ if(ARGS_RECURSE)
+ set(GLOB_OP GLOB_RECURSE)
+ else()
+ set(GLOB_OP GLOB)
+ endif()
+ set(DEFAULT_FILE_FILTER ".*(_test|test_util)\\.(c|cc|h)$")
+ file(${GLOB_OP} FOUND_SOURCES "${SOURCE_DIR}/*.*")
+ list(FILTER FOUND_SOURCES INCLUDE REGEX ".*\\.(c|cc|h)$")
+ list(FILTER FOUND_SOURCES EXCLUDE REGEX "${DEFAULT_FILE_FILTER}")
+ foreach(FILE_FILTER ${ARGS_FILTER})
+ list(FILTER FOUND_SOURCES EXCLUDE REGEX "${FILE_FILTER}")
+ endforeach()
+ list(APPEND ${SOURCES_VAR} ${FOUND_SOURCES})
+endmacro()
+# Simplifies inclusion of non-test sources and headers from a directory
+# relative to TFLITE_SOURCE_DIR. See populate_source_vars() for the
+# description of arguments including and following SOURCES_VAR.
+macro(populate_tflite_source_vars RELATIVE_DIR SOURCES_VAR)
+ populate_source_vars(
+ "${TFLITE_SOURCE_DIR}/${RELATIVE_DIR}" ${SOURCES_VAR} ${ARGN}
+ )
+endmacro()
+
+# Build a list of source files to compile into the TF Lite library.
+populate_tflite_source_vars("." TFLITE_SRCS)
+
+# This particular file is excluded because the more explicit approach to enable
+# XNNPACK delegate is preferred to the weak-symbol one.
+list(FILTER TFLITE_SRCS EXCLUDE REGEX ".*tflite_with_xnnpack\\.cc$")
+
+# Exclude Flex related files.
+list(FILTER TFLITE_SRCS EXCLUDE REGEX ".*with_selected_ops\\.cc$")
+
+# Use MMAP
+list(FILTER TFLITE_SRCS EXCLUDE REGEX ".*mmap_allocation_disabled\\.cc$")
+
+if(NOT "${CMAKE_SYSTEM_NAME}" STREQUAL "Android")
+ list(FILTER TFLITE_SRCS EXCLUDE REGEX ".*minimal_logging_android\\.cc$")
+endif()
+if(NOT "${CMAKE_SYSTEM_NAME}" STREQUAL "iOS")
+ list(FILTER TFLITE_SRCS EXCLUDE REGEX ".*minimal_logging_ios\\.cc$")
+endif()
+
+populate_tflite_source_vars("core" TFLITE_CORE_SRCS)
+populate_tflite_source_vars("core/api" TFLITE_CORE_API_SRCS)
+populate_tflite_source_vars("c" TFLITE_C_SRCS)
+populate_tflite_source_vars("delegates" TFLITE_DELEGATES_SRCS)
+
+# Enable NNAPI
+populate_tflite_source_vars("delegates/nnapi"
+TFLITE_DELEGATES_NNAPI_SRCS
+FILTER "(_test_list|_disabled)\\.(cc|h)$"
+)
+populate_tflite_source_vars(
+"nnapi" TFLITE_NNAPI_SRCS FILTER "(_disabled)\\.(cc|h)$"
+)
+
+# Disable XNNPack
+
+# Enable experimental support for resource (needed for build success)
+populate_tflite_source_vars("experimental/resource"
+TFLITE_EXPERIMENTAL_RESOURCE_SRCS
+)
+
+# Enable Ruy
+populate_tflite_source_vars("experimental/ruy"
+ TFLITE_EXPERIMENTAL_RUY_SRCS
+ FILTER
+ ".*(test(_fast|_slow|_special_specs))\\.(cc|h)$"
+ ".*(benchmark|tune_tool|example)\\.(cc|h)$"
+)
+populate_tflite_source_vars("experimental/ruy/profiler"
+ TFLITE_EXPERIMENTAL_RUY_PROFILER_SRCS
+ FILTER ".*(test|test_instrumented_library)\\.(cc|h)$"
+)
+list(APPEND TFLITE_TARGET_PUBLIC_OPTIONS "-DTFLITE_WITH_RUY")
+
+populate_tflite_source_vars("kernels"
+ TFLITE_KERNEL_SRCS
+ FILTER "(.*_test_util_internal|test_.*|.*_ops_wrapper)\\.(cc|h)"
+)
+populate_tflite_source_vars("kernels/internal" TFLITE_KERNEL_INTERNAL_SRCS)
+populate_tflite_source_vars("kernels/internal/optimized"
+ TFLITE_KERNEL_INTERNAL_OPT_SRCS
+)
+populate_tflite_source_vars("kernels/internal/optimized/integer_ops"
+ TFLITE_KERNEL_INTERNAL_OPT_INTEGER_OPS_SRCS
+)
+populate_tflite_source_vars("kernels/internal/optimized/sparse_ops"
+ TFLITE_KERNEL_INTERNAL_OPT_SPARSE_OPS_SRCS
+)
+populate_tflite_source_vars("kernels/internal/reference"
+ TFLITE_KERNEL_INTERNAL_REF_SRCS
+)
+populate_tflite_source_vars("kernels/internal/reference/integer_ops"
+ TFLITE_KERNEL_INTERNAL_REF_INTEGER_OPS_SRCS
+)
+populate_tflite_source_vars("kernels/internal/reference/sparse_ops"
+ TFLITE_KERNEL_INTERNAL_REF_SPARSE_OPS_SRCS
+)
+set(TFLITE_PROFILER_SRCS ${TFLITE_SOURCE_DIR}/profiling/platform_profiler.cc)
+if(CMAKE_SYSTEM_NAME MATCHES "Android")
+ list(APPEND TFLITE_PROFILER_SRCS
+ ${TFLITE_SOURCE_DIR}/profiling/atrace_profiler.cc
+ )
+endif()
+
+# Common include directories
+set(TFLITE_INCLUDE_DIRS
+ "${TENSORFLOW_SOURCE_DIR}"
+ "${TFLITE_FLATBUFFERS_SCHEMA_DIR}"
+)
+
+# include headers
+list(APPEND TFLITE_INCLUDE_DIRS "${TensorFlowSource_DIR}")
+list(APPEND TFLITE_INCLUDE_DIRS "${TensorFlowGEMMLowpSource_DIR}")
+list(APPEND TFLITE_INCLUDE_DIRS "${Fp16Source_DIR}/include")
+#list(APPEND TFLITE_INCLUDE_DIRS "${Pybind11Source_DIR}/include")
+list(APPEND TFLITE_INCLUDE_DIRS "${CpuInfoSource_DIR}")
+
+if(NEON2SSESource_FOUND)
+ list(APPEND TFLITE_INCLUDE_DIRS "${NEON2SSESource_DIR}")
+endif(NEON2SSESource_FOUND)
+
+# TFLite library
+add_library(tensorflow-lite-2.8.0 STATIC
+ ${TFLITE_CORE_API_SRCS}
+ ${TFLITE_CORE_SRCS}
+ ${TFLITE_C_SRCS}
+ ${TFLITE_DELEGATES_NNAPI_SRCS}
+ ${TFLITE_DELEGATES_SRCS}
+ ${TFLITE_EXPERIMENTAL_RESOURCE_SRCS}
+ ${TFLITE_EXPERIMENTAL_RUY_PROFILER_SRCS}
+ ${TFLITE_EXPERIMENTAL_RUY_SRCS}
+ ${TFLITE_KERNEL_INTERNAL_OPT_INTEGER_OPS_SRCS}
+ ${TFLITE_KERNEL_INTERNAL_OPT_SPARSE_OPS_SRCS}
+ ${TFLITE_KERNEL_INTERNAL_OPT_SRCS}
+ ${TFLITE_KERNEL_INTERNAL_REF_INTEGER_OPS_SRCS}
+ ${TFLITE_KERNEL_INTERNAL_REF_SPARSE_OPS_SRCS}
+ ${TFLITE_KERNEL_INTERNAL_REF_SRCS}
+ ${TFLITE_KERNEL_INTERNAL_SRCS}
+ ${TFLITE_KERNEL_SRCS}
+ ${TFLITE_NNAPI_SRCS}
+ ${TFLITE_SRCS}
+ ${TFLITE_PROFILER_SRCS}
+ ${TFLITE_SOURCE_DIR}/kernels/internal/utils/sparsity_format_converter.cc
+ ${TFLITE_SOURCE_DIR}/schema/schema_utils.cc
+ ${OouraFFTSource_DIR}/fftsg.c
+ ${OouraFFTSource_DIR}/fftsg2d.c
+)
+target_include_directories(tensorflow-lite-2.8.0
+ SYSTEM PUBLIC
+ ${TFLITE_INCLUDE_DIRS}
+)
+
+target_compile_definitions(tensorflow-lite-2.8.0 PUBLIC "GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK -DTFLITE_WITH_RUY -DTFLITE_WITH_RUY_GEMV -DRUY_HAVE_CPUINFO -DNNAPI_VERBOSE_VALIDATION")
+set_property(TARGET tensorflow-lite-2.8.0 PROPERTY POSITION_INDEPENDENT_CODE ON)
+target_link_libraries(tensorflow-lite-2.8.0 eigen flatbuffers::flatbuffers ruy abseil farmhash ${LIB_PTHREAD} dl)
+if(NOT ANDROID)
+ target_link_libraries(tensorflow-lite-2.8.0 rt)
+endif()
+
+# Define TF_LITE_DISABLE_X86_NEON for debug build
+# If we upgrade NEON2SSE version, we can remove below line
+if(NEON2SSESource_FOUND)
+ target_compile_definitions(tensorflow-lite-2.8.0 PRIVATE $<$<CONFIG:Debug>:TF_LITE_DISABLE_X86_NEON>)
+endif(NEON2SSESource_FOUND)
+
+if(ANDROID)
+ target_link_libraries(tensorflow-lite-2.8.0 log)
+ #target_include_directories(tensorflow-lite-2.8.0 PUBLIC "${NDK_DIR}/..")
+endif()
diff --git a/infra/nnfw/cmake/packages/TensorFlowLite-2.8.0/TensorFlowLiteConfig.cmake b/infra/nnfw/cmake/packages/TensorFlowLite-2.8.0/TensorFlowLiteConfig.cmake
new file mode 100644
index 000000000..60f7f5450
--- /dev/null
+++ b/infra/nnfw/cmake/packages/TensorFlowLite-2.8.0/TensorFlowLiteConfig.cmake
@@ -0,0 +1,96 @@
+# NOTE This line prevents multiple definitions of the tensorflow-lite-2.8.0 target
+if(TARGET tensorflow-lite-2.8.0)
+ set(TensorFlowLite_FOUND TRUE)
+ return()
+endif(TARGET tensorflow-lite-2.8.0)
+
+if(BUILD_TENSORFLOW_LITE)
+ macro(return_unless VAR)
+ if(NOT ${VAR})
+ message("TFLite 2.8: ${VAR} NOT TRUE")
+ set(TensorFlowLite_FOUND FALSE)
+ return()
+ endif(NOT ${VAR})
+ endmacro(return_unless)
+
+ nnas_include(ExternalSourceTools)
+ nnas_include(OptionTools)
+
+ nnas_find_package(TensorFlowSource EXACT 2.8.0 QUIET)
+ return_unless(TensorFlowSource_FOUND)
+
+ # Below urls come from https://github.com/tensorflow/tensorflow/blob/v2.8.0/tensorflow/workspace2.bzl
+ nnas_find_package(Abseil QUIET)
+ return_unless(Abseil_FOUND)
+ nnfw_find_package(Eigen QUIET)
+ return_unless(Eigen_FOUND)
+ nnas_find_package(Farmhash QUIET)
+ return_unless(Farmhash_FOUND)
+ nnfw_find_package(FlatBuffers QUIET)
+ return_unless(FlatBuffers_FOUND)
+ nnas_find_package(TensorFlowGEMMLowpSource EXACT 2.8.0 QUIET)
+ return_unless(TensorFlowGEMMLowpSource_FOUND)
+ nnas_find_package(OouraFFTSource QUIET)
+ return_unless(OouraFFTSource_FOUND)
+ nnfw_find_package(Ruy QUIET)
+ return_unless(Ruy_FOUND)
+
+ # TensorFlow Lite requires FP16 library's header only
+ nnas_find_package(Fp16Source QUIET)
+ return_unless(Fp16Source_FOUND)
+
+ # TensorFlow Lite requires Pybind11 library's header only
+ # But Pybind11 requires python3-dev package
+ # TODO Enable below by installing package on build system
+ #nnas_find_package(Pybind11Source QUIET)
+ #return_unless(Pybind11Source_FOUND)
+
+ # Optional packages
+ nnas_find_package(NEON2SSESource QUIET)
+
+ nnas_include(ExternalProjectTools)
+ add_extdirectory("${CMAKE_CURRENT_LIST_DIR}/TensorFlowLite" tflite-2.8.0)
+
+ set(TensorFlowLite_FOUND TRUE)
+ return()
+endif()
+
+# Use pre-built TensorFlow Lite
+find_path(TFLITE_INCLUDE_DIR NAMES tensorflow/lite/c/c_api.h)
+find_library(TFLITE_LIB NAMES tensorflow2-lite)
+
+if(NOT TFLITE_INCLUDE_DIR)
+ # Tizen install TensorFlow Lite 2.8 headers in /usr/include/tensorflow2
+ find_path(TFLITE_INCLUDE_DIR NAMES tensorflow/lite/c/c_api.h PATHS "/usr/include/tensorflow2")
+ if(NOT TFLITE_INCLUDE_DIR)
+ set(TensorFlowLite_FOUND FALSE)
+ return()
+ endif(NOT TFLITE_INCLUDE_DIR)
+endif(NOT TFLITE_INCLUDE_DIR)
+
+if(NOT TFLITE_LIB)
+ set(TensorFlowLite_FOUND FALSE)
+ return()
+endif(NOT TFLITE_LIB)
+
+message(STATUS "Found TensorFlow Lite: TRUE (include: ${TFLITE_INCLUDE_DIR}, lib: ${TFLITE_LIB})")
+
+# TODO Use IMPORTED target
+add_library(tensorflow-lite-2.8.0 INTERFACE)
+target_include_directories(tensorflow-lite-2.8.0 SYSTEM INTERFACE ${TFLITE_INCLUDE_DIR})
+target_link_libraries(tensorflow-lite-2.8.0 INTERFACE ${TFLITE_LIB})
+find_package(Flatbuffers)
+if(Flatbuffers_FOUND)
+ target_link_libraries(tensorflow-lite-2.8.0 INTERFACE flatbuffers::flatbuffers)
+endif(Flatbuffers_FOUND)
+
+# Prefer -pthread to -lpthread
+set(THREADS_PREFER_PTHREAD_FLAG TRUE)
+set(CMAKE_THREAD_PREFER_PTHREAD TRUE)
+find_package(Threads QUIET)
+
+if(Threads_FOUND)
+ target_link_libraries(tensorflow-lite-2.8.0 INTERFACE ${CMAKE_THREAD_LIBS_INIT})
+endif(Threads_FOUND)
+
+set(TensorFlowLite_FOUND TRUE)
diff --git a/infra/cmake/packages/TensorFlowLite-1.12/TensorFlowLiteConfigVersion.cmake b/infra/nnfw/cmake/packages/TensorFlowLite-2.8.0/TensorFlowLiteConfigVersion.cmake
index 4a57b655b..cd49d7b72 100644
--- a/infra/cmake/packages/TensorFlowLite-1.12/TensorFlowLiteConfigVersion.cmake
+++ b/infra/nnfw/cmake/packages/TensorFlowLite-2.8.0/TensorFlowLiteConfigVersion.cmake
@@ -1,4 +1,4 @@
-set(PACKAGE_VERSION "1.12")
+set(PACKAGE_VERSION "2.8.0")
set(PACKAGE_VERSION_EXACT FALSE)
set(PACKAGE_VERSION_COMPATIBLE FALSE)
set(PACKAGE_VERSION_UNSUITABLE TRUE)
diff --git a/infra/nnfw/cmake/packages/TensorFlowLiteGpu/CMakeLists.txt b/infra/nnfw/cmake/packages/TensorFlowLiteGpu/CMakeLists.txt
new file mode 100644
index 000000000..73264d107
--- /dev/null
+++ b/infra/nnfw/cmake/packages/TensorFlowLiteGpu/CMakeLists.txt
@@ -0,0 +1,73 @@
+#
+# Tensorflow Lite GPU delegate library 2.8.0
+#
+
+set(LIB_TENSORFLOW_GPU_DELEGATE "TensorFlowGpu")
+
+#TENSORFLOWGPU_SOURCE_DIR
+set(TENSORFLOWSOURCE_DIR ${TensorFlowSource_DIR})
+set(TENSORFLOW_LITE_BASE ${TENSORFLOWSOURCE_DIR}/tensorflow/lite)
+set(REF_TENSORFLOW_LITE_GPU_DELEGATE_SRC_BASE "${TENSORFLOW_LITE_BASE}/delegates/gpu")
+
+set(SRC_BASE "${REF_TENSORFLOW_LITE_GPU_DELEGATE_SRC_BASE}")
+file(GLOB GPU_CL_SRC_LIST "${SRC_BASE}/cl/*.cc"
+ "${SRC_BASE}/cl/kernels/*.cc"
+ "${SRC_BASE}/common/*.cc"
+ "${SRC_BASE}/common/selectors/*.cc"
+ "${SRC_BASE}/common/selectors/default/*.cc"
+ "${SRC_BASE}/common/task/*.cc"
+ "${SRC_BASE}/common/tasks/*.cc"
+ "${SRC_BASE}/common/tasks/special/*.cc"
+ "${SRC_BASE}/common/memory_management/*.cc"
+ "${SRC_BASE}/common/transformations/*.cc"
+ )
+
+file(GLOB REMOVE_TEST_SRCS "${SRC_BASE}/cl/*_test*.cc"
+ "${SRC_BASE}/cl/testing/*.cc"
+ "${SRC_BASE}/cl/kernels/*_test*.cc"
+ "${SRC_BASE}/common/*_test*.cc"
+ "${SRC_BASE}/common/tasks/*_test*.cc"
+ "${SRC_BASE}/common/transformations/*_test*.cc"
+ )
+# Not available
+file(GLOB REMOVE_SRCS "${SRC_BASE}/cl/*gl*.cc"
+ "${SRC_BASE}/cl/gpu_api_delegate.cc"
+ "${SRC_BASE}/cl/serialization.cc"
+ "${SRC_BASE}/common/lstm_parser.cc"
+ "${SRC_BASE}/common/model_builder.cc"
+ "${SRC_BASE}/common/model_builder_helper.cc"
+ "${SRC_BASE}/common/object_reader.cc"
+ "${SRC_BASE}/common/quantization_util.cc"
+ "${SRC_BASE}/common/memory_management/*_test.cc"
+ )
+
+list(APPEND GPU_CL_SRC_LIST "${TENSORFLOW_LITE_BASE}/experimental/acceleration/compatibility/android_info.cc")
+
+list(REMOVE_ITEM GPU_CL_SRC_LIST ${REMOVE_TEST_SRCS})
+list(REMOVE_ITEM GPU_CL_SRC_LIST ${REMOVE_SRCS})
+list(APPEND TFLITE_GPU_SRCS ${GPU_CL_SRC_LIST})
+
+add_library(${LIB_TENSORFLOW_GPU_DELEGATE} STATIC ${TFLITE_GPU_SRCS})
+
+target_include_directories(${LIB_TENSORFLOW_GPU_DELEGATE} PRIVATE "${Opencl_Headers_DIR}")
+target_include_directories(${LIB_TENSORFLOW_GPU_DELEGATE} PRIVATE "${Fp16Source_DIR}/include")
+target_include_directories(${LIB_TENSORFLOW_GPU_DELEGATE} PRIVATE "${TensorFlowSource_DIR}")
+target_include_directories(${LIB_TENSORFLOW_GPU_DELEGATE} PRIVATE "${TensorFlowGEMMLowpSource_DIR}")
+target_include_directories(${LIB_TENSORFLOW_GPU_DELEGATE} PRIVATE "${TensorFlowEigenSource_DIR}")
+target_include_directories(${LIB_TENSORFLOW_GPU_DELEGATE} PRIVATE "${VulkanSource_DIR}/include")
+target_include_directories(${LIB_TENSORFLOW_GPU_DELEGATE} PRIVATE "${Opengl_HeadersSource_DIR}/api")
+target_include_directories(${LIB_TENSORFLOW_GPU_DELEGATE} PRIVATE "${Egl_HeadersSource_DIR}/api")
+
+target_link_libraries(${LIB_TENSORFLOW_GPU_DELEGATE} PRIVATE abseil farmhash fp16 flatbuffers)
+
+# GL codes are not used on gpu_cl
+target_compile_options(${LIB_TENSORFLOW_GPU_DELEGATE} PRIVATE "-DCL_DELEGATE_NO_GL")
+target_compile_options(${LIB_TENSORFLOW_GPU_DELEGATE} PRIVATE "-DTFLITE_GPU_BINARY_RELEASE" "-DEGL_NO_X11")
+
+# deprecated-copy warning on header (gcc 9.4.0)
+if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 9.4)
+ target_compile_options(${LIB_TENSORFLOW_GPU_DELEGATE} PUBLIC "-Wno-deprecated-copy")
+endif()
+
+# Applying PIC first, currently used on gpu_cl only
+set_target_properties(${LIB_TENSORFLOW_GPU_DELEGATE} PROPERTIES POSITION_INDEPENDENT_CODE ON)
diff --git a/infra/nnfw/cmake/packages/XnnpackConfig.cmake b/infra/nnfw/cmake/packages/XnnpackConfig.cmake
new file mode 100644
index 000000000..101d757ec
--- /dev/null
+++ b/infra/nnfw/cmake/packages/XnnpackConfig.cmake
@@ -0,0 +1,41 @@
+function(_Xnnpack_Build)
+ nnas_find_package(XnnpackSource QUIET)
+ nnfw_find_package(Fxdiv QUIET)
+ nnfw_find_package(CpuInfo QUIET)
+ nnfw_find_package(Pthreadpool QUIET)
+ nnfw_find_package(Psimd QUIET)
+ nnfw_find_package(Fp16 QUIET)
+
+  # NOTE This line prevents multiple definitions of the XNNPACK target
+ if(TARGET XNNPACK)
+ set(XnnpackSource_DIR ${XnnpackSource_DIR} PARENT_SCOPE)
+ set(Xnnpack_FOUND TRUE PARENT_SCOPE)
+ return()
+ endif(TARGET XNNPACK)
+
+ if(NOT XnnpackSource_FOUND)
+ message(STATUS "XNNPACK: Source not found")
+ set(Xnnpack_FOUND FALSE PARENT_SCOPE)
+ return()
+ endif(NOT XnnpackSource_FOUND)
+
+ set(XNNPACK_BUILD_TESTS OFF CACHE BOOL "Build XNNPACK unit tests")
+ set(XNNPACK_BUILD_BENCHMARKS OFF CACHE BOOL "Build XNNPACK benchmarks")
+ set(XNNPACK_USE_SYSTEM_LIBS ON CACHE BOOL "Use system-provided dependency libraries")
+
+ add_extdirectory("${XnnpackSource_DIR}" XNNPACK EXCLUDE_FROM_ALL)
+ set_target_properties(XNNPACK PROPERTIES POSITION_INDEPENDENT_CODE ON)
+ # Suppress warnings generated by xnnpack
+ set_target_properties(XNNPACK PROPERTIES COMPILE_FLAGS "-Wno-deprecated-declarations")
+ set(XnnpackSource_DIR ${XnnpackSource_DIR} PARENT_SCOPE)
+ set(Xnnpack_FOUND TRUE PARENT_SCOPE)
+endfunction(_Xnnpack_Build)
+
+string(REGEX REPLACE "-flto" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
+string(REGEX REPLACE "-flto" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+
+if(BUILD_XNNPACK)
+ _Xnnpack_Build()
+else(BUILD_XNNPACK)
+ set(Xnnpack_FOUND FALSE)
+endif(BUILD_XNNPACK)
diff --git a/infra/nnfw/command/build b/infra/nnfw/command/build
index b0301d2f4..4a3601ed2 100644
--- a/infra/nnfw/command/build
+++ b/infra/nnfw/command/build
@@ -8,4 +8,4 @@ if [[ ! -d "${BUILD_PATH}" ]]; then
fi
cd ${BUILD_PATH}
-make "$@"
+cmake --build . -- "$@"
diff --git a/infra/nnfw/command/count-unittest b/infra/nnfw/command/count-unittest
index 7957f36e7..3ce7bbac3 100644
--- a/infra/nnfw/command/count-unittest
+++ b/infra/nnfw/command/count-unittest
@@ -69,6 +69,6 @@ TOTAL_NEG_TCS=$(echo "$TEST_LIST" | grep '^ neg_' | wc -l)
TOTAL_POS_TCS=$(echo "$TEST_LIST" | grep '^ neg_' -v | wc -l)
# Report stats
-echo "TOTAL NUMBER OF TEST CASES : $TOTAL_TCS"
-echo "TOTAL NUMBER OF POSTIVE TEST CASES : $TOTAL_NEG_TCS"
-echo "TOTAL NUMBER OF NEGATIVE TEST CASES : $TOTAL_POS_TCS"
+printf "TOTAL NUMBER OF TEST CASES : %5d\n" $TOTAL_TCS
+printf "TOTAL NUMBER OF POSITIVE TEST CASES : %5d\n" $TOTAL_POS_TCS
+printf "TOTAL NUMBER OF NEGATIVE TEST CASES : %5d\n" $TOTAL_NEG_TCS
diff --git a/infra/nnfw/command/prepare-model b/infra/nnfw/command/prepare-model
new file mode 100644
index 000000000..35600e152
--- /dev/null
+++ b/infra/nnfw/command/prepare-model
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+import "build.configuration"
+
+# This command downloads test materials into the host environment
+# by running the corresponding test script on the host
+
+# Common variables
+DRIVER_PATH=$NNFW_PROJECT_PATH/tests/scripts
+CACHE_PATH=${CACHE_PATH:-$WORKSPACE_PATH/out/test/cache}
+
+COMMAND_FILE=$DRIVER_PATH/command/prepare-model
+if [[ ! -f $COMMAND_FILE ]]; then
+ echo "ERROR: '$COMMAND' is not supported"
+ exit 255
+fi
+
+source $COMMAND_FILE $@
diff --git a/infra/nnfw/config/docker.configuration b/infra/nnfw/config/docker.configuration
index 962c02c7f..c61ab0ff2 100644
--- a/infra/nnfw/config/docker.configuration
+++ b/infra/nnfw/config/docker.configuration
@@ -1,6 +1,6 @@
#!/bin/bash
-DOCKER_IMAGE_NAME=${DOCKER_IMAGE_NAME:-nnas}
+DOCKER_IMAGE_NAME=${DOCKER_IMAGE_NAME:-nnfw/one-devtools}
echo "Using docker image ${DOCKER_IMAGE_NAME}"
if [ -z "`docker images ${DOCKER_IMAGE_NAME}`" ]; then
diff --git a/infra/nnfw/config/gbs.conf b/infra/nnfw/config/gbs.conf
index bad9eb204..5bb7b0ca3 100644
--- a/infra/nnfw/config/gbs.conf
+++ b/infra/nnfw/config/gbs.conf
@@ -3,20 +3,37 @@
profile = profile.tizen
[profile.tizen]
-user=obs_viewer
-obs = obs.tizen
-repos = repo.tizen_one,repo.tizen_base,repo.tizen_mobile
-buildroot = /home/GBS-ROOT/
+repos = repo.base, repo.unified
-[obs.tizen]
-url = http://api.tizen.org
+[profile.tizen_8]
+repos = repo.base_8, repo.unified_8
-[repo.tizen_mobile]
-url = http://download.tizen.org/snapshots/tizen/unified/latest/repos/standard/packages/
+[profile.tizen-dev]
+repos = repo.base-dev, repo.unified-dev
-[repo.tizen_base]
-url = http://download.tizen.org/snapshots/tizen/base/latest/repos/standard/packages/
+[profile.tizen-riscv]
+repos = repo.base-riscv, repo.unified-riscv
-[repo.tizen_one]
-url = http://nnfw.mooo.com/archive/tizen/
+[repo.unified]
+url = http://download.tizen.org/snapshots/TIZEN/Tizen-7.0/Tizen-7.0-Unified/latest/repos/standard/packages/
+[repo.base]
+url = http://download.tizen.org/snapshots/TIZEN/Tizen-7.0/Tizen-7.0-Base/latest/repos/standard/packages/
+
+[repo.unified_8]
+url = http://download.tizen.org/snapshots/TIZEN/Tizen-8.0/Tizen-8.0-Unified/latest/repos/standard/packages/
+
+[repo.base_8]
+url = http://download.tizen.org/snapshots/TIZEN/Tizen-8.0/Tizen-8.0-Base/latest/repos/standard/packages/
+
+[repo.unified-dev]
+url = http://download.tizen.org/snapshots/TIZEN/Tizen/Tizen-Unified-Dev/latest/repos/standard/packages/
+
+[repo.base-dev]
+url = http://download.tizen.org/snapshots/TIZEN/Tizen/Tizen-Base-Dev/latest/repos/standard/packages/
+
+[repo.unified-riscv]
+url = https://download.tizen.org/snapshots/TIZEN/Tizen/Tizen-Unified-RISCV/latest/repos/standard/packages/
+
+[repo.base-riscv]
+url = https://download.tizen.org/snapshots/TIZEN/Tizen/Tizen-Base-RISCV/latest/repos/standard/packages/
diff --git a/infra/onert-micro/CMakeLists.txt b/infra/onert-micro/CMakeLists.txt
new file mode 100644
index 000000000..21533c11f
--- /dev/null
+++ b/infra/onert-micro/CMakeLists.txt
@@ -0,0 +1,61 @@
+cmake_minimum_required(VERSION 3.15)
+
+project(onert-micro)
+
+enable_testing()
+
+set(CMAKE_CXX_STANDARD 14)
+
+set(CMAKE_SKIP_BUILD_RPATH FALSE)
+set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)
+set(CMAKE_INSTALL_RPATH "$ORIGIN/../lib:$ORIGIN/")
+set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
+
+set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
+
+if (NOT DEFINED TARGET_ARCH)
+ set(TARGET_ARCH "armv7em")
+endif()
+
+if (NOT DEFINED TARGET_CPU)
+ set(TARGET_CPU "cortex-m7")
+endif()
+
+if (NOT DEFINED TARGET_OS)
+ set(TARGET_OS "generic")
+endif()
+
+include(utils.cmake)
+
+nnas_find_package(GTest QUIET)
+
+option(ENABLE_TEST "Build Tests using Google Test" ${GTest_FOUND})
+
+if(${ENABLE_TEST} AND NOT ${GTest_FOUND})
+ message(FATAL_ERROR "Google Test is required to enable test")
+endif(${ENABLE_TEST} AND NOT ${GTest_FOUND})
+
+option(ENABLE_COVERAGE "Build for coverage test" OFF)
+if(${ENABLE_COVERAGE} AND NOT ${ENABLE_TEST})
+ message(FATAL_ERROR "Test should be enabled to measure test coverage")
+endif(${ENABLE_COVERAGE} AND NOT ${ENABLE_TEST})
+
+if(${ENABLE_TEST})
+ include(CTest)
+endif(${ENABLE_TEST})
+
+###
+### Target
+###
+add_library(onert_micro_common INTERFACE)
+if(ENABLE_STRICT_BUILD)
+ target_compile_options(onert_micro_common INTERFACE -Werror -Wall -Wextra -Wno-reorder)
+endif(ENABLE_STRICT_BUILD)
+
+add_library(onert_micro_coverage INTERFACE)
+if(ENABLE_COVERAGE)
+ target_compile_options(onert_micro_coverage INTERFACE -g -O0 -fprofile-arcs -ftest-coverage)
+ target_link_libraries(onert_micro_coverage INTERFACE gcov)
+endif(ENABLE_COVERAGE)
+
+add_subdirectory("${NNAS_PROJECT_SOURCE_DIR}/onert-micro" "${CMAKE_BINARY_DIR}/onert-micro")
diff --git a/infra/onert-micro/cmake/ApplyCompileFlags.cmake b/infra/onert-micro/cmake/ApplyCompileFlags.cmake
new file mode 100644
index 000000000..fb99fbd26
--- /dev/null
+++ b/infra/onert-micro/cmake/ApplyCompileFlags.cmake
@@ -0,0 +1,35 @@
+#
+# Platform independent compile flag setting
+#
+# flags for build type: debug, release
+set(CMAKE_C_FLAGS_DEBUG "-O0 -g -DDEBUG")
+set(CMAKE_CXX_FLAGS_DEBUG "-O0 -g -DDEBUG")
+set(CMAKE_C_FLAGS_RELEASE "-O3 -DNDEBUG")
+set(CMAKE_CXX_FLAGS_RELEASE "-O3 -DNDEBUG")
+
+#
+# Platform specific compile flag setting
+#
+if(EXISTS "${CMAKE_CURRENT_LIST_DIR}/buildtool/config/config_${TARGET_PLATFORM}.cmake")
+ include("${CMAKE_CURRENT_LIST_DIR}/buildtool/config/config_${TARGET_PLATFORM}.cmake")
+endif()
+
+#
+# Apply compile flags
+# note: this should be placed after cmake/buildtool/config/config_xxx.cmake files
+#
+# add common flags
+foreach(FLAG ${FLAGS_COMMON})
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${FLAG}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${FLAG}")
+endforeach()
+
+# add c flags
+foreach(FLAG ${FLAGS_CONLY})
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${FLAG}")
+endforeach()
+
+# add cxx flags
+foreach(FLAG ${FLAGS_CXXONLY})
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${FLAG}")
+endforeach()
diff --git a/infra/onert-micro/cmake/CfgOptionFlags.cmake b/infra/onert-micro/cmake/CfgOptionFlags.cmake
new file mode 100644
index 000000000..ffbc7b255
--- /dev/null
+++ b/infra/onert-micro/cmake/CfgOptionFlags.cmake
@@ -0,0 +1,18 @@
+# Platform specific configuration
+# note: this should be placed before default setting for option setting priority
+# (platform specific setting have higher priority)
+#
+include("${NNAS_PROJECT_SOURCE_DIR}/infra/onert-micro/cmake/options/options_${TARGET_PLATFORM}.cmake")
+
+###
+### Configuration
+###
+option(DOWNLOAD_RUY "Download ruy source" ON)
+option(DOWNLOAD_EIGEN "Download Eigen source" ON)
+option(DOWNLOAD_GEMMLOWP "Download GEMM low precision library source" ON)
+option(DOWNLOAD_FLATBUFFERS "Download FlatBuffers source" ON)
+option(BUILD_FLATBUFFERS "Locally build Flatbuffers from the downloaded source" ON)
+option(DOWNLOAD_TENSORFLOW "Download TensorFlow source" ON)
+
+option(DOWNLOAD_GTEST "Download Google Test source" ON)
+option(BUILD_GTEST "Build Google Test from the downloaded source" ON)
diff --git a/infra/onert-micro/cmake/buildtool/config/arm-none-eabi-gcc.cmake b/infra/onert-micro/cmake/buildtool/config/arm-none-eabi-gcc.cmake
new file mode 100644
index 000000000..544be030a
--- /dev/null
+++ b/infra/onert-micro/cmake/buildtool/config/arm-none-eabi-gcc.cmake
@@ -0,0 +1,66 @@
+set(CMAKE_SYSTEM_NAME Generic)
+
+set(CMAKE_SYSTEM_PROCESSOR "${CPU_ARCH}")
+set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
+set(CMAKE_C_COMPILER "${C_COMPILER}")
+set(CMAKE_CXX_COMPILER "${CXX_COMPILER}")
+set(CMAKE_ASM_COMPILER "${ASM_COMPILER}")
+set(CMAKE_OBJCOPY "${OBJCOPY}")
+
+set(TARGET_CPU "cortex-m4" CACHE STRING "Target CPU")
+
+# Convert TARGET_CPU=Cortex-M33+nofp+nodsp into
+# - CMAKE_SYSTEM_PROCESSOR=cortex-m33
+# - TARGET_CPU_FEATURES=nofp;nodsp
+string(REPLACE "+" ";" TARGET_CPU_FEATURES ${TARGET_CPU})
+list(POP_FRONT TARGET_CPU_FEATURES CMAKE_SYSTEM_PROCESSOR)
+string(TOLOWER ${CMAKE_SYSTEM_PROCESSOR} CMAKE_SYSTEM_PROCESSOR)
+
+set(CMAKE_EXECUTABLE_SUFFIX ".elf")
+set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
+set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
+set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
+set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
+
+# Select C/C++ version
+set(CMAKE_C_STANDARD 99)
+set(CMAKE_CXX_STANDARD 14)
+
+# Compile options
+add_compile_options(
+ -mcpu=${TARGET_CPU}
+ -mthumb
+ "$<$<CONFIG:DEBUG>:-gdwarf-3>"
+ "$<$<COMPILE_LANGUAGE:CXX>:-funwind-tables;-frtti;-fexceptions>")
+
+# Compile defines
+add_compile_definitions(
+ "$<$<NOT:$<CONFIG:DEBUG>>:NDEBUG>")
+
+# Link options
+add_link_options(
+ -mcpu=${TARGET_CPU}
+ -mthumb
+ --specs=nosys.specs)
+
+# Set floating point unit
+if("${TARGET_CPU}" MATCHES "\\+fp")
+ set(FLOAT hard)
+elseif("${TARGET_CPU}" MATCHES "\\+nofp")
+ set(FLOAT soft)
+elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "cortex-m33" OR
+ "${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "cortex-m55")
+ set(FLOAT hard)
+else()
+ set(FLOAT soft)
+endif()
+
+if (FLOAT)
+ add_compile_options(-mfloat-abi=${FLOAT})
+ add_link_options(-mfloat-abi=${FLOAT})
+endif()
+
+# Compilation warnings
+add_compile_options(
+ -Wno-all
+)
diff --git a/infra/onert-micro/cmake/buildtool/config/config_linux.cmake b/infra/onert-micro/cmake/buildtool/config/config_linux.cmake
new file mode 100644
index 000000000..d7b17cfef
--- /dev/null
+++ b/infra/onert-micro/cmake/buildtool/config/config_linux.cmake
@@ -0,0 +1,11 @@
+#
+# linux common compile options
+#
+
+# Disable annoying ABI compatibility warning.
+if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0)
+ list(APPEND FLAGS_CXXONLY "-Wno-psabi")
+endif()
+
+# lib pthread as a variable (pthread must be disabled on android)
+set(LIB_PTHREAD pthread)
diff --git a/infra/onert-micro/cmake/buildtool/config/config_x86_64-linux.cmake b/infra/onert-micro/cmake/buildtool/config/config_x86_64-linux.cmake
new file mode 100644
index 000000000..528e48396
--- /dev/null
+++ b/infra/onert-micro/cmake/buildtool/config/config_x86_64-linux.cmake
@@ -0,0 +1,12 @@
+#
+# x86_64 linux compile options
+#
+message(STATUS "Building for x86-64 Linux")
+
+# include linux common
+include("cmake/buildtool/config/config_linux.cmake")
+
+# SIMD for x86
+set(FLAGS_COMMON ${FLAGS_COMMON}
+ "-msse4"
+ )
diff --git a/infra/onert-micro/cmake/options/options_armv7-r-generic.cmake b/infra/onert-micro/cmake/options/options_armv7-r-generic.cmake
new file mode 100644
index 000000000..d671b73f1
--- /dev/null
+++ b/infra/onert-micro/cmake/options/options_armv7-r-generic.cmake
@@ -0,0 +1,3 @@
+#
+# armv7em generic cmake options
+#
diff --git a/infra/onert-micro/cmake/options/options_armv7em-generic.cmake b/infra/onert-micro/cmake/options/options_armv7em-generic.cmake
new file mode 100644
index 000000000..d671b73f1
--- /dev/null
+++ b/infra/onert-micro/cmake/options/options_armv7em-generic.cmake
@@ -0,0 +1,3 @@
+#
+# armv7em generic cmake options
+#
diff --git a/infra/onert-micro/cmake/options/options_armv8-m-generic.cmake b/infra/onert-micro/cmake/options/options_armv8-m-generic.cmake
new file mode 100644
index 000000000..cbd70de7d
--- /dev/null
+++ b/infra/onert-micro/cmake/options/options_armv8-m-generic.cmake
@@ -0,0 +1,3 @@
+#
+# armv8-m generic cmake options
+#
diff --git a/infra/onert-micro/cmake/options/options_x86_64-linux.cmake b/infra/onert-micro/cmake/options/options_x86_64-linux.cmake
new file mode 100644
index 000000000..0fb72f18b
--- /dev/null
+++ b/infra/onert-micro/cmake/options/options_x86_64-linux.cmake
@@ -0,0 +1,3 @@
+#
+# x86_64 linux cmake options
+#
diff --git a/infra/onert-micro/utils.cmake b/infra/onert-micro/utils.cmake
new file mode 100644
index 000000000..4c78e2cb9
--- /dev/null
+++ b/infra/onert-micro/utils.cmake
@@ -0,0 +1,53 @@
+set(NNAS_PROJECT_SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/../.." CACHE
+ INTERNAL "Where to find nnas top-level source directory"
+ )
+
+set(NNAS_EXTERNALS_DIR
+ "${NNAS_PROJECT_SOURCE_DIR}/externals" CACHE
+ INTERNAL "Where to download external dependencies"
+ )
+set(ONERT_MICRO_OVERLAY_DIR "${CMAKE_BINARY_DIR}/overlay" CACHE
+ INTERNAL "Where locally built external dependencies are installed")
+
+# Share package build script with runtime
+set(EXT_OVERLAY_DIR ${ONERT_MICRO_OVERLAY_DIR})
+
+# This allows find_package to access configurations installed inside overlay
+list(APPEND CMAKE_PREFIX_PATH "${EXT_OVERLAY_DIR}")
+
+macro(nnas_include PREFIX)
+ include("${NNAS_PROJECT_SOURCE_DIR}/infra/cmake/modules/${PREFIX}.cmake")
+endmacro(nnas_include)
+
+macro(nnas_find_package PREFIX)
+ find_package(${PREFIX}
+ CONFIG NO_DEFAULT_PATH
+ PATHS ${NNAS_PROJECT_SOURCE_DIR}/infra/cmake/packages
+ ${ARGN})
+endmacro(nnas_find_package)
+
+macro(nnas_find_package_folder PREFIX FIND_FOLDER)
+ find_package(${PREFIX}
+ CONFIG NO_DEFAULT_PATH
+ PATHS ${NNAS_PROJECT_SOURCE_DIR}/infra/cmake/packages ${FIND_FOLDER}
+ ${ARGN})
+endmacro(nnas_find_package_folder)
+
+###
+### CMake configuration
+###
+if(NOT CMAKE_BUILD_TYPE)
+ set(CMAKE_BUILD_TYPE "Debug" CACHE STRING "Type of build" FORCE)
+endif(NOT CMAKE_BUILD_TYPE)
+message(STATUS "Use '${CMAKE_BUILD_TYPE}' configuration")
+
+# identify platform: HOST_PLATFORM, TARGET_PLATFORM and related
+# note: this should be placed before flags and options setting
+nnas_include(IdentifyPlatform)
+
+# Configuration flags
+include("${NNAS_PROJECT_SOURCE_DIR}/infra/onert-micro/cmake/CfgOptionFlags.cmake")
+
+# apply compilation flags
+# NOTE this should be after all option
+include("${NNAS_PROJECT_SOURCE_DIR}/infra/onert-micro/cmake/ApplyCompileFlags.cmake")
diff --git a/infra/packaging/build b/infra/packaging/build
index e941a724b..16bce7e0e 100644
--- a/infra/packaging/build
+++ b/infra/packaging/build
@@ -8,7 +8,10 @@ if [[ -z "${NNAS_PROJECT_PATH}" ]]; then
fi
# The default preset
-PRESET="20200630"
+PRESET="20230413"
+
+# Test is enabled by default
+DISABLE_TEST=false
EXTRA_OPTIONS=()
while [ "$#" -ne 0 ]; do
@@ -23,6 +26,10 @@ while [ "$#" -ne 0 ]; do
PRESET="$2"
shift 2
;;
+ '--notest')
+ DISABLE_TEST=true
+ shift
+ ;;
'--')
shift
while [ "$#" -ne 0 ]; do
@@ -44,6 +51,10 @@ if [[ -z "${NNAS_INSTALL_PREFIX}" ]]; then
exit 255
fi
+if [[ "${DISABLE_TEST}" == "true" ]]; then
+ EXTRA_OPTIONS+=("-DENABLE_TEST=OFF")
+fi
+
PRESET_PATH="${SCRIPT_PATH}/preset/${PRESET}"
if [[ ! -f "${PRESET_PATH}" ]]; then
diff --git a/infra/packaging/preset/20200630 b/infra/packaging/preset/20200630
index 5d1635809..a1721d941 100644
--- a/infra/packaging/preset/20200630
+++ b/infra/packaging/preset/20200630
@@ -9,7 +9,7 @@ function preset_configure()
{
REQUIRED_UNITS=()
# Common Libraries
- REQUIRED_UNITS+=("angkor" "cwrap" "pepper-str" "pepper-strcast" "pp" "stdex")
+ REQUIRED_UNITS+=("angkor" "cwrap" "pepper-str" "pepper-strcast" "pp")
REQUIRED_UNITS+=("oops" "pepper-assert" "foder")
REQUIRED_UNITS+=("souschef")
REQUIRED_UNITS+=("safemain")
@@ -26,7 +26,7 @@ function preset_configure()
# Tools
REQUIRED_UNITS+=("tflite2circle" "circle2circle" "tflchef" "circlechef")
REQUIRED_UNITS+=("tf2tfliteV2" "luci-interpreter" "circle-verify")
- REQUIRED_UNITS+=("record-minmax" "circle-quantizer")
+ REQUIRED_UNITS+=("record-minmax" "circle-quantizer" "rawdata2hdf5")
REQUIRED_UNITS+=("one-cmds")
REQUIRED_UNITS+=("bcq-tools")
diff --git a/infra/packaging/preset/20200731_windows b/infra/packaging/preset/20200731_windows
index 65d179eaf..078c7db47 100644
--- a/infra/packaging/preset/20200731_windows
+++ b/infra/packaging/preset/20200731_windows
@@ -4,7 +4,7 @@ function preset_configure()
{
REQUIRED_UNITS=()
# Common Libraries
- REQUIRED_UNITS+=("angkor" "cwrap" "pepper-str" "pepper-strcast" "pp" "stdex")
+ REQUIRED_UNITS+=("angkor" "cwrap" "pepper-str" "pepper-strcast" "pp")
REQUIRED_UNITS+=("oops" "pepper-assert" "foder")
REQUIRED_UNITS+=("souschef")
REQUIRED_UNITS+=("safemain")
@@ -21,15 +21,15 @@ function preset_configure()
# Tools
REQUIRED_UNITS+=("tflite2circle" "circle2circle" "tflchef" "circlechef")
REQUIRED_UNITS+=("tf2tfliteV2" "luci-interpreter" "circle-verify")
- REQUIRED_UNITS+=("record-minmax" "circle-quantizer")
+ REQUIRED_UNITS+=("record-minmax" "circle-quantizer" "rawdata2hdf5")
REQUIRED_UNITS+=("one-cmds")
+ REQUIRED_UNITS+=("bcq-tools")
NPROC=$(cat /proc/cpuinfo | grep -c processor)
# TODO Use "nncc configure" and "nncc build"
cmake \
-G "MSYS Makefiles" \
- -DTF2NNPKG_FOR_WINDOWS=ON \
-DUSE_PROTOBUF_LEGACY_IMPORT=ON \
-DCMAKE_EXE_LINKER_FLAGS="-Wl,--allow-multiple-definition" \
-DCMAKE_SHARED_LINKER_FLAGS="-Wl,--allow-multiple-definition" \
diff --git a/infra/packaging/preset/20210406 b/infra/packaging/preset/20210406
new file mode 100644
index 000000000..caddb0a53
--- /dev/null
+++ b/infra/packaging/preset/20210406
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+# NOTE purpose of this file is static analysis only
+# new official preset will be added when new programs are ready
+
+PRESET="20210406"
+
+function preset_configure()
+{
+ REQUIRED_UNITS=()
+ # Common Libraries
+ REQUIRED_UNITS+=("angkor" "cwrap" "pepper-str" "pepper-strcast" "pp")
+ REQUIRED_UNITS+=("oops" "pepper-assert" "foder" "crew")
+ REQUIRED_UNITS+=("souschef")
+ REQUIRED_UNITS+=("safemain")
+ REQUIRED_UNITS+=("arser")
+ REQUIRED_UNITS+=("vconone")
+ # Hermes Logging Framework
+ REQUIRED_UNITS+=("hermes" "hermes-std")
+ # loco IR and related utilities
+ REQUIRED_UNITS+=("loco" "locop" "locomotiv" "logo-core" "logo")
+ # Flatbuffer I/O
+ REQUIRED_UNITS+=("mio-tflite" "mio-circle")
+ # Circle compiler library (.circle -> .circle)
+ REQUIRED_UNITS+=("luci")
+ # Tools
+ REQUIRED_UNITS+=("tflite2circle" "circle2circle" "tflchef" "circlechef")
+ REQUIRED_UNITS+=("circle-tensordump" "circledump")
+ REQUIRED_UNITS+=("tf2tfliteV2" "luci-interpreter" "circle-verify")
+ REQUIRED_UNITS+=("luci-eval-driver")
+ REQUIRED_UNITS+=("record-minmax" "circle-quantizer" "rawdata2hdf5")
+ REQUIRED_UNITS+=("circle-partitioner")
+ REQUIRED_UNITS+=("one-cmds")
+ REQUIRED_UNITS+=("bcq-tools")
+
+ NPROC=${NPROC:-$(cat /proc/cpuinfo | grep -c processor)}
+
+ # TODO Use "nncc configure" and "nncc build"
+ cmake \
+ -DCMAKE_INSTALL_PREFIX="${NNCC_INSTALL_PREFIX}" \
+ -DCMAKE_BUILD_TYPE=release \
+ -DBUILD_WHITELIST=$(join_by ";" "${REQUIRED_UNITS[@]}") \
+ -DEXTERNALS_BUILD_THREADS=$((NPROC/2)) \
+ ${EXTRA_OPTIONS[@]} \
+ "${NNAS_PROJECT_PATH}/infra/nncc"
+}
+
+function preset_install()
+{
+ install -t "${NNPKG_INSTALL_PREFIX}/bin" -D \
+ "${NNAS_PROJECT_PATH}/tools/nnpackage_tool/model2nnpkg/model2nnpkg.sh"
+
+ # Install tf2nnpkg
+ install -T -m 755 -D "${SCRIPT_PATH}/res/tf2nnpkg.${PRESET}" "${NNAS_INSTALL_PREFIX}/bin/tf2nnpkg"
+}
diff --git a/infra/packaging/preset/20210406_windows b/infra/packaging/preset/20210406_windows
new file mode 100644
index 000000000..5d4bd8d5f
--- /dev/null
+++ b/infra/packaging/preset/20210406_windows
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+function preset_configure()
+{
+ REQUIRED_UNITS=()
+ # Common Libraries
+ REQUIRED_UNITS+=("angkor" "cwrap" "pepper-str" "pepper-strcast" "pp")
+ REQUIRED_UNITS+=("oops" "pepper-assert" "foder" "crew")
+ REQUIRED_UNITS+=("souschef")
+ REQUIRED_UNITS+=("safemain")
+ REQUIRED_UNITS+=("arser")
+ REQUIRED_UNITS+=("vconone")
+ # Hermes Logging Framework
+ REQUIRED_UNITS+=("hermes" "hermes-std")
+ # loco IR and related utilities
+ REQUIRED_UNITS+=("loco" "locop" "locomotiv" "logo-core" "logo")
+ # Flatbuffer I/O
+ REQUIRED_UNITS+=("mio-tflite" "mio-circle")
+ # Circle compiler library (.circle -> .circle)
+ REQUIRED_UNITS+=("luci")
+ # Tools
+ REQUIRED_UNITS+=("tflite2circle" "circle2circle" "tflchef" "circlechef")
+ REQUIRED_UNITS+=("tf2tfliteV2" "luci-interpreter" "circle-verify")
+ REQUIRED_UNITS+=("luci-eval-driver")
+ REQUIRED_UNITS+=("record-minmax" "circle-quantizer" "rawdata2hdf5")
+ REQUIRED_UNITS+=("circle-partitioner")
+ REQUIRED_UNITS+=("one-cmds")
+ REQUIRED_UNITS+=("bcq-tools")
+
+ NPROC=$(cat /proc/cpuinfo | grep -c processor)
+
+ # TODO Use "nncc configure" and "nncc build"
+ cmake \
+ -G "MSYS Makefiles" \
+ -DUSE_PROTOBUF_LEGACY_IMPORT=ON \
+ -DCMAKE_EXE_LINKER_FLAGS="-Wl,--allow-multiple-definition" \
+ -DCMAKE_SHARED_LINKER_FLAGS="-Wl,--allow-multiple-definition" \
+ -DENABLE_TEST=OFF \
+ -DDOWNLOAD_GTEST=OFF \
+ -DBUILD_GTEST=OFF \
+ -DCMAKE_C_COMPILER=gcc \
+ -DCMAKE_CXX_COMPILER=g++ \
+ -DCMAKE_INSTALL_PREFIX="${NNCC_INSTALL_PREFIX}" \
+ -DCMAKE_BUILD_TYPE=release \
+ -DBUILD_WHITELIST=$(join_by ";" "${REQUIRED_UNITS[@]}") \
+ -DEXTERNALS_BUILD_THREADS=$((NPROC/2)) \
+ ${EXTRA_OPTIONS[@]} \
+ "${NNAS_PROJECT_PATH}/infra/nncc"
+}
+
+function preset_install()
+{
+ # Install libraries to bin/ for Windows release
+ mv ${NNCC_INSTALL_PREFIX}/lib/*.dll ${NNCC_INSTALL_PREFIX}/bin
+ rm -rf ${NNCC_INSTALL_PREFIX}/lib
+
+ install -t "${NNPKG_INSTALL_PREFIX}/bin" -D \
+ "${NNAS_PROJECT_PATH}/tools/nnpackage_tool/model2nnpkg/model2nnpkg.sh"
+
+ # Install tf2nnpkg
+ install -T -m 755 -D "${SCRIPT_PATH}/res/tf2nnpkg.20210406" "${NNAS_INSTALL_PREFIX}/bin/tf2nnpkg"
+
+ # Though you have to install tensorflow to run 'tf2tfliteV2',
+ # tensorflow can't be installed in mingw. First, You can install tensorflow
+ # from Window native CMD(run as administrator) with python virtual environment.
+ # And, you must copy it to "${NNAS_INSTALL_PREFIX}/bin/venv"
+}
diff --git a/infra/packaging/preset/20210706 b/infra/packaging/preset/20210706
new file mode 100644
index 000000000..ef6b6e521
--- /dev/null
+++ b/infra/packaging/preset/20210706
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+# NOTE purpose of this file is static analysis only
+# new official preset will be added when new programs are ready
+
+PRESET="20210706"
+
+function preset_configure()
+{
+ REQUIRED_UNITS=()
+ # Common Libraries
+ REQUIRED_UNITS+=("angkor" "cwrap" "pepper-str" "pepper-strcast" "pp")
+ REQUIRED_UNITS+=("oops" "pepper-assert" "pepper-csv2vec" "foder" "crew")
+ REQUIRED_UNITS+=("souschef")
+ REQUIRED_UNITS+=("safemain")
+ REQUIRED_UNITS+=("arser")
+ REQUIRED_UNITS+=("vconone")
+ # Hermes Logging Framework
+ REQUIRED_UNITS+=("hermes" "hermes-std")
+ # loco IR and related utilities
+ REQUIRED_UNITS+=("loco" "locop" "locomotiv" "logo-core" "logo")
+ # Flatbuffer I/O
+ REQUIRED_UNITS+=("mio-tflite" "mio-circle")
+ # Circle compiler library (.circle -> .circle)
+ REQUIRED_UNITS+=("luci")
+ # Tools
+ REQUIRED_UNITS+=("tflite2circle" "circle2circle" "tflchef" "circlechef")
+ REQUIRED_UNITS+=("circle-tensordump" "circledump")
+ REQUIRED_UNITS+=("tf2tfliteV2" "luci-interpreter" "circle-verify")
+ REQUIRED_UNITS+=("luci-eval-driver")
+ REQUIRED_UNITS+=("record-minmax" "circle-quantizer" "rawdata2hdf5")
+ REQUIRED_UNITS+=("circle-partitioner")
+ REQUIRED_UNITS+=("one-cmds")
+ REQUIRED_UNITS+=("bcq-tools")
+
+ NPROC=${NPROC:-$(cat /proc/cpuinfo | grep -c processor)}
+
+ # TODO Use "nncc configure" and "nncc build"
+ cmake \
+ -DCMAKE_INSTALL_PREFIX="${NNCC_INSTALL_PREFIX}" \
+ -DCMAKE_BUILD_TYPE=release \
+ -DBUILD_WHITELIST=$(join_by ";" "${REQUIRED_UNITS[@]}") \
+ -DEXTERNALS_BUILD_THREADS=$((NPROC/2)) \
+ ${EXTRA_OPTIONS[@]} \
+ "${NNAS_PROJECT_PATH}/infra/nncc"
+}
+
+function preset_install()
+{
+ install -t "${NNPKG_INSTALL_PREFIX}/bin" -D \
+ "${NNAS_PROJECT_PATH}/tools/nnpackage_tool/model2nnpkg/model2nnpkg.sh"
+
+ # Install tf2nnpkg
+ install -T -m 755 -D "${SCRIPT_PATH}/res/tf2nnpkg.${PRESET}" "${NNAS_INSTALL_PREFIX}/bin/tf2nnpkg"
+}
diff --git a/infra/packaging/preset/20210706_windows b/infra/packaging/preset/20210706_windows
new file mode 100644
index 000000000..857540870
--- /dev/null
+++ b/infra/packaging/preset/20210706_windows
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+function preset_configure()
+{
+ REQUIRED_UNITS=()
+ # Common Libraries
+ REQUIRED_UNITS+=("angkor" "cwrap" "pepper-str" "pepper-strcast" "pp")
+ REQUIRED_UNITS+=("oops" "pepper-assert" "pepper-csv2vec" "foder" "crew")
+ REQUIRED_UNITS+=("souschef")
+ REQUIRED_UNITS+=("safemain")
+ REQUIRED_UNITS+=("arser")
+ REQUIRED_UNITS+=("vconone")
+ # Hermes Logging Framework
+ REQUIRED_UNITS+=("hermes" "hermes-std")
+ # loco IR and related utilities
+ REQUIRED_UNITS+=("loco" "locop" "locomotiv" "logo-core" "logo")
+ # Flatbuffer I/O
+ REQUIRED_UNITS+=("mio-tflite" "mio-circle")
+ # Circle compiler library (.circle -> .circle)
+ REQUIRED_UNITS+=("luci")
+ # Tools
+ REQUIRED_UNITS+=("tflite2circle" "circle2circle" "tflchef" "circlechef")
+ REQUIRED_UNITS+=("tf2tfliteV2" "luci-interpreter" "circle-verify")
+ REQUIRED_UNITS+=("luci-eval-driver")
+ REQUIRED_UNITS+=("record-minmax" "circle-quantizer" "rawdata2hdf5")
+ REQUIRED_UNITS+=("circle-partitioner")
+ REQUIRED_UNITS+=("one-cmds")
+ REQUIRED_UNITS+=("bcq-tools")
+
+ NPROC=$(cat /proc/cpuinfo | grep -c processor)
+
+ # TODO Use "nncc configure" and "nncc build"
+ cmake \
+ -G "MSYS Makefiles" \
+ -DUSE_PROTOBUF_LEGACY_IMPORT=ON \
+ -DCMAKE_EXE_LINKER_FLAGS="-Wl,--allow-multiple-definition" \
+ -DCMAKE_SHARED_LINKER_FLAGS="-Wl,--allow-multiple-definition" \
+ -DENABLE_TEST=OFF \
+ -DDOWNLOAD_GTEST=OFF \
+ -DBUILD_GTEST=OFF \
+ -DCMAKE_C_COMPILER=gcc \
+ -DCMAKE_CXX_COMPILER=g++ \
+ -DCMAKE_INSTALL_PREFIX="${NNCC_INSTALL_PREFIX}" \
+ -DCMAKE_BUILD_TYPE=release \
+ -DBUILD_WHITELIST=$(join_by ";" "${REQUIRED_UNITS[@]}") \
+ -DEXTERNALS_BUILD_THREADS=$((NPROC/2)) \
+ ${EXTRA_OPTIONS[@]} \
+ "${NNAS_PROJECT_PATH}/infra/nncc"
+}
+
+function preset_install()
+{
+ # Install libraries to bin/ for Windows release
+ mv ${NNCC_INSTALL_PREFIX}/lib/*.dll ${NNCC_INSTALL_PREFIX}/bin
+ rm -rf ${NNCC_INSTALL_PREFIX}/lib
+
+ install -t "${NNPKG_INSTALL_PREFIX}/bin" -D \
+ "${NNAS_PROJECT_PATH}/tools/nnpackage_tool/model2nnpkg/model2nnpkg.sh"
+
+ # Install tf2nnpkg
+ install -T -m 755 -D "${SCRIPT_PATH}/res/tf2nnpkg.20210706" "${NNAS_INSTALL_PREFIX}/bin/tf2nnpkg"
+
+ # Though you have to install tensorflow to run 'tf2tfliteV2',
+ # tensorflow can't be installed in mingw. First, you can install tensorflow
+ # from Windows native CMD (run as administrator) with a python virtual environment.
+ # And, you must copy it to "${NNAS_INSTALL_PREFIX}/bin/venv"
+}
diff --git a/infra/packaging/preset/20210910 b/infra/packaging/preset/20210910
new file mode 100644
index 000000000..d00b1ccad
--- /dev/null
+++ b/infra/packaging/preset/20210910
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+# NOTE purpose of this file is static analysis only
+# new official preset will be added when new programs are ready
+
+PRESET="20210910"
+
+function preset_configure()
+{
+ REQUIRED_UNITS=()
+ # Common Libraries
+ REQUIRED_UNITS+=("angkor" "cwrap" "pepper-str" "pepper-strcast" "pp")
+ REQUIRED_UNITS+=("oops" "pepper-assert" "pepper-csv2vec" "foder" "crew")
+ REQUIRED_UNITS+=("souschef")
+ REQUIRED_UNITS+=("safemain")
+ REQUIRED_UNITS+=("arser")
+ REQUIRED_UNITS+=("vconone")
+ # Hermes Logging Framework
+ REQUIRED_UNITS+=("hermes" "hermes-std")
+ # loco IR and related utilities
+ REQUIRED_UNITS+=("loco" "locop" "locomotiv" "logo-core" "logo")
+ # Flatbuffer I/O
+ REQUIRED_UNITS+=("mio-tflite" "mio-tflite260" "mio-circle")
+ # Circle compiler library (.circle -> .circle)
+ REQUIRED_UNITS+=("luci")
+ # Tools
+ REQUIRED_UNITS+=("tflite2circle" "circle2circle" "tflchef" "circlechef")
+ REQUIRED_UNITS+=("circle-tensordump" "circledump")
+ REQUIRED_UNITS+=("tf2tfliteV2" "luci-interpreter" "circle-verify")
+ REQUIRED_UNITS+=("luci-eval-driver")
+ REQUIRED_UNITS+=("record-minmax" "circle-quantizer" "rawdata2hdf5")
+ REQUIRED_UNITS+=("circle-partitioner")
+ REQUIRED_UNITS+=("one-cmds")
+ REQUIRED_UNITS+=("bcq-tools")
+
+ NPROC=${NPROC:-$(cat /proc/cpuinfo | grep -c processor)}
+
+ # TODO Use "nncc configure" and "nncc build"
+ cmake \
+ -DCMAKE_INSTALL_PREFIX="${NNCC_INSTALL_PREFIX}" \
+ -DCMAKE_BUILD_TYPE=release \
+ -DBUILD_WHITELIST=$(join_by ";" "${REQUIRED_UNITS[@]}") \
+ -DEXTERNALS_BUILD_THREADS=$((NPROC/2)) \
+ ${EXTRA_OPTIONS[@]} \
+ "${NNAS_PROJECT_PATH}/infra/nncc"
+}
+
+function preset_install()
+{
+ install -t "${NNPKG_INSTALL_PREFIX}/bin" -D \
+ "${NNAS_PROJECT_PATH}/tools/nnpackage_tool/model2nnpkg/model2nnpkg.sh"
+
+ # Install tf2nnpkg
+ install -T -m 755 -D "${SCRIPT_PATH}/res/tf2nnpkg.${PRESET}" "${NNAS_INSTALL_PREFIX}/bin/tf2nnpkg"
+}
diff --git a/infra/packaging/preset/20210910_windows b/infra/packaging/preset/20210910_windows
new file mode 100644
index 000000000..642bdbd76
--- /dev/null
+++ b/infra/packaging/preset/20210910_windows
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+function preset_configure()
+{
+ REQUIRED_UNITS=()
+ # Common Libraries
+ REQUIRED_UNITS+=("angkor" "cwrap" "pepper-str" "pepper-strcast" "pp")
+ REQUIRED_UNITS+=("oops" "pepper-assert" "pepper-csv2vec" "foder" "crew")
+ REQUIRED_UNITS+=("souschef")
+ REQUIRED_UNITS+=("safemain")
+ REQUIRED_UNITS+=("arser")
+ REQUIRED_UNITS+=("vconone")
+ # Hermes Logging Framework
+ REQUIRED_UNITS+=("hermes" "hermes-std")
+ # loco IR and related utilities
+ REQUIRED_UNITS+=("loco" "locop" "locomotiv" "logo-core" "logo")
+ # Flatbuffer I/O
+ REQUIRED_UNITS+=("mio-tflite" "mio-tflite260" "mio-circle")
+ # Circle compiler library (.circle -> .circle)
+ REQUIRED_UNITS+=("luci")
+ # Tools
+ REQUIRED_UNITS+=("tflite2circle" "circle2circle" "tflchef" "circlechef")
+ REQUIRED_UNITS+=("tf2tfliteV2" "luci-interpreter" "circle-verify")
+ REQUIRED_UNITS+=("luci-eval-driver")
+ REQUIRED_UNITS+=("record-minmax" "circle-quantizer" "rawdata2hdf5")
+ REQUIRED_UNITS+=("circle-partitioner")
+ REQUIRED_UNITS+=("one-cmds")
+ REQUIRED_UNITS+=("bcq-tools")
+
+ NPROC=$(cat /proc/cpuinfo | grep -c processor)
+
+ # TODO Use "nncc configure" and "nncc build"
+ cmake \
+ -G "MSYS Makefiles" \
+ -DUSE_PROTOBUF_LEGACY_IMPORT=ON \
+ -DCMAKE_EXE_LINKER_FLAGS="-Wl,--allow-multiple-definition" \
+ -DCMAKE_SHARED_LINKER_FLAGS="-Wl,--allow-multiple-definition" \
+ -DENABLE_TEST=OFF \
+ -DDOWNLOAD_GTEST=OFF \
+ -DBUILD_GTEST=OFF \
+ -DCMAKE_C_COMPILER=gcc \
+ -DCMAKE_CXX_COMPILER=g++ \
+ -DCMAKE_INSTALL_PREFIX="${NNCC_INSTALL_PREFIX}" \
+ -DCMAKE_BUILD_TYPE=release \
+ -DBUILD_WHITELIST=$(join_by ";" "${REQUIRED_UNITS[@]}") \
+ -DEXTERNALS_BUILD_THREADS=$((NPROC/2)) \
+ ${EXTRA_OPTIONS[@]} \
+ "${NNAS_PROJECT_PATH}/infra/nncc"
+}
+
+function preset_install()
+{
+ # Install libraries to bin/ for Windows release
+ mv ${NNCC_INSTALL_PREFIX}/lib/*.dll ${NNCC_INSTALL_PREFIX}/bin
+ rm -rf ${NNCC_INSTALL_PREFIX}/lib
+
+ install -t "${NNPKG_INSTALL_PREFIX}/bin" -D \
+ "${NNAS_PROJECT_PATH}/tools/nnpackage_tool/model2nnpkg/model2nnpkg.sh"
+
+ # Install tf2nnpkg
+ install -T -m 755 -D "${SCRIPT_PATH}/res/tf2nnpkg.20210910" "${NNAS_INSTALL_PREFIX}/bin/tf2nnpkg"
+
+ # Though you have to install tensorflow to run 'tf2tfliteV2',
+ # tensorflow can't be installed in mingw. First, you can install tensorflow
+ # from Windows native CMD (run as administrator) with a python virtual environment.
+ # And, you must copy it to "${NNAS_INSTALL_PREFIX}/bin/venv"
+}
diff --git a/infra/packaging/preset/20220323 b/infra/packaging/preset/20220323
new file mode 100644
index 000000000..69251d03d
--- /dev/null
+++ b/infra/packaging/preset/20220323
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+# NOTE purpose of this file is static analysis only
+# new official preset will be added when new programs are ready
+
+PRESET="20220323"
+
+function preset_configure()
+{
+ REQUIRED_UNITS=()
+ # Common Libraries
+ REQUIRED_UNITS+=("angkor" "cwrap" "pepper-str" "pepper-strcast" "pp")
+ REQUIRED_UNITS+=("oops" "pepper-assert" "pepper-csv2vec" "foder" "crew")
+ REQUIRED_UNITS+=("souschef")
+ REQUIRED_UNITS+=("safemain")
+ REQUIRED_UNITS+=("arser")
+ REQUIRED_UNITS+=("vconone")
+ # Hermes Logging Framework
+ REQUIRED_UNITS+=("hermes" "hermes-std")
+ # loco IR and related utilities
+ REQUIRED_UNITS+=("loco" "locop" "locomotiv" "logo-core" "logo")
+ # Flatbuffer I/O
+ REQUIRED_UNITS+=("mio-tflite280" "mio-circle04")
+ # Data I/O
+ REQUIRED_UNITS+=("dio-hdf5")
+ # Circle compiler library (.circle -> .circle)
+ REQUIRED_UNITS+=("luci")
+ # Tools
+ REQUIRED_UNITS+=("tflite2circle" "circle2circle" "tflchef")
+ REQUIRED_UNITS+=("circle-tensordump" "circledump")
+ REQUIRED_UNITS+=("tf2tfliteV2" "luci-interpreter")
+ REQUIRED_UNITS+=("luci-eval-driver")
+ REQUIRED_UNITS+=("record-minmax" "circle-quantizer" "rawdata2hdf5")
+ REQUIRED_UNITS+=("circle-eval-diff" "circle-interpreter")
+ REQUIRED_UNITS+=("circle-partitioner" "circle-operator")
+ REQUIRED_UNITS+=("one-cmds")
+ REQUIRED_UNITS+=("bcq-tools")
+ REQUIRED_UNITS+=("dalgona")
+ REQUIRED_UNITS+=("visq")
+
+ # Dependent modules needed for build
+ REQUIRED_UNITS+=("circlechef")
+ REQUIRED_UNITS+=("circle-verify")
+
+ NPROC=${NPROC:-$(cat /proc/cpuinfo | grep -c processor)}
+
+ # TODO Use "nncc configure" and "nncc build"
+ cmake \
+ -DCMAKE_INSTALL_PREFIX="${NNCC_INSTALL_PREFIX}" \
+ -DCMAKE_BUILD_TYPE=release \
+ -DBUILD_WHITELIST=$(join_by ";" "${REQUIRED_UNITS[@]}") \
+ -DEXTERNALS_BUILD_THREADS=$((NPROC/2)) \
+ ${EXTRA_OPTIONS[@]} \
+ "${NNAS_PROJECT_PATH}/infra/nncc"
+}
+
+function preset_install()
+{
+ install -t "${NNPKG_INSTALL_PREFIX}/bin" -D \
+ "${NNAS_PROJECT_PATH}/tools/nnpackage_tool/model2nnpkg/model2nnpkg.sh"
+
+ # Install tf2nnpkg
+ install -T -m 755 -D "${SCRIPT_PATH}/res/tf2nnpkg.${PRESET}" "${NNAS_INSTALL_PREFIX}/bin/tf2nnpkg"
+}
diff --git a/infra/packaging/preset/20220323_windows b/infra/packaging/preset/20220323_windows
new file mode 100644
index 000000000..c5a3f0ef9
--- /dev/null
+++ b/infra/packaging/preset/20220323_windows
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+function preset_configure()
+{
+ REQUIRED_UNITS=()
+ # Common Libraries
+ REQUIRED_UNITS+=("angkor" "cwrap" "pepper-str" "pepper-strcast" "pp")
+ REQUIRED_UNITS+=("oops" "pepper-assert" "pepper-csv2vec" "foder" "crew")
+ REQUIRED_UNITS+=("souschef")
+ REQUIRED_UNITS+=("safemain")
+ REQUIRED_UNITS+=("arser")
+ REQUIRED_UNITS+=("vconone")
+ # Hermes Logging Framework
+ REQUIRED_UNITS+=("hermes" "hermes-std")
+ # loco IR and related utilities
+ REQUIRED_UNITS+=("loco" "locop" "locomotiv" "logo-core" "logo")
+ # Flatbuffer I/O
+ REQUIRED_UNITS+=("mio-tflite280" "mio-circle04")
+ # Data I/O
+ REQUIRED_UNITS+=("dio-hdf5")
+ # Circle compiler library (.circle -> .circle)
+ REQUIRED_UNITS+=("luci")
+ # Tools
+ REQUIRED_UNITS+=("tflite2circle" "circle2circle" "tflchef")
+ REQUIRED_UNITS+=("circle-tensordump" "circledump")
+ REQUIRED_UNITS+=("tf2tfliteV2" "luci-interpreter")
+ REQUIRED_UNITS+=("luci-eval-driver")
+ REQUIRED_UNITS+=("record-minmax" "circle-quantizer" "rawdata2hdf5")
+ REQUIRED_UNITS+=("circle-eval-diff" "circle-interpreter")
+ REQUIRED_UNITS+=("circle-partitioner" "circle-operator")
+ REQUIRED_UNITS+=("one-cmds")
+ REQUIRED_UNITS+=("bcq-tools")
+ REQUIRED_UNITS+=("dalgona")
+ REQUIRED_UNITS+=("visq")
+
+ # Dependent modules needed for build
+ REQUIRED_UNITS+=("circlechef")
+ REQUIRED_UNITS+=("circle-verify")
+
+ NPROC=$(cat /proc/cpuinfo | grep -c processor)
+
+ # TODO Use "nncc configure" and "nncc build"
+ cmake \
+ -G "MSYS Makefiles" \
+ -DUSE_PROTOBUF_LEGACY_IMPORT=ON \
+ -DCMAKE_EXE_LINKER_FLAGS="-Wl,--allow-multiple-definition" \
+ -DCMAKE_SHARED_LINKER_FLAGS="-Wl,--allow-multiple-definition" \
+ -DENABLE_TEST=OFF \
+ -DDOWNLOAD_GTEST=OFF \
+ -DBUILD_GTEST=OFF \
+ -DCMAKE_C_COMPILER=gcc \
+ -DCMAKE_CXX_COMPILER=g++ \
+ -DCMAKE_INSTALL_PREFIX="${NNCC_INSTALL_PREFIX}" \
+ -DCMAKE_BUILD_TYPE=release \
+ -DBUILD_WHITELIST=$(join_by ";" "${REQUIRED_UNITS[@]}") \
+ -DEXTERNALS_BUILD_THREADS=$((NPROC/2)) \
+ ${EXTRA_OPTIONS[@]} \
+ "${NNAS_PROJECT_PATH}/infra/nncc"
+}
+
+function preset_install()
+{
+ # Install libraries to bin/ for Windows release
+ mv ${NNCC_INSTALL_PREFIX}/lib/*.dll ${NNCC_INSTALL_PREFIX}/bin
+ rm -rf ${NNCC_INSTALL_PREFIX}/lib
+
+ install -t "${NNPKG_INSTALL_PREFIX}/bin" -D \
+ "${NNAS_PROJECT_PATH}/tools/nnpackage_tool/model2nnpkg/model2nnpkg.sh"
+
+ # Install tf2nnpkg
+ install -T -m 755 -D "${SCRIPT_PATH}/res/tf2nnpkg.20220323" "${NNAS_INSTALL_PREFIX}/bin/tf2nnpkg"
+
+  # Although TensorFlow is required to run 'tf2tfliteV2', it cannot be
+  # installed under MinGW. Instead, install TensorFlow from a native Windows
+  # CMD prompt (run as administrator) inside a Python virtual environment,
+  # then copy that environment to "${NNAS_INSTALL_PREFIX}/bin/venv".
+}
diff --git a/infra/packaging/preset/20221125 b/infra/packaging/preset/20221125
new file mode 100644
index 000000000..d798087ec
--- /dev/null
+++ b/infra/packaging/preset/20221125
@@ -0,0 +1,66 @@
+#!/bin/bash
+
+# NOTE purpose of this file is static analysis only
+# new official preset will be added when new programs are ready
+
+PRESET="20221125"
+
+function preset_configure()
+{
+ REQUIRED_UNITS=()
+ # Common Libraries
+ REQUIRED_UNITS+=("angkor" "cwrap" "pepper-str" "pepper-strcast" "pp")
+ REQUIRED_UNITS+=("oops" "pepper-assert" "pepper-csv2vec" "foder" "crew")
+ REQUIRED_UNITS+=("souschef")
+ REQUIRED_UNITS+=("safemain")
+ REQUIRED_UNITS+=("arser")
+ REQUIRED_UNITS+=("vconone")
+ # Hermes Logging Framework
+ REQUIRED_UNITS+=("hermes" "hermes-std")
+ # loco IR and related utilities
+ REQUIRED_UNITS+=("loco" "locop" "locomotiv" "logo-core" "logo")
+ # Flatbuffer I/O
+ REQUIRED_UNITS+=("mio-tflite280" "mio-circle04")
+ # Data I/O
+ REQUIRED_UNITS+=("dio-hdf5")
+ # Compute
+ REQUIRED_UNITS+=("luci-compute")
+ # Circle compiler library (.circle -> .circle)
+ REQUIRED_UNITS+=("luci")
+ # Python interface for circle schema
+ REQUIRED_UNITS+=("pics")
+ # Tools
+ REQUIRED_UNITS+=("tflite2circle" "circle2circle" "tflchef")
+ REQUIRED_UNITS+=("circle-tensordump" "circledump")
+ REQUIRED_UNITS+=("tf2tfliteV2" "luci-interpreter")
+ REQUIRED_UNITS+=("luci-eval-driver")
+ REQUIRED_UNITS+=("record-minmax" "circle-quantizer" "rawdata2hdf5")
+ REQUIRED_UNITS+=("circle-eval-diff" "circle-interpreter")
+ REQUIRED_UNITS+=("circle-partitioner" "circle-operator")
+ REQUIRED_UNITS+=("one-cmds")
+ REQUIRED_UNITS+=("bcq-tools")
+ REQUIRED_UNITS+=("dalgona")
+ REQUIRED_UNITS+=("visq")
+ REQUIRED_UNITS+=("circle-opselector")
+
+ # Dependent modules needed for build
+ REQUIRED_UNITS+=("circlechef")
+ REQUIRED_UNITS+=("circle-verify")
+
+ NPROC=${NPROC:-$(cat /proc/cpuinfo | grep -c processor)}
+
+ # TODO Use "nncc configure" and "nncc build"
+ cmake \
+ -DCMAKE_INSTALL_PREFIX="${NNCC_INSTALL_PREFIX}" \
+ -DCMAKE_BUILD_TYPE=release \
+ -DBUILD_WHITELIST=$(join_by ";" "${REQUIRED_UNITS[@]}") \
+ -DEXTERNALS_BUILD_THREADS=$((NPROC/2)) \
+ ${EXTRA_OPTIONS[@]} \
+ "${NNAS_PROJECT_PATH}/infra/nncc"
+}
+
+function preset_install()
+{
+ # Install tf2nnpkg
+ install -T -m 755 -D "${SCRIPT_PATH}/res/tf2nnpkg.${PRESET}" "${NNAS_INSTALL_PREFIX}/bin/tf2nnpkg"
+}
diff --git a/infra/packaging/preset/20221125_windows b/infra/packaging/preset/20221125_windows
new file mode 100644
index 000000000..75c64260a
--- /dev/null
+++ b/infra/packaging/preset/20221125_windows
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+PRESET="20221125"
+
+function preset_configure()
+{
+ REQUIRED_UNITS=()
+ # Common Libraries
+ REQUIRED_UNITS+=("angkor" "cwrap" "pepper-str" "pepper-strcast" "pp")
+ REQUIRED_UNITS+=("oops" "pepper-assert" "pepper-csv2vec" "foder" "crew")
+ REQUIRED_UNITS+=("souschef")
+ REQUIRED_UNITS+=("safemain")
+ REQUIRED_UNITS+=("arser")
+ REQUIRED_UNITS+=("vconone")
+ # Hermes Logging Framework
+ REQUIRED_UNITS+=("hermes" "hermes-std")
+ # loco IR and related utilities
+ REQUIRED_UNITS+=("loco" "locop" "locomotiv" "logo-core" "logo")
+ # Flatbuffer I/O
+ REQUIRED_UNITS+=("mio-tflite280" "mio-circle04")
+ # Data I/O
+ REQUIRED_UNITS+=("dio-hdf5")
+ # Compute
+ REQUIRED_UNITS+=("luci-compute")
+ # Circle compiler library (.circle -> .circle)
+ REQUIRED_UNITS+=("luci")
+ # Python interface for circle schema
+ REQUIRED_UNITS+=("pics")
+ # Tools
+ REQUIRED_UNITS+=("tflite2circle" "circle2circle" "tflchef")
+ REQUIRED_UNITS+=("circle-tensordump" "circledump")
+ REQUIRED_UNITS+=("tf2tfliteV2" "luci-interpreter")
+ REQUIRED_UNITS+=("luci-eval-driver")
+ REQUIRED_UNITS+=("record-minmax" "circle-quantizer" "rawdata2hdf5")
+ REQUIRED_UNITS+=("circle-eval-diff" "circle-interpreter")
+ REQUIRED_UNITS+=("circle-partitioner" "circle-operator")
+ REQUIRED_UNITS+=("one-cmds")
+ REQUIRED_UNITS+=("bcq-tools")
+ REQUIRED_UNITS+=("dalgona")
+ REQUIRED_UNITS+=("visq")
+
+ # Dependent modules needed for build
+ REQUIRED_UNITS+=("circlechef")
+ REQUIRED_UNITS+=("circle-verify")
+
+ NPROC=$(cat /proc/cpuinfo | grep -c processor)
+
+ # TODO Use "nncc configure" and "nncc build"
+ cmake \
+ -G "MSYS Makefiles" \
+ -DUSE_PROTOBUF_LEGACY_IMPORT=ON \
+ -DCMAKE_EXE_LINKER_FLAGS="-Wl,--allow-multiple-definition" \
+ -DCMAKE_SHARED_LINKER_FLAGS="-Wl,--allow-multiple-definition" \
+ -DENABLE_TEST=OFF \
+ -DDOWNLOAD_GTEST=OFF \
+ -DBUILD_GTEST=OFF \
+ -DCMAKE_C_COMPILER=gcc \
+ -DCMAKE_CXX_COMPILER=g++ \
+ -DCMAKE_INSTALL_PREFIX="${NNCC_INSTALL_PREFIX}" \
+ -DCMAKE_BUILD_TYPE=release \
+ -DBUILD_WHITELIST=$(join_by ";" "${REQUIRED_UNITS[@]}") \
+ -DEXTERNALS_BUILD_THREADS=$((NPROC/2)) \
+ ${EXTRA_OPTIONS[@]} \
+ "${NNAS_PROJECT_PATH}/infra/nncc"
+}
+
+function preset_install()
+{
+ # Install libraries to bin/ for Windows release
+ mv ${NNCC_INSTALL_PREFIX}/lib/*.dll ${NNCC_INSTALL_PREFIX}/bin
+ rm -rf ${NNCC_INSTALL_PREFIX}/lib
+
+ # Install tf2nnpkg
+ install -T -m 755 -D "${SCRIPT_PATH}/res/tf2nnpkg.${PRESET}" "${NNAS_INSTALL_PREFIX}/bin/tf2nnpkg"
+
+  # Although TensorFlow is required to run 'tf2tfliteV2', it cannot be
+  # installed under MinGW. Instead, install TensorFlow from a native Windows
+  # CMD prompt (run as administrator) inside a Python virtual environment,
+  # then copy that environment to "${NNAS_INSTALL_PREFIX}/bin/venv".
+}
diff --git a/infra/packaging/preset/20230413 b/infra/packaging/preset/20230413
new file mode 100644
index 000000000..85ce6cbc6
--- /dev/null
+++ b/infra/packaging/preset/20230413
@@ -0,0 +1,66 @@
+#!/bin/bash
+
+# NOTE purpose of this file is static analysis only
+# new official preset will be added when new programs are ready
+
+PRESET="20230413"
+
+function preset_configure()
+{
+ REQUIRED_UNITS=()
+ # Common Libraries
+ REQUIRED_UNITS+=("angkor" "cwrap" "pepper-str" "pepper-strcast" "pp")
+ REQUIRED_UNITS+=("oops" "pepper-assert" "pepper-csv2vec" "foder" "crew")
+ REQUIRED_UNITS+=("souschef")
+ REQUIRED_UNITS+=("safemain")
+ REQUIRED_UNITS+=("arser")
+ REQUIRED_UNITS+=("vconone")
+ # Hermes Logging Framework
+ REQUIRED_UNITS+=("hermes" "hermes-std")
+ # loco IR and related utilities
+ REQUIRED_UNITS+=("loco" "locop" "locomotiv" "logo-core" "logo")
+ # Flatbuffer I/O
+ REQUIRED_UNITS+=("mio-tflite280" "mio-circle05" "mio-tflite2121" "mio-circle06")
+ # Data I/O
+ REQUIRED_UNITS+=("dio-hdf5")
+ # Compute
+ REQUIRED_UNITS+=("luci-compute")
+ # Circle compiler library (.circle -> .circle)
+ REQUIRED_UNITS+=("luci")
+ # Python interface for circle schema
+ REQUIRED_UNITS+=("pics")
+ # Tools
+ REQUIRED_UNITS+=("tflite2circle" "circle2circle" "tflchef")
+ REQUIRED_UNITS+=("circle-tensordump" "circledump")
+ REQUIRED_UNITS+=("tf2tfliteV2" "luci-interpreter")
+ REQUIRED_UNITS+=("luci-eval-driver")
+ REQUIRED_UNITS+=("record-minmax" "circle-quantizer" "rawdata2hdf5" "circle-mpqsolver")
+ REQUIRED_UNITS+=("circle-eval-diff" "circle-interpreter")
+ REQUIRED_UNITS+=("circle-partitioner" "circle-operator")
+ REQUIRED_UNITS+=("one-cmds")
+ REQUIRED_UNITS+=("bcq-tools")
+ REQUIRED_UNITS+=("dalgona")
+ REQUIRED_UNITS+=("visq")
+ REQUIRED_UNITS+=("circle-opselector")
+
+ # Dependent modules needed for build
+ REQUIRED_UNITS+=("circlechef")
+ REQUIRED_UNITS+=("circle-verify")
+
+ NPROC=${NPROC:-$(cat /proc/cpuinfo | grep -c processor)}
+
+ # TODO Use "nncc configure" and "nncc build"
+ cmake \
+ -DCMAKE_INSTALL_PREFIX="${NNCC_INSTALL_PREFIX}" \
+ -DCMAKE_BUILD_TYPE=release \
+ -DBUILD_WHITELIST=$(join_by ";" "${REQUIRED_UNITS[@]}") \
+ -DEXTERNALS_BUILD_THREADS=$((NPROC/2)) \
+ ${EXTRA_OPTIONS[@]} \
+ "${NNAS_PROJECT_PATH}/infra/nncc"
+}
+
+function preset_install()
+{
+ # Install tf2nnpkg
+ install -T -m 755 -D "${SCRIPT_PATH}/res/tf2nnpkg.${PRESET}" "${NNAS_INSTALL_PREFIX}/bin/tf2nnpkg"
+}
diff --git a/infra/packaging/preset/20230413_windows b/infra/packaging/preset/20230413_windows
new file mode 100644
index 000000000..8015de86d
--- /dev/null
+++ b/infra/packaging/preset/20230413_windows
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+PRESET="20230413"
+
+function preset_configure()
+{
+ REQUIRED_UNITS=()
+ # Common Libraries
+ REQUIRED_UNITS+=("angkor" "cwrap" "pepper-str" "pepper-strcast" "pp")
+ REQUIRED_UNITS+=("oops" "pepper-assert" "pepper-csv2vec" "foder" "crew")
+ REQUIRED_UNITS+=("souschef")
+ REQUIRED_UNITS+=("safemain")
+ REQUIRED_UNITS+=("arser")
+ REQUIRED_UNITS+=("vconone")
+ # Hermes Logging Framework
+ REQUIRED_UNITS+=("hermes" "hermes-std")
+ # loco IR and related utilities
+ REQUIRED_UNITS+=("loco" "locop" "locomotiv" "logo-core" "logo")
+ # Flatbuffer I/O
+ REQUIRED_UNITS+=("mio-tflite280" "mio-circle05" "mio-tflite2121" "mio-circle06")
+ # Data I/O
+ REQUIRED_UNITS+=("dio-hdf5")
+ # Compute
+ REQUIRED_UNITS+=("luci-compute")
+ # Circle compiler library (.circle -> .circle)
+ REQUIRED_UNITS+=("luci")
+ # Python interface for circle schema
+ REQUIRED_UNITS+=("pics")
+ # Tools
+ REQUIRED_UNITS+=("tflite2circle" "circle2circle" "tflchef")
+ REQUIRED_UNITS+=("circle-tensordump" "circledump")
+ REQUIRED_UNITS+=("tf2tfliteV2" "luci-interpreter")
+ REQUIRED_UNITS+=("luci-eval-driver")
+ REQUIRED_UNITS+=("record-minmax" "circle-quantizer" "rawdata2hdf5" "circle-mpqsolver")
+ REQUIRED_UNITS+=("circle-eval-diff" "circle-interpreter")
+ REQUIRED_UNITS+=("circle-partitioner" "circle-operator")
+ REQUIRED_UNITS+=("one-cmds")
+ REQUIRED_UNITS+=("bcq-tools")
+ REQUIRED_UNITS+=("dalgona")
+ REQUIRED_UNITS+=("visq")
+
+ # Dependent modules needed for build
+ REQUIRED_UNITS+=("circlechef")
+ REQUIRED_UNITS+=("circle-verify")
+
+ NPROC=$(cat /proc/cpuinfo | grep -c processor)
+
+ # TODO Use "nncc configure" and "nncc build"
+ cmake \
+ -G "MSYS Makefiles" \
+ -DUSE_PROTOBUF_LEGACY_IMPORT=ON \
+ -DCMAKE_EXE_LINKER_FLAGS="-Wl,--allow-multiple-definition" \
+ -DCMAKE_SHARED_LINKER_FLAGS="-Wl,--allow-multiple-definition" \
+ -DENABLE_TEST=OFF \
+ -DDOWNLOAD_GTEST=OFF \
+ -DBUILD_GTEST=OFF \
+ -DCMAKE_C_COMPILER=gcc \
+ -DCMAKE_CXX_COMPILER=g++ \
+ -DCMAKE_INSTALL_PREFIX="${NNCC_INSTALL_PREFIX}" \
+ -DCMAKE_BUILD_TYPE=release \
+ -DBUILD_WHITELIST=$(join_by ";" "${REQUIRED_UNITS[@]}") \
+ -DEXTERNALS_BUILD_THREADS=$((NPROC/2)) \
+ ${EXTRA_OPTIONS[@]} \
+ "${NNAS_PROJECT_PATH}/infra/nncc"
+}
+
+function preset_install()
+{
+ # Install libraries to bin/ for Windows release
+ mv ${NNCC_INSTALL_PREFIX}/lib/*.dll ${NNCC_INSTALL_PREFIX}/bin
+ rm -rf ${NNCC_INSTALL_PREFIX}/lib
+
+ # Install tf2nnpkg
+ install -T -m 755 -D "${SCRIPT_PATH}/res/tf2nnpkg.${PRESET}" "${NNAS_INSTALL_PREFIX}/bin/tf2nnpkg"
+
+ # Though you have to install tensorflow to run 'tf2tfliteV2',
+ # tensorflow can't be installed in mingw. First, You can install tensorflow
+ # from Window native CMD(run as administrator) with python virtual environment.
+ # And, you must copy it to "${NNAS_INSTALL_PREFIX}/bin/venv"
+}
diff --git a/infra/packaging/preset/20230907 b/infra/packaging/preset/20230907
new file mode 100644
index 000000000..44bc4e00f
--- /dev/null
+++ b/infra/packaging/preset/20230907
@@ -0,0 +1,66 @@
+#!/bin/bash
+
+# NOTE purpose of this file is static analysis only
+# new official preset will be added when new programs are ready
+
+PRESET="20230907"
+
+function preset_configure()
+{
+ REQUIRED_UNITS=()
+ # Common Libraries
+ REQUIRED_UNITS+=("angkor" "cwrap" "pepper-str" "pepper-strcast" "pp")
+ REQUIRED_UNITS+=("oops" "pepper-assert" "pepper-csv2vec" "foder" "crew")
+ REQUIRED_UNITS+=("souschef")
+ REQUIRED_UNITS+=("safemain")
+ REQUIRED_UNITS+=("arser")
+ REQUIRED_UNITS+=("vconone")
+ # Hermes Logging Framework
+ REQUIRED_UNITS+=("hermes" "hermes-std")
+ # loco IR and related utilities
+ REQUIRED_UNITS+=("loco" "locop" "locomotiv" "logo-core" "logo")
+ # Flatbuffer I/O
+ REQUIRED_UNITS+=("mio-tflite2121" "mio-circle06")
+ # Data I/O
+ REQUIRED_UNITS+=("dio-hdf5")
+ # Compute
+ REQUIRED_UNITS+=("luci-compute")
+ # Circle compiler library (.circle -> .circle)
+ REQUIRED_UNITS+=("luci")
+ # Python interface for circle schema
+ REQUIRED_UNITS+=("pics")
+ # Tools
+ REQUIRED_UNITS+=("tflite2circle" "circle2circle" "tflchef")
+ REQUIRED_UNITS+=("circle-tensordump" "circledump")
+ REQUIRED_UNITS+=("tf2tfliteV2" "luci-interpreter")
+ REQUIRED_UNITS+=("luci-eval-driver")
+ REQUIRED_UNITS+=("record-minmax" "circle-quantizer" "rawdata2hdf5" "circle-mpqsolver")
+ REQUIRED_UNITS+=("circle-eval-diff" "circle-interpreter")
+ REQUIRED_UNITS+=("circle-partitioner" "circle-operator")
+ REQUIRED_UNITS+=("one-cmds")
+ REQUIRED_UNITS+=("bcq-tools")
+ REQUIRED_UNITS+=("dalgona")
+ REQUIRED_UNITS+=("visq")
+ REQUIRED_UNITS+=("circle-opselector")
+
+ # Dependent modules needed for build
+ REQUIRED_UNITS+=("circlechef")
+ REQUIRED_UNITS+=("circle-verify")
+
+ NPROC=${NPROC:-$(cat /proc/cpuinfo | grep -c processor)}
+
+ # TODO Use "nncc configure" and "nncc build"
+ cmake \
+ -DCMAKE_INSTALL_PREFIX="${NNCC_INSTALL_PREFIX}" \
+ -DCMAKE_BUILD_TYPE=release \
+ -DBUILD_WHITELIST=$(join_by ";" "${REQUIRED_UNITS[@]}") \
+ -DEXTERNALS_BUILD_THREADS=$((NPROC/2)) \
+ ${EXTRA_OPTIONS[@]} \
+ "${NNAS_PROJECT_PATH}/infra/nncc"
+}
+
+function preset_install()
+{
+ # Install tf2nnpkg
+ install -T -m 755 -D "${SCRIPT_PATH}/res/tf2nnpkg.${PRESET}" "${NNAS_INSTALL_PREFIX}/bin/tf2nnpkg"
+}
diff --git a/infra/packaging/preset/20230907_windows b/infra/packaging/preset/20230907_windows
new file mode 100644
index 000000000..5dcb36ab0
--- /dev/null
+++ b/infra/packaging/preset/20230907_windows
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+PRESET="20230907"
+
+function preset_configure()
+{
+ REQUIRED_UNITS=()
+ # Common Libraries
+ REQUIRED_UNITS+=("angkor" "cwrap" "pepper-str" "pepper-strcast" "pp")
+ REQUIRED_UNITS+=("oops" "pepper-assert" "pepper-csv2vec" "foder" "crew")
+ REQUIRED_UNITS+=("souschef")
+ REQUIRED_UNITS+=("safemain")
+ REQUIRED_UNITS+=("arser")
+ REQUIRED_UNITS+=("vconone")
+ # Hermes Logging Framework
+ REQUIRED_UNITS+=("hermes" "hermes-std")
+ # loco IR and related utilities
+ REQUIRED_UNITS+=("loco" "locop" "locomotiv" "logo-core" "logo")
+ # Flatbuffer I/O
+ REQUIRED_UNITS+=("mio-tflite2121" "mio-circle06")
+ # Data I/O
+ REQUIRED_UNITS+=("dio-hdf5")
+ # Compute
+ REQUIRED_UNITS+=("luci-compute")
+ # Circle compiler library (.circle -> .circle)
+ REQUIRED_UNITS+=("luci")
+ # Python interface for circle schema
+ REQUIRED_UNITS+=("pics")
+ # Tools
+ REQUIRED_UNITS+=("tflite2circle" "circle2circle" "tflchef")
+ REQUIRED_UNITS+=("circle-tensordump" "circledump")
+ REQUIRED_UNITS+=("tf2tfliteV2" "luci-interpreter")
+ REQUIRED_UNITS+=("luci-eval-driver")
+ REQUIRED_UNITS+=("record-minmax" "circle-quantizer" "rawdata2hdf5" "circle-mpqsolver")
+ REQUIRED_UNITS+=("circle-eval-diff" "circle-interpreter")
+ REQUIRED_UNITS+=("circle-partitioner" "circle-operator")
+ REQUIRED_UNITS+=("one-cmds")
+ REQUIRED_UNITS+=("bcq-tools")
+ REQUIRED_UNITS+=("dalgona")
+ REQUIRED_UNITS+=("visq")
+
+ # Dependent modules needed for build
+ REQUIRED_UNITS+=("circlechef")
+ REQUIRED_UNITS+=("circle-verify")
+
+ NPROC=$(cat /proc/cpuinfo | grep -c processor)
+
+ # TODO Use "nncc configure" and "nncc build"
+ cmake \
+ -G "MSYS Makefiles" \
+ -DUSE_PROTOBUF_LEGACY_IMPORT=ON \
+ -DCMAKE_EXE_LINKER_FLAGS="-Wl,--allow-multiple-definition" \
+ -DCMAKE_SHARED_LINKER_FLAGS="-Wl,--allow-multiple-definition" \
+ -DENABLE_TEST=OFF \
+ -DDOWNLOAD_GTEST=OFF \
+ -DBUILD_GTEST=OFF \
+ -DCMAKE_C_COMPILER=gcc \
+ -DCMAKE_CXX_COMPILER=g++ \
+ -DCMAKE_INSTALL_PREFIX="${NNCC_INSTALL_PREFIX}" \
+ -DCMAKE_BUILD_TYPE=release \
+ -DBUILD_WHITELIST=$(join_by ";" "${REQUIRED_UNITS[@]}") \
+ -DEXTERNALS_BUILD_THREADS=$((NPROC/2)) \
+ ${EXTRA_OPTIONS[@]} \
+ "${NNAS_PROJECT_PATH}/infra/nncc"
+}
+
+function preset_install()
+{
+ # Install libraries to bin/ for Windows release
+ mv ${NNCC_INSTALL_PREFIX}/lib/*.dll ${NNCC_INSTALL_PREFIX}/bin
+ rm -rf ${NNCC_INSTALL_PREFIX}/lib
+
+ # Install tf2nnpkg
+ install -T -m 755 -D "${SCRIPT_PATH}/res/tf2nnpkg.${PRESET}" "${NNAS_INSTALL_PREFIX}/bin/tf2nnpkg"
+
+  # Although TensorFlow is required to run 'tf2tfliteV2', it cannot be
+  # installed under MinGW. Instead, install TensorFlow from a native Windows
+  # CMD prompt (run as administrator) inside a Python virtual environment,
+  # then copy that environment to "${NNAS_INSTALL_PREFIX}/bin/venv".
+}
diff --git a/infra/packaging/res/tf2nnpkg.20200630 b/infra/packaging/res/tf2nnpkg.20200630
index 7846fd388..b7091541a 100644
--- a/infra/packaging/res/tf2nnpkg.20200630
+++ b/infra/packaging/res/tf2nnpkg.20200630
@@ -92,16 +92,39 @@ OUTPUT=$(awk -F, '/^output/ { print $2 }' ${INFO_FILE} | cut -d: -f1 | tr -d ' '
INPUT_SHAPES=$(grep ^input ${INFO_FILE} | cut -d "[" -f2 | cut -d "]" -f1 | tr -d ' ' | xargs | tr ' ' ':')
+# Generate BCQ information metadata.
+# If the model has no BCQ information, or the information is invalid, the pb file is left unchanged.
+"${ROOT}/bin/generate_bcq_metadata" \
+--input_path "${GRAPHDEF_FILE}" \
+--output_path "${TMPDIR}/${MODEL_NAME}_withmeta.pb" \
+--output_arrays "${OUTPUT}"
+
+# Generate BCQ information nodes to be used as output_arrays.
+# If the model has no BCQ information, the generated output_arrays list will be empty.
+"${ROOT}/bin/generate_bcq_output_arrays" \
+--input_path "${TMPDIR}/${MODEL_NAME}_withmeta.pb" \
+--metadata_path "${TMPDIR}/${MODEL_NAME}_metadata_arrays.txt" \
+--output_arrays_path "${TMPDIR}/${MODEL_NAME}_output_arrays.txt"
+
# generate tflite file
-python "${ROOT}/bin/tf2tfliteV2.py" ${TF_INTERFACE} --input_path ${GRAPHDEF_FILE} \
---output_path "${TMPDIR}/${MODEL_NAME}.tflite" \
---input_arrays ${INPUT} --input_shapes ${INPUT_SHAPES} \
---output_arrays ${OUTPUT}
+TF2TFLITE_CONVERT_SCRIPT="python ${ROOT}/bin/tf2tfliteV2.py ${TF_INTERFACE} "
+TF2TFLITE_CONVERT_SCRIPT+="--input_path ${TMPDIR}/${MODEL_NAME}_withmeta.pb "
+TF2TFLITE_CONVERT_SCRIPT+="--input_arrays ${INPUT} "
+TF2TFLITE_CONVERT_SCRIPT+="--output_path ${TMPDIR}/${MODEL_NAME}.tflite "
+TF2TFLITE_CONVERT_SCRIPT+="--output_arrays "
+TF2TFLITE_CONVERT_SCRIPT+="$(cat ${TMPDIR}/${MODEL_NAME}_metadata_arrays.txt)"
+TF2TFLITE_CONVERT_SCRIPT+="${OUTPUT}"
+TF2TFLITE_CONVERT_SCRIPT+="$(cat ${TMPDIR}/${MODEL_NAME}_output_arrays.txt) "
+if [ ! -z ${INPUT_SHAPES} ]; then
+ TF2TFLITE_CONVERT_SCRIPT+="--input_shapes ${INPUT_SHAPES} "
+fi
+
+${TF2TFLITE_CONVERT_SCRIPT}
# convert .tflite to .circle
"${ROOT}/bin/tflite2circle" "${TMPDIR}/${MODEL_NAME}.tflite" "${TMPDIR}/${MODEL_NAME}.tmp.circle"
# optimize
-"${ROOT}/bin/circle2circle" --all "${TMPDIR}/${MODEL_NAME}.tmp.circle" "${TMPDIR}/${MODEL_NAME}.circle"
+"${ROOT}/bin/circle2circle" --O1 "${TMPDIR}/${MODEL_NAME}.tmp.circle" "${TMPDIR}/${MODEL_NAME}.circle"
"${ROOT}/bin/model2nnpkg.sh" -o "${OUTPUT_DIR}" "${TMPDIR}/${MODEL_NAME}.circle"
diff --git a/infra/packaging/res/tf2nnpkg.20210406 b/infra/packaging/res/tf2nnpkg.20210406
new file mode 100644
index 000000000..0d44818a1
--- /dev/null
+++ b/infra/packaging/res/tf2nnpkg.20210406
@@ -0,0 +1,109 @@
+#!/bin/bash
+
+set -e
+
+ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+
+command_exists() {
+ if [ "$#" -le 0 ]; then
+ return 1
+ fi
+ command -v "$@" > /dev/null 2>&1
+}
+
+usage()
+{
+ echo "Convert TensorFlow model to nnpackage."
+ echo "Usage: tf2nnpkg"
+ echo " --info <path/to/info>"
+ echo " --graphdef <path/to/pb>"
+ echo " -o <path/to/nnpkg/directory>"
+ echo " --v2 (optional) Use TF 2.x interface"
+ exit 255
+}
+
+TF_INTERFACE="--v1"
+
+# Parse command-line arguments
+#
+while [ "$#" -ne 0 ]; do
+ CUR="$1"
+
+ case $CUR in
+ '--help')
+ usage
+ ;;
+ '--info')
+ export INFO_FILE="$2"
+ shift 2
+ ;;
+ '--graphdef')
+ export GRAPHDEF_FILE="$2"
+ shift 2
+ ;;
+ '-o')
+ export OUTPUT_DIR="$2"
+ shift 2
+ ;;
+ '--v2')
+ TF_INTERFACE="--v2"
+ shift
+ ;;
+ *)
+ echo "${CUR}"
+ shift
+ ;;
+ esac
+done
+
+if [ -z ${GRAPHDEF_FILE} ] || [ ! -e ${GRAPHDEF_FILE} ]; then
+ echo "pb is not found. Please check --graphdef is correct."
+ exit 2
+fi
+
+if [ -z ${INFO_FILE} ] || [ ! -e ${INFO_FILE} ]; then
+ echo "info is not found. Please check --info is correct."
+ exit 2
+fi
+
+if [ -z ${OUTPUT_DIR} ]; then
+ echo "output directory is not specifed. Please check -o is correct.."
+ exit 2
+fi
+
+FILE_BASE=$(basename ${GRAPHDEF_FILE})
+MODEL_NAME="${FILE_BASE%.*}"
+TMPDIR=$(mktemp -d)
+trap "{ rm -rf $TMPDIR; }" EXIT
+
+# activate python virtual environment
+VIRTUALENV_LINUX="${ROOT}/bin/venv/bin/activate"
+VIRTUALENV_WINDOWS="${ROOT}/bin/venv/Scripts/activate"
+
+if [ -e ${VIRTUALENV_LINUX} ]; then
+ source ${VIRTUALENV_LINUX}
+elif [ -e ${VIRTUALENV_WINDOWS} ]; then
+ source ${VIRTUALENV_WINDOWS}
+fi
+
+# parse inputs, outputs from info file
+INPUT=$(awk -F, '/^input/ { print $2 }' ${INFO_FILE} | cut -d: -f1 | tr -d ' ' | paste -d, -s)
+OUTPUT=$(awk -F, '/^output/ { print $2 }' ${INFO_FILE} | cut -d: -f1 | tr -d ' ' | paste -d, -s)
+
+INPUT_SHAPES=$(grep ^input ${INFO_FILE} | cut -d "[" -f2 | cut -d "]" -f1 | tr -d ' ' | xargs | tr ' ' ':')
+
+ONE_IMPORT_BCQ_SCRIPT="${ROOT}/bin/one-import-bcq ${TF_INTERFACE} "
+ONE_IMPORT_BCQ_SCRIPT+="-i ${GRAPHDEF_FILE} "
+ONE_IMPORT_BCQ_SCRIPT+="-o ${TMPDIR}/${MODEL_NAME}.tmp.circle "
+ONE_IMPORT_BCQ_SCRIPT+="-I ${INPUT} "
+ONE_IMPORT_BCQ_SCRIPT+="-O ${OUTPUT} "
+if [ ! -z ${INPUT_SHAPES} ]; then
+ ONE_IMPORT_BCQ_SCRIPT+="-s ${INPUT_SHAPES} "
+fi
+
+${ONE_IMPORT_BCQ_SCRIPT}
+
+# optimize
+"${ROOT}/bin/circle2circle" --O1 "${TMPDIR}/${MODEL_NAME}.tmp.circle" "${TMPDIR}/${MODEL_NAME}.circle"
+
+"${ROOT}/bin/model2nnpkg.sh" -o "${OUTPUT_DIR}" "${TMPDIR}/${MODEL_NAME}.circle"
diff --git a/infra/packaging/res/tf2nnpkg.20210706 b/infra/packaging/res/tf2nnpkg.20210706
new file mode 100644
index 000000000..0d44818a1
--- /dev/null
+++ b/infra/packaging/res/tf2nnpkg.20210706
@@ -0,0 +1,109 @@
+#!/bin/bash
+
+set -e
+
+ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+
+command_exists() {
+ if [ "$#" -le 0 ]; then
+ return 1
+ fi
+ command -v "$@" > /dev/null 2>&1
+}
+
+usage()
+{
+ echo "Convert TensorFlow model to nnpackage."
+ echo "Usage: tf2nnpkg"
+ echo " --info <path/to/info>"
+ echo " --graphdef <path/to/pb>"
+ echo " -o <path/to/nnpkg/directory>"
+ echo " --v2 (optional) Use TF 2.x interface"
+ exit 255
+}
+
+TF_INTERFACE="--v1"
+
+# Parse command-line arguments
+#
+while [ "$#" -ne 0 ]; do
+ CUR="$1"
+
+ case $CUR in
+ '--help')
+ usage
+ ;;
+ '--info')
+ export INFO_FILE="$2"
+ shift 2
+ ;;
+ '--graphdef')
+ export GRAPHDEF_FILE="$2"
+ shift 2
+ ;;
+ '-o')
+ export OUTPUT_DIR="$2"
+ shift 2
+ ;;
+ '--v2')
+ TF_INTERFACE="--v2"
+ shift
+ ;;
+ *)
+ echo "${CUR}"
+ shift
+ ;;
+ esac
+done
+
+if [ -z ${GRAPHDEF_FILE} ] || [ ! -e ${GRAPHDEF_FILE} ]; then
+ echo "pb is not found. Please check --graphdef is correct."
+ exit 2
+fi
+
+if [ -z ${INFO_FILE} ] || [ ! -e ${INFO_FILE} ]; then
+ echo "info is not found. Please check --info is correct."
+ exit 2
+fi
+
+if [ -z ${OUTPUT_DIR} ]; then
+ echo "output directory is not specifed. Please check -o is correct.."
+ exit 2
+fi
+
+FILE_BASE=$(basename ${GRAPHDEF_FILE})
+MODEL_NAME="${FILE_BASE%.*}"
+TMPDIR=$(mktemp -d)
+trap "{ rm -rf $TMPDIR; }" EXIT
+
+# activate python virtual environment
+VIRTUALENV_LINUX="${ROOT}/bin/venv/bin/activate"
+VIRTUALENV_WINDOWS="${ROOT}/bin/venv/Scripts/activate"
+
+if [ -e ${VIRTUALENV_LINUX} ]; then
+ source ${VIRTUALENV_LINUX}
+elif [ -e ${VIRTUALENV_WINDOWS} ]; then
+ source ${VIRTUALENV_WINDOWS}
+fi
+
+# parse inputs, outputs from info file
+INPUT=$(awk -F, '/^input/ { print $2 }' ${INFO_FILE} | cut -d: -f1 | tr -d ' ' | paste -d, -s)
+OUTPUT=$(awk -F, '/^output/ { print $2 }' ${INFO_FILE} | cut -d: -f1 | tr -d ' ' | paste -d, -s)
+
+INPUT_SHAPES=$(grep ^input ${INFO_FILE} | cut -d "[" -f2 | cut -d "]" -f1 | tr -d ' ' | xargs | tr ' ' ':')
+
+ONE_IMPORT_BCQ_SCRIPT="${ROOT}/bin/one-import-bcq ${TF_INTERFACE} "
+ONE_IMPORT_BCQ_SCRIPT+="-i ${GRAPHDEF_FILE} "
+ONE_IMPORT_BCQ_SCRIPT+="-o ${TMPDIR}/${MODEL_NAME}.tmp.circle "
+ONE_IMPORT_BCQ_SCRIPT+="-I ${INPUT} "
+ONE_IMPORT_BCQ_SCRIPT+="-O ${OUTPUT} "
+if [ ! -z ${INPUT_SHAPES} ]; then
+ ONE_IMPORT_BCQ_SCRIPT+="-s ${INPUT_SHAPES} "
+fi
+
+${ONE_IMPORT_BCQ_SCRIPT}
+
+# optimize
+"${ROOT}/bin/circle2circle" --O1 "${TMPDIR}/${MODEL_NAME}.tmp.circle" "${TMPDIR}/${MODEL_NAME}.circle"
+
+"${ROOT}/bin/model2nnpkg.sh" -o "${OUTPUT_DIR}" "${TMPDIR}/${MODEL_NAME}.circle"
diff --git a/infra/packaging/res/tf2nnpkg.20210910 b/infra/packaging/res/tf2nnpkg.20210910
new file mode 100644
index 000000000..0d44818a1
--- /dev/null
+++ b/infra/packaging/res/tf2nnpkg.20210910
@@ -0,0 +1,109 @@
+#!/bin/bash
+
+set -e
+
+ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+
+command_exists() {
+ if [ "$#" -le 0 ]; then
+ return 1
+ fi
+ command -v "$@" > /dev/null 2>&1
+}
+
+usage()
+{
+ echo "Convert TensorFlow model to nnpackage."
+ echo "Usage: tf2nnpkg"
+ echo " --info <path/to/info>"
+ echo " --graphdef <path/to/pb>"
+ echo " -o <path/to/nnpkg/directory>"
+ echo " --v2 (optional) Use TF 2.x interface"
+ exit 255
+}
+
+TF_INTERFACE="--v1"
+
+# Parse command-line arguments
+#
+while [ "$#" -ne 0 ]; do
+ CUR="$1"
+
+ case $CUR in
+ '--help')
+ usage
+ ;;
+ '--info')
+ export INFO_FILE="$2"
+ shift 2
+ ;;
+ '--graphdef')
+ export GRAPHDEF_FILE="$2"
+ shift 2
+ ;;
+ '-o')
+ export OUTPUT_DIR="$2"
+ shift 2
+ ;;
+ '--v2')
+ TF_INTERFACE="--v2"
+ shift
+ ;;
+ *)
+ echo "${CUR}"
+ shift
+ ;;
+ esac
+done
+
+if [ -z ${GRAPHDEF_FILE} ] || [ ! -e ${GRAPHDEF_FILE} ]; then
+ echo "pb is not found. Please check --graphdef is correct."
+ exit 2
+fi
+
+if [ -z ${INFO_FILE} ] || [ ! -e ${INFO_FILE} ]; then
+ echo "info is not found. Please check --info is correct."
+ exit 2
+fi
+
+if [ -z ${OUTPUT_DIR} ]; then
+ echo "output directory is not specifed. Please check -o is correct.."
+ exit 2
+fi
+
+FILE_BASE=$(basename ${GRAPHDEF_FILE})
+MODEL_NAME="${FILE_BASE%.*}"
+TMPDIR=$(mktemp -d)
+trap "{ rm -rf $TMPDIR; }" EXIT
+
+# activate python virtual environment
+VIRTUALENV_LINUX="${ROOT}/bin/venv/bin/activate"
+VIRTUALENV_WINDOWS="${ROOT}/bin/venv/Scripts/activate"
+
+if [ -e ${VIRTUALENV_LINUX} ]; then
+ source ${VIRTUALENV_LINUX}
+elif [ -e ${VIRTUALENV_WINDOWS} ]; then
+ source ${VIRTUALENV_WINDOWS}
+fi
+
+# parse inputs, outputs from info file
+INPUT=$(awk -F, '/^input/ { print $2 }' ${INFO_FILE} | cut -d: -f1 | tr -d ' ' | paste -d, -s)
+OUTPUT=$(awk -F, '/^output/ { print $2 }' ${INFO_FILE} | cut -d: -f1 | tr -d ' ' | paste -d, -s)
+
+INPUT_SHAPES=$(grep ^input ${INFO_FILE} | cut -d "[" -f2 | cut -d "]" -f1 | tr -d ' ' | xargs | tr ' ' ':')
+
+ONE_IMPORT_BCQ_SCRIPT="${ROOT}/bin/one-import-bcq ${TF_INTERFACE} "
+ONE_IMPORT_BCQ_SCRIPT+="-i ${GRAPHDEF_FILE} "
+ONE_IMPORT_BCQ_SCRIPT+="-o ${TMPDIR}/${MODEL_NAME}.tmp.circle "
+ONE_IMPORT_BCQ_SCRIPT+="-I ${INPUT} "
+ONE_IMPORT_BCQ_SCRIPT+="-O ${OUTPUT} "
+if [ ! -z ${INPUT_SHAPES} ]; then
+ ONE_IMPORT_BCQ_SCRIPT+="-s ${INPUT_SHAPES} "
+fi
+
+${ONE_IMPORT_BCQ_SCRIPT}
+
+# optimize
+"${ROOT}/bin/circle2circle" --O1 "${TMPDIR}/${MODEL_NAME}.tmp.circle" "${TMPDIR}/${MODEL_NAME}.circle"
+
+"${ROOT}/bin/model2nnpkg.sh" -o "${OUTPUT_DIR}" "${TMPDIR}/${MODEL_NAME}.circle"
diff --git a/infra/packaging/res/tf2nnpkg.20220323 b/infra/packaging/res/tf2nnpkg.20220323
new file mode 100644
index 000000000..5f43b2386
--- /dev/null
+++ b/infra/packaging/res/tf2nnpkg.20220323
@@ -0,0 +1,109 @@
+#!/bin/bash
+
+set -e
+
+ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+
+command_exists() {
+ if [ "$#" -le 0 ]; then
+ return 1
+ fi
+ command -v "$@" > /dev/null 2>&1
+}
+
+usage()
+{
+ echo "Convert TensorFlow model to nnpackage."
+ echo "Usage: tf2nnpkg"
+ echo " --info <path/to/info>"
+ echo " --graphdef <path/to/pb>"
+ echo " -o <path/to/nnpkg/directory>"
+ echo " --v2 (optional) Use TF 2.x interface"
+ exit 255
+}
+
+TF_INTERFACE="--v1"
+
+# Parse command-line arguments
+#
+while [ "$#" -ne 0 ]; do
+ CUR="$1"
+
+ case $CUR in
+ '--help')
+ usage
+ ;;
+ '--info')
+ export INFO_FILE="$2"
+ shift 2
+ ;;
+ '--graphdef')
+ export GRAPHDEF_FILE="$2"
+ shift 2
+ ;;
+ '-o')
+ export OUTPUT_DIR="$2"
+ shift 2
+ ;;
+ '--v2')
+ TF_INTERFACE="--v2"
+ shift
+ ;;
+ *)
+ echo "${CUR}"
+ shift
+ ;;
+ esac
+done
+
+if [ -z ${GRAPHDEF_FILE} ] || [ ! -e ${GRAPHDEF_FILE} ]; then
+ echo "pb is not found. Please check --graphdef is correct."
+ exit 2
+fi
+
+if [ -z ${INFO_FILE} ] || [ ! -e ${INFO_FILE} ]; then
+ echo "info is not found. Please check --info is correct."
+ exit 2
+fi
+
+if [ -z ${OUTPUT_DIR} ]; then
+ echo "output directory is not specifed. Please check -o is correct.."
+ exit 2
+fi
+
+FILE_BASE=$(basename ${GRAPHDEF_FILE})
+MODEL_NAME="${FILE_BASE%.*}"
+TMPDIR=$(mktemp -d)
+trap "{ rm -rf $TMPDIR; }" EXIT
+
+# activate python virtual environment
+VIRTUALENV_LINUX="${ROOT}/bin/venv/bin/activate"
+VIRTUALENV_WINDOWS="${ROOT}/bin/venv/Scripts/activate"
+
+if [ -e ${VIRTUALENV_LINUX} ]; then
+ source ${VIRTUALENV_LINUX}
+elif [ -e ${VIRTUALENV_WINDOWS} ]; then
+ source ${VIRTUALENV_WINDOWS}
+fi
+
+# parse inputs, outputs from info file
+INPUT=$(awk -F, '/^input/ { print $2 }' ${INFO_FILE} | cut -d: -f1 | tr -d ' ' | paste -d, -s)
+OUTPUT=$(awk -F, '/^output/ { print $2 }' ${INFO_FILE} | cut -d: -f1 | tr -d ' ' | paste -d, -s)
+
+INPUT_SHAPES=$(grep ^input ${INFO_FILE} | cut -d "[" -f2 | cut -d "]" -f1 | tr -d ' ' | xargs | tr ' ' ':')
+
+ONE_IMPORT_BCQ_SCRIPT="${ROOT}/bin/one-import-bcq ${TF_INTERFACE} "
+ONE_IMPORT_BCQ_SCRIPT+="-i ${GRAPHDEF_FILE} "
+ONE_IMPORT_BCQ_SCRIPT+="-o ${TMPDIR}/${MODEL_NAME}.tmp.circle "
+ONE_IMPORT_BCQ_SCRIPT+="-I ${INPUT} "
+ONE_IMPORT_BCQ_SCRIPT+="-O ${OUTPUT} "
+if [ ! -z ${INPUT_SHAPES} ]; then
+ ONE_IMPORT_BCQ_SCRIPT+="-s ${INPUT_SHAPES} "
+fi
+
+${ONE_IMPORT_BCQ_SCRIPT}
+
+# optimize
+"${ROOT}/bin/circle2circle" --resolve_customop_add "${TMPDIR}/${MODEL_NAME}.tmp.circle" "${TMPDIR}/${MODEL_NAME}.circle"
+
+"${ROOT}/bin/model2nnpkg.sh" -o "${OUTPUT_DIR}" "${TMPDIR}/${MODEL_NAME}.circle"
diff --git a/infra/packaging/res/tf2nnpkg.20221125 b/infra/packaging/res/tf2nnpkg.20221125
new file mode 100644
index 000000000..a7446e6fe
--- /dev/null
+++ b/infra/packaging/res/tf2nnpkg.20221125
@@ -0,0 +1,109 @@
+#!/bin/bash
+
+set -e
+
+ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+
+command_exists() {
+ if [ "$#" -le 0 ]; then
+ return 1
+ fi
+ command -v "$@" > /dev/null 2>&1
+}
+
+usage()
+{
+ echo "Convert TensorFlow model to nnpackage."
+ echo "Usage: tf2nnpkg"
+ echo " --info <path/to/info>"
+ echo " --graphdef <path/to/pb>"
+ echo " -o <path/to/nnpkg/directory>"
+ echo " --v2 (optional) Use TF 2.x interface"
+ exit 255
+}
+
+TF_INTERFACE="--v1"
+
+# Parse command-line arguments
+#
+while [ "$#" -ne 0 ]; do
+ CUR="$1"
+
+ case $CUR in
+ '--help')
+ usage
+ ;;
+ '--info')
+ export INFO_FILE="$2"
+ shift 2
+ ;;
+ '--graphdef')
+ export GRAPHDEF_FILE="$2"
+ shift 2
+ ;;
+ '-o')
+ export OUTPUT_DIR="$2"
+ shift 2
+ ;;
+ '--v2')
+ TF_INTERFACE="--v2"
+ shift
+ ;;
+ *)
+ echo "${CUR}"
+ shift
+ ;;
+ esac
+done
+
+if [ -z ${GRAPHDEF_FILE} ] || [ ! -e ${GRAPHDEF_FILE} ]; then
+ echo "pb is not found. Please check --graphdef is correct."
+ exit 2
+fi
+
+if [ -z ${INFO_FILE} ] || [ ! -e ${INFO_FILE} ]; then
+ echo "info is not found. Please check --info is correct."
+ exit 2
+fi
+
+if [ -z ${OUTPUT_DIR} ]; then
+ echo "output directory is not specifed. Please check -o is correct.."
+ exit 2
+fi
+
+FILE_BASE=$(basename ${GRAPHDEF_FILE})
+MODEL_NAME="${FILE_BASE%.*}"
+TMPDIR=$(mktemp -d)
+trap "{ rm -rf $TMPDIR; }" EXIT
+
+# activate python virtual environment
+VIRTUALENV_LINUX="${ROOT}/bin/venv/bin/activate"
+VIRTUALENV_WINDOWS="${ROOT}/bin/venv/Scripts/activate"
+
+if [ -e ${VIRTUALENV_LINUX} ]; then
+ source ${VIRTUALENV_LINUX}
+elif [ -e ${VIRTUALENV_WINDOWS} ]; then
+ source ${VIRTUALENV_WINDOWS}
+fi
+
+# parse inputs, outputs from info file
+INPUT=$(awk -F, '/^input/ { print $2 }' ${INFO_FILE} | cut -d: -f1 | tr -d ' ' | paste -d, -s)
+OUTPUT=$(awk -F, '/^output/ { print $2 }' ${INFO_FILE} | cut -d: -f1 | tr -d ' ' | paste -d, -s)
+
+INPUT_SHAPES=$(grep ^input ${INFO_FILE} | cut -d "[" -f2 | cut -d "]" -f1 | tr -d ' ' | xargs | tr ' ' ':')
+
+ONE_IMPORT_BCQ_SCRIPT="${ROOT}/bin/one-import-bcq ${TF_INTERFACE} "
+ONE_IMPORT_BCQ_SCRIPT+="-i ${GRAPHDEF_FILE} "
+ONE_IMPORT_BCQ_SCRIPT+="-o ${TMPDIR}/${MODEL_NAME}.tmp.circle "
+ONE_IMPORT_BCQ_SCRIPT+="-I ${INPUT} "
+ONE_IMPORT_BCQ_SCRIPT+="-O ${OUTPUT} "
+if [ ! -z ${INPUT_SHAPES} ]; then
+ ONE_IMPORT_BCQ_SCRIPT+="-s ${INPUT_SHAPES} "
+fi
+
+${ONE_IMPORT_BCQ_SCRIPT}
+
+# optimize
+"${ROOT}/bin/circle2circle" --resolve_customop_add "${TMPDIR}/${MODEL_NAME}.tmp.circle" "${TMPDIR}/${MODEL_NAME}.circle"
+
+"${ROOT}/bin/model2nnpkg" -o "${OUTPUT_DIR}" -m "${TMPDIR}/${MODEL_NAME}.circle"
diff --git a/infra/packaging/res/tf2nnpkg.20230413 b/infra/packaging/res/tf2nnpkg.20230413
new file mode 100644
index 000000000..a7446e6fe
--- /dev/null
+++ b/infra/packaging/res/tf2nnpkg.20230413
@@ -0,0 +1,109 @@
+#!/bin/bash
+
+set -e
+
+ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+
+command_exists() {
+ if [ "$#" -le 0 ]; then
+ return 1
+ fi
+ command -v "$@" > /dev/null 2>&1
+}
+
+usage()
+{
+ echo "Convert TensorFlow model to nnpackage."
+ echo "Usage: tf2nnpkg"
+ echo " --info <path/to/info>"
+ echo " --graphdef <path/to/pb>"
+ echo " -o <path/to/nnpkg/directory>"
+ echo " --v2 (optional) Use TF 2.x interface"
+ exit 255
+}
+
+TF_INTERFACE="--v1"
+
+# Parse command-line arguments
+#
+while [ "$#" -ne 0 ]; do
+ CUR="$1"
+
+ case $CUR in
+ '--help')
+ usage
+ ;;
+ '--info')
+ export INFO_FILE="$2"
+ shift 2
+ ;;
+ '--graphdef')
+ export GRAPHDEF_FILE="$2"
+ shift 2
+ ;;
+ '-o')
+ export OUTPUT_DIR="$2"
+ shift 2
+ ;;
+ '--v2')
+ TF_INTERFACE="--v2"
+ shift
+ ;;
+ *)
+ echo "${CUR}"
+ shift
+ ;;
+ esac
+done
+
+if [ -z ${GRAPHDEF_FILE} ] || [ ! -e ${GRAPHDEF_FILE} ]; then
+ echo "pb is not found. Please check --graphdef is correct."
+ exit 2
+fi
+
+if [ -z ${INFO_FILE} ] || [ ! -e ${INFO_FILE} ]; then
+ echo "info is not found. Please check --info is correct."
+ exit 2
+fi
+
+if [ -z ${OUTPUT_DIR} ]; then
+ echo "output directory is not specifed. Please check -o is correct.."
+ exit 2
+fi
+
+FILE_BASE=$(basename ${GRAPHDEF_FILE})
+MODEL_NAME="${FILE_BASE%.*}"
+TMPDIR=$(mktemp -d)
+trap "{ rm -rf $TMPDIR; }" EXIT
+
+# activate python virtual environment
+VIRTUALENV_LINUX="${ROOT}/bin/venv/bin/activate"
+VIRTUALENV_WINDOWS="${ROOT}/bin/venv/Scripts/activate"
+
+if [ -e ${VIRTUALENV_LINUX} ]; then
+ source ${VIRTUALENV_LINUX}
+elif [ -e ${VIRTUALENV_WINDOWS} ]; then
+ source ${VIRTUALENV_WINDOWS}
+fi
+
+# parse inputs, outputs from info file
+INPUT=$(awk -F, '/^input/ { print $2 }' ${INFO_FILE} | cut -d: -f1 | tr -d ' ' | paste -d, -s)
+OUTPUT=$(awk -F, '/^output/ { print $2 }' ${INFO_FILE} | cut -d: -f1 | tr -d ' ' | paste -d, -s)
+
+INPUT_SHAPES=$(grep ^input ${INFO_FILE} | cut -d "[" -f2 | cut -d "]" -f1 | tr -d ' ' | xargs | tr ' ' ':')
+
+ONE_IMPORT_BCQ_SCRIPT="${ROOT}/bin/one-import-bcq ${TF_INTERFACE} "
+ONE_IMPORT_BCQ_SCRIPT+="-i ${GRAPHDEF_FILE} "
+ONE_IMPORT_BCQ_SCRIPT+="-o ${TMPDIR}/${MODEL_NAME}.tmp.circle "
+ONE_IMPORT_BCQ_SCRIPT+="-I ${INPUT} "
+ONE_IMPORT_BCQ_SCRIPT+="-O ${OUTPUT} "
+if [ ! -z ${INPUT_SHAPES} ]; then
+ ONE_IMPORT_BCQ_SCRIPT+="-s ${INPUT_SHAPES} "
+fi
+
+${ONE_IMPORT_BCQ_SCRIPT}
+
+# optimize
+"${ROOT}/bin/circle2circle" --resolve_customop_add "${TMPDIR}/${MODEL_NAME}.tmp.circle" "${TMPDIR}/${MODEL_NAME}.circle"
+
+"${ROOT}/bin/model2nnpkg" -o "${OUTPUT_DIR}" -m "${TMPDIR}/${MODEL_NAME}.circle"
diff --git a/infra/packaging/res/tf2nnpkg.20230907 b/infra/packaging/res/tf2nnpkg.20230907
new file mode 100644
index 000000000..a7446e6fe
--- /dev/null
+++ b/infra/packaging/res/tf2nnpkg.20230907
@@ -0,0 +1,109 @@
+#!/bin/bash
+
+set -e
+
+ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+
+command_exists() {
+ if [ "$#" -le 0 ]; then
+ return 1
+ fi
+ command -v "$@" > /dev/null 2>&1
+}
+
+usage()
+{
+ echo "Convert TensorFlow model to nnpackage."
+ echo "Usage: tf2nnpkg"
+ echo " --info <path/to/info>"
+ echo " --graphdef <path/to/pb>"
+ echo " -o <path/to/nnpkg/directory>"
+ echo " --v2 (optional) Use TF 2.x interface"
+ exit 255
+}
+
+TF_INTERFACE="--v1"
+
+# Parse command-line arguments
+#
+while [ "$#" -ne 0 ]; do
+ CUR="$1"
+
+ case $CUR in
+ '--help')
+ usage
+ ;;
+ '--info')
+ export INFO_FILE="$2"
+ shift 2
+ ;;
+ '--graphdef')
+ export GRAPHDEF_FILE="$2"
+ shift 2
+ ;;
+ '-o')
+ export OUTPUT_DIR="$2"
+ shift 2
+ ;;
+ '--v2')
+ TF_INTERFACE="--v2"
+ shift
+ ;;
+ *)
+ echo "${CUR}"
+ shift
+ ;;
+ esac
+done
+
+if [ -z ${GRAPHDEF_FILE} ] || [ ! -e ${GRAPHDEF_FILE} ]; then
+ echo "pb is not found. Please check --graphdef is correct."
+ exit 2
+fi
+
+if [ -z ${INFO_FILE} ] || [ ! -e ${INFO_FILE} ]; then
+ echo "info is not found. Please check --info is correct."
+ exit 2
+fi
+
+if [ -z ${OUTPUT_DIR} ]; then
+ echo "output directory is not specifed. Please check -o is correct.."
+ exit 2
+fi
+
+FILE_BASE=$(basename ${GRAPHDEF_FILE})
+MODEL_NAME="${FILE_BASE%.*}"
+TMPDIR=$(mktemp -d)
+trap "{ rm -rf $TMPDIR; }" EXIT
+
+# activate python virtual environment
+VIRTUALENV_LINUX="${ROOT}/bin/venv/bin/activate"
+VIRTUALENV_WINDOWS="${ROOT}/bin/venv/Scripts/activate"
+
+if [ -e ${VIRTUALENV_LINUX} ]; then
+ source ${VIRTUALENV_LINUX}
+elif [ -e ${VIRTUALENV_WINDOWS} ]; then
+ source ${VIRTUALENV_WINDOWS}
+fi
+
+# parse inputs, outputs from info file
+INPUT=$(awk -F, '/^input/ { print $2 }' ${INFO_FILE} | cut -d: -f1 | tr -d ' ' | paste -d, -s)
+OUTPUT=$(awk -F, '/^output/ { print $2 }' ${INFO_FILE} | cut -d: -f1 | tr -d ' ' | paste -d, -s)
+
+INPUT_SHAPES=$(grep ^input ${INFO_FILE} | cut -d "[" -f2 | cut -d "]" -f1 | tr -d ' ' | xargs | tr ' ' ':')
+
+ONE_IMPORT_BCQ_SCRIPT="${ROOT}/bin/one-import-bcq ${TF_INTERFACE} "
+ONE_IMPORT_BCQ_SCRIPT+="-i ${GRAPHDEF_FILE} "
+ONE_IMPORT_BCQ_SCRIPT+="-o ${TMPDIR}/${MODEL_NAME}.tmp.circle "
+ONE_IMPORT_BCQ_SCRIPT+="-I ${INPUT} "
+ONE_IMPORT_BCQ_SCRIPT+="-O ${OUTPUT} "
+if [ ! -z ${INPUT_SHAPES} ]; then
+ ONE_IMPORT_BCQ_SCRIPT+="-s ${INPUT_SHAPES} "
+fi
+
+${ONE_IMPORT_BCQ_SCRIPT}
+
+# optimize
+"${ROOT}/bin/circle2circle" --resolve_customop_add "${TMPDIR}/${MODEL_NAME}.tmp.circle" "${TMPDIR}/${MODEL_NAME}.circle"
+
+"${ROOT}/bin/model2nnpkg" -o "${OUTPUT_DIR}" -m "${TMPDIR}/${MODEL_NAME}.circle"
diff --git a/infra/scripts/build-tcm.sh b/infra/scripts/build-tcm.sh
index 38533c1f9..768cff762 100755
--- a/infra/scripts/build-tcm.sh
+++ b/infra/scripts/build-tcm.sh
@@ -2,13 +2,16 @@
#
# STEP 1
# Download latest TCM tool from
-# https://github.sec.samsung.net/RS-TCM/tca-standalone/releases/download/v0.0.8/tca-standalone-0.0.8.jar
+# https://github.sec.samsung.net/RS-TCM/tca-standalone/releases/download/1.0.2/tca-standalone-1.0.2.jar
#
# STEP 2
# Create symbolic link `./src` for source directory to be analyzed which has `.ahub` configuration.
#
# STEP 3
-# run this `build-tcm.sh` script.
+# run this script in `build-tcm.sh [test_target]` format.
+# ex) $ build_tcm.sh # to analyze both NN Runtime and NN Compiler
+# ex) $ build_tcm.sh NN_Runtime # to analyze NN Runtime only
+# ex) $ build_tcm.sh NN_Compiler # to analyze NN Compiler only
#
# See the following link for additional details.
# https://github.sec.samsung.net/RS-TCM/tca-standalone/wiki/Tutorials-CPP-Gtest
@@ -16,9 +19,10 @@
echo ${PROJECT_DIR:=${PWD}}
-java -jar $PROJECT_DIR/tca-standalone-0.0.8.jar \
+java -jar $PROJECT_DIR/tca-standalone-1.0.2.jar \
--outdir=$PROJECT_DIR/tcm-output \
--config=$PROJECT_DIR/src/.ahub/tcchecker-tca/config.yaml \
--local=$PROJECT_DIR/src \
--logfile=$PROJECT_DIR/tcm-output/tcm.log \
--debug
+ $@
diff --git a/infra/scripts/build_android_runtime_release.sh b/infra/scripts/build_android_runtime_release.sh
deleted file mode 100755
index fe933c648..000000000
--- a/infra/scripts/build_android_runtime_release.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-[[ "${BASH_SOURCE[0]}" != "${0}" ]] && echo "Please don't source ${BASH_SOURCE[0]}, execute it" && return
-
-CURRENT_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-ROOT_PATH="$CURRENT_PATH/../../"
-
-# prepare pre-built armcompute library
-# android build requires pre-built armcompute library
-if [ ! -n "$EXT_ACL_FOLDER" ]; then
- echo "Please set EXT_ACL_FOLDER to use pre-built armcompute library"
- exit 1
-fi
-
-# prepare ndk
-if [ ! -n "$NDK_DIR" ]; then
- export NDK_DIR=$ROOT_PATH/tools/cross/ndk/r20/ndk
- echo "It will use default external path"
-fi
-
-export TARGET_OS=android
-export CROSS_BUILD=1
-make -f Makefile.template
diff --git a/infra/scripts/common.sh b/infra/scripts/common.sh
index a10aac271..0beaf6766 100755
--- a/infra/scripts/common.sh
+++ b/infra/scripts/common.sh
@@ -31,11 +31,11 @@ function CheckTestPrepared()
{
# Model download server setting
if [[ -z "${MODELFILE_SERVER}" ]]; then
- echo "[WARNING] Model file server is not set"
- echo " Try to use pre-downloaed model"
+ echo "Model file server is not set. Try to use default setting."
else
echo "Model Server: ${MODELFILE_SERVER}"
fi
+ $INSTALL_PATH/test/onert-test prepare-model
}
# $1: (required) backend
@@ -50,10 +50,10 @@ function TFLiteModelVerification()
export BACKENDS=$1
if [[ "$2" == "" ]]; then
- $INSTALL_PATH/test/onert-test verify-tflite --api=nnapi \
+ $INSTALL_PATH/test/onert-test verify-tflite \
--reportdir=$ROOT_PATH/$3
else
- $INSTALL_PATH/test/onert-test verify-tflite --api=nnapi \
+ $INSTALL_PATH/test/onert-test verify-tflite \
--list=$2 \
--reportdir=$ROOT_PATH/$3
fi
@@ -74,7 +74,7 @@ function NNAPIGTest()
# Backup original nnapi_gtest.skip
# TODO Pass skiplist to test-driver.sh
- SKIPLIST_FILE="${INSTALL_PATH}/unittest/nnapi_gtest.skip"
+ SKIPLIST_FILE="${INSTALL_PATH}/nnapi-gtest/nnapi_gtest.skip"
BACKUP_FILE="${SKIPLIST_FILE}.backup"
if [[ "$2" != "" ]]; then
cp ${SKIPLIST_FILE} ${BACKUP_FILE}
@@ -84,7 +84,7 @@ function NNAPIGTest()
export BACKENDS=$1
$INSTALL_PATH/test/onert-test unittest \
--reportdir=$ROOT_PATH/$3 \
- --unittestdir=$INSTALL_PATH/unittest
+ --unittestdir=$INSTALL_PATH/nnapi-gtest
unset BACKENDS
# TODO Pass skiplist to test-driver.sh
@@ -129,27 +129,3 @@ function NNPackageTest()
popd > /dev/null
}
-
-# $1: (required) backend
-# $2: (required) test list file relative path from nnfw root directory
-# pass empty string if there is no skiplist
-# $3: (required) relative path to report from nnfw root directory
-function TFLiteLoaderTest()
-{
- [[ $# -ne 3 ]] && echo "TFLiteLoaderTest: Invalid function argument setting" && exit 1
-
- pushd ${ROOT_PATH} > /dev/null
-
- export BACKENDS=$1
- if [[ "$2" == "" ]]; then
- $INSTALL_PATH/test/onert-test verify-tflite --api=loader \
- --reportdir=$ROOT_PATH/$3
- else
- $INSTALL_PATH/test/onert-test verify-tflite --api=loader \
- --list=$2 \
- --reportdir=$ROOT_PATH/$3
- fi
- unset BACKENDS
-
- popd > /dev/null
-}
diff --git a/infra/scripts/compiler_modules.sh b/infra/scripts/compiler_modules.sh
index a0323e0a0..8b361a7ea 100644
--- a/infra/scripts/compiler_modules.sh
+++ b/infra/scripts/compiler_modules.sh
@@ -1,23 +1,38 @@
#!/bin/bash
+# NOTE this file is sourced from, for the purpose of
+# - configure_compiler_coverage.sh: to get test coverage for release criteria
+
# Don't run this script
[[ "${BASH_SOURCE[0]}" == "${0}" ]] && echo "Please don't execute ${BASH_SOURCE[0]}, source it" && return
-DEBUG_BUILD_ITEMS="angkor;cwrap;pepper-str;pepper-strcast;pp;stdex"
-DEBUG_BUILD_ITEMS+=";oops;pepper-assert"
+DEBUG_BUILD_ITEMS="angkor;cwrap;pepper-str;pepper-strcast;pp"
+DEBUG_BUILD_ITEMS+=";oops;pepper-assert;pepper-csv2vec"
DEBUG_BUILD_ITEMS+=";hermes;hermes-std"
DEBUG_BUILD_ITEMS+=";loco;locop;locomotiv;logo-core;logo"
-DEBUG_BUILD_ITEMS+=";foder;souschef;arser;vconone"
-DEBUG_BUILD_ITEMS+=";safemain;mio-circle;mio-tflite"
+DEBUG_BUILD_ITEMS+=";foder;crew;souschef;arser;vconone"
+DEBUG_BUILD_ITEMS+=";safemain;mio-circle05;mio-tflite280;mio-circle06;mio-tflite2121;dio-hdf5"
+DEBUG_BUILD_ITEMS+=";luci-compute"
DEBUG_BUILD_ITEMS+=";tflite2circle"
DEBUG_BUILD_ITEMS+=";luci"
DEBUG_BUILD_ITEMS+=";luci-interpreter"
-DEBUG_BUILD_ITEMS+=";luci-value-test"
+DEBUG_BUILD_ITEMS+=";luci-eval-driver;luci-pass-value-test;luci-value-test"
DEBUG_BUILD_ITEMS+=";circle2circle;record-minmax;circle-quantizer"
+DEBUG_BUILD_ITEMS+=";circle-eval-diff"
+DEBUG_BUILD_ITEMS+=";circle-partitioner;circle-part-driver;circle-operator"
DEBUG_BUILD_ITEMS+=";circle-verify"
+DEBUG_BUILD_ITEMS+=";circle-tensordump;circle-opselector"
DEBUG_BUILD_ITEMS+=";tflchef;circlechef"
DEBUG_BUILD_ITEMS+=";common-artifacts"
DEBUG_BUILD_ITEMS+=";circle2circle-dredd-recipe-test"
DEBUG_BUILD_ITEMS+=";record-minmax-conversion-test"
DEBUG_BUILD_ITEMS+=";tf2tfliteV2;tf2tfliteV2-conversion-test"
DEBUG_BUILD_ITEMS+=";tflite2circle-conversion-test"
+DEBUG_BUILD_ITEMS+=";pota-quantization-value-test;pics"
+DEBUG_BUILD_ITEMS+=";circle-part-value-test"
+DEBUG_BUILD_ITEMS+=";circle-quantizer-dredd-recipe-test"
+DEBUG_BUILD_ITEMS+=";circle-operator-test"
+DEBUG_BUILD_ITEMS+=";circle-interpreter;circle-interpreter-test"
+DEBUG_BUILD_ITEMS+=";dalgona;dalgona-test"
+DEBUG_BUILD_ITEMS+=";visq"
+DEBUG_BUILD_ITEMS+=";circle-mpqsolver"
diff --git a/infra/scripts/docker_build_cross_aarch64_runtime.sh b/infra/scripts/docker_build_cross_aarch64_runtime.sh
deleted file mode 100755
index 011d14c18..000000000
--- a/infra/scripts/docker_build_cross_aarch64_runtime.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/bash
-
-[[ "${BASH_SOURCE[0]}" != "${0}" ]] && echo "Please don't source ${BASH_SOURCE[0]}, execute it" && return
-
-CURRENT_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-ROOT_PATH="$CURRENT_PATH/../../"
-
-# prepare rootfs
-if [ -z "$ROOTFS_DIR" ] || [ ! -d $ROOTFS_DIR ]; then
- echo "It will use default rootfs path"
-else
- DOCKER_VOLUMES+=" -v $ROOTFS_DIR:/opt/rootfs"
- DOCKER_ENV_VARS+=" -e ROOTFS_DIR=/opt/rootfs"
-fi
-
-# mount volume (or directory) for externals
-if [ -n "$EXTERNAL_VOLUME" ]; then
- DOCKER_VOLUMES+=" -v $EXTERNAL_VOLUME:/externals"
- DOCKER_ENV_VARS+=" -e EXTERNAL_VOLUME=/externals"
-else
- echo "It will use default external path"
-fi
-
-# docker image name
-if [[ -z $DOCKER_IMAGE_NAME ]]; then
- echo "It will use default docker image name"
-fi
-
-# Mirror server setting
-if [[ -z $EXTERNAL_DOWNLOAD_SERVER ]]; then
- echo "It will not use mirror server"
-fi
-
-DOCKER_ENV_VARS+=" -e TARGET_ARCH=aarch64"
-DOCKER_ENV_VARS+=" -e CROSS_BUILD=1"
-
-set -e
-
-pushd $ROOT_PATH > /dev/null
-
-# TODO use command instead of makefile
-export DOCKER_ENV_VARS
-export DOCKER_VOLUMES
-CMD="cp -nv Makefile.template Makefile && \
- make all install build_test_suite"
-./nnfw docker-run bash -c "$CMD"
-
-popd > /dev/null
diff --git a/infra/scripts/docker_build_cross_arm_runtime.sh b/infra/scripts/docker_build_cross_arm_runtime.sh
deleted file mode 100755
index 551fb5700..000000000
--- a/infra/scripts/docker_build_cross_arm_runtime.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/bash
-
-[[ "${BASH_SOURCE[0]}" != "${0}" ]] && echo "Please don't source ${BASH_SOURCE[0]}, execute it" && return
-
-CURRENT_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-ROOT_PATH="$CURRENT_PATH/../../"
-
-# prepare rootfs
-if [ -z "$ROOTFS_DIR" ] || [ ! -d $ROOTFS_DIR ]; then
- echo "It will use default rootfs path"
-else
- DOCKER_VOLUMES+=" -v $ROOTFS_DIR:/opt/rootfs"
- DOCKER_ENV_VARS+=" -e ROOTFS_DIR=/opt/rootfs"
-fi
-
-# mount volume (or directory) for externals
-if [ -n "$EXTERNAL_VOLUME" ]; then
- DOCKER_VOLUMES+=" -v $EXTERNAL_VOLUME:/externals"
- DOCKER_ENV_VARS+=" -e EXTERNAL_VOLUME=/externals"
-else
- echo "It will use default external path"
-fi
-
-# docker image name
-if [[ -z $DOCKER_IMAGE_NAME ]]; then
- echo "It will use default docker image name"
-fi
-
-# Mirror server setting
-if [[ -z $EXTERNAL_DOWNLOAD_SERVER ]]; then
- echo "It will not use mirror server"
-fi
-
-DOCKER_ENV_VARS+=" -e TARGET_ARCH=armv7l"
-DOCKER_ENV_VARS+=" -e CROSS_BUILD=1"
-
-set -e
-
-pushd $ROOT_PATH > /dev/null
-
-# TODO use command instead of makefile
-export DOCKER_ENV_VARS
-export DOCKER_VOLUMES
-CMD="cp -nv Makefile.template Makefile && \
- make all install build_test_suite"
-./nnfw docker-run bash -c "$CMD"
-
-popd > /dev/null
diff --git a/infra/scripts/docker_build_cross_arm_runtime_release.sh b/infra/scripts/docker_build_cross_arm_runtime_release.sh
deleted file mode 100755
index 876f318f4..000000000
--- a/infra/scripts/docker_build_cross_arm_runtime_release.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-
-[[ "${BASH_SOURCE[0]}" != "${0}" ]] && echo "Please don't source ${BASH_SOURCE[0]}, execute it" && return
-
-CURRENT_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-ROOT_PATH="$CURRENT_PATH/../../"
-
-# prepare rootfs
-if [ -z "$ROOTFS_DIR" ] || [ ! -d $ROOTFS_DIR ]; then
- echo "It will use default rootfs path"
-else
- DOCKER_VOLUMES+=" -v $ROOTFS_DIR:/opt/rootfs"
- DOCKER_ENV_VARS+=" -e ROOTFS_DIR=/opt/rootfs"
-fi
-
-# mount volume (or directory) for externals
-if [ -n "$EXTERNAL_VOLUME" ]; then
- DOCKER_VOLUMES+=" -v $EXTERNAL_VOLUME:/externals"
- DOCKER_ENV_VARS+=" -e EXTERNAL_VOLUME=/externals"
-else
- echo "It will use default external path"
-fi
-
-# docker image name
-if [[ -z $DOCKER_IMAGE_NAME ]]; then
- echo "It will use default docker image name"
-fi
-
-# Mirror server setting
-if [[ -z $EXTERNAL_DOWNLOAD_SERVER ]]; then
- echo "It will not use mirror server"
-fi
-
-DOCKER_ENV_VARS+=" -e TARGET_ARCH=armv7l"
-DOCKER_ENV_VARS+=" -e CROSS_BUILD=1"
-DOCKER_ENV_VARS+=" -e BUILD_TYPE=release"
-
-set -e
-
-pushd $ROOT_PATH > /dev/null
-
-# TODO use command instead of makefile
-export DOCKER_ENV_VARS
-export DOCKER_VOLUMES
-CMD="cp -nv Makefile.template Makefile && \
- make all install build_test_suite"
-./nnfw docker-run bash -c "$CMD"
-
-popd > /dev/null
diff --git a/infra/scripts/docker_build_cross_coverage.sh b/infra/scripts/docker_build_cross_coverage.sh
deleted file mode 100755
index f42251baa..000000000
--- a/infra/scripts/docker_build_cross_coverage.sh
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/bin/bash
-
-[[ "${BASH_SOURCE[0]}" != "${0}" ]] && echo "Please don't source ${BASH_SOURCE[0]}, execute it" && return
-
-CURRENT_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-ROOT_PATH="$CURRENT_PATH/../../"
-
-# prepare rootfs
-if [ -z "$ROOTFS_DIR" ] || [ ! -d $ROOTFS_DIR ]; then
- echo "It will use default rootfs path"
-else
- DOCKER_VOLUMES+=" -v $ROOTFS_DIR:/opt/rootfs"
- DOCKER_ENV_VARS+=" -e ROOTFS_DIR=/opt/rootfs"
-fi
-
-# mount volume (or directory) for externals
-if [ -n "$EXTERNAL_VOLUME" ]; then
- DOCKER_VOLUMES+=" -v $EXTERNAL_VOLUME:/externals"
- DOCKER_ENV_VARS+=" -e EXTERNAL_VOLUME=/externals"
-else
- echo "It will use default external path"
-fi
-
-# docker image name
-if [[ -z $DOCKER_IMAGE_NAME ]]; then
- echo "It will use default docker image name"
-fi
-
-# Mirror server setting
-if [[ -z $EXTERNAL_DOWNLOAD_SERVER ]]; then
- echo "It will not use mirror server"
-fi
-
-NNAS_WORKSPACE=${NNAS_WORKSPACE:-build}
-if [[ -z "${ARCHIVE_PATH}" ]]; then
- ARCHIVE_PATH=${NNAS_WORKSPACE}/archive
-fi
-
-DOCKER_ENV_VARS+=" -e TARGET_ARCH=armv7l"
-DOCKER_ENV_VARS+=" -e CROSS_BUILD=1"
-DOCKER_ENV_VARS+=" -e COVERAGE_BUILD=1"
-
-set -e
-
-pushd $ROOT_PATH > /dev/null
-
-# TODO use command instead of makefile
-export DOCKER_ENV_VARS
-export DOCKER_VOLUMES
-CMD="cp -nv Makefile.template Makefile && \
- make all install build_coverage_suite"
-./nnfw docker-run bash -c "$CMD"
-
-mkdir -p ${ARCHIVE_PATH}
-# TODO change workspace usage in makefile
-mv Product/out/coverage-suite.tar.gz ${ARCHIVE_PATH}/
-
-popd > /dev/null
diff --git a/infra/scripts/docker_build_nncc.sh b/infra/scripts/docker_build_nncc.sh
index 6cdfdf01b..dd9d0bd9b 100755
--- a/infra/scripts/docker_build_nncc.sh
+++ b/infra/scripts/docker_build_nncc.sh
@@ -2,6 +2,10 @@
[[ "${BASH_SOURCE[0]}" != "${0}" ]] && echo "Please don't source ${BASH_SOURCE[0]}, execute it" && return
+unset RELEASE_VERSION
+# TODO need more better argument parsing
+RELEASE_VERSION="$1"
+
CURRENT_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_PATH="$CURRENT_PATH/../../"
@@ -23,18 +27,20 @@ else
fi
# prepare tensorflow
-if [ -d $TENSORFLOW_PREFIX ]; then
+if [ -n "$TENSORFLOW_PREFIX" ]; then
DOCKER_OPTS+=" -v $TENSORFLOW_PREFIX:/opt/tensorflow"
CONFIG_OPTIONS+=" -DTENSORFLOW_PREFIX=/opt/tensorflow"
fi
# prepare onnx
-if [ -d $ONNXRUNTIME_PREFIX ]; then
+if [ -n "$ONNXRUNTIME_PREFIX" ]; then
DOCKER_OPTS+=" -v $ONNXRUNTIME_PREFIX:/opt/onnxruntime"
CONFIG_OPTIONS+=" -DONNXRUNTIME_PREFIX=/opt/onnxruntime"
fi
# docker image name
+# - for bionic, use DOCKER_IMAGE_NAME="nnfw/one-devtools:bionic"
+# - for focal, use DOCKER_IMAGE_NAME="nnfw/one-devtools:focal"
if [[ -z $DOCKER_IMAGE_NAME ]]; then
echo "It will use default docker image name"
fi
@@ -54,20 +60,18 @@ pushd $ROOT_PATH > /dev/null
mkdir -p ${NNCC_INSTALL_PREFIX}
./nncc docker-run ./nnas create-package --prefix "${PWD}/${NNCC_INSTALL_PREFIX}" -- "${CONFIG_OPTIONS}"
-# create python virtual environment
-./nncc docker-run python3 -m venv "${NNCC_INSTALL_PREFIX}/bin/venv"
+mkdir -p ${ARCHIVE_PATH}
+tar -zcf ${ARCHIVE_PATH}/nncc-package.tar.gz -C ${NNCC_INSTALL_PREFIX} \
+ --exclude test --exclude tflchef* --exclude circle-tensordump --exclude circledump ./
+tar -zcf ${ARCHIVE_PATH}/nncc-test-package.tar.gz -C ${NNCC_INSTALL_PREFIX} ./test
-# TODO remove version number of 'pip==20.2.1 setuptools==49.3.0'
-# NOTE adding version is for temporary hotfix of setuptools 50.x.y version
-./nncc docker-run "${NNCC_INSTALL_PREFIX}/bin/venv/bin/python" \
- -m pip --default-timeout=1000 --trusted-host pypi.org --trusted-host files.pythonhost.org \
- install -U pip==20.2.1 setuptools==49.3.0
-./nncc docker-run "${NNCC_INSTALL_PREFIX}/bin/venv/bin/python" \
- -m pip --default-timeout=1000 --trusted-host pypi.org --trusted-host files.pythonhost.org \
- install tensorflow-cpu==2.3.0
+if [ -z ${RELEASE_VERSION} ] || [ ${RELEASE_VERSION} == "nightly" ]; then
+ ./nncc docker-run /bin/bash -c \
+ 'dch -v $(${PWD}/${NNCC_INSTALL_PREFIX}/bin/one-version)~$(date "+%y%m%d%H") "nightly release" -D $(lsb_release --short --codename)'
+ ./nncc docker-run dch -r ''
+fi
-mkdir -p ${ARCHIVE_PATH}
-tar -zcf ${ARCHIVE_PATH}/nncc-package.tar.gz -C ${NNCC_INSTALL_PREFIX} --exclude "bin/venv" ./
-tar -zcf ${ARCHIVE_PATH}/nncc-venv-package.tar.gz -C ${NNCC_INSTALL_PREFIX} bin/venv
+./nncc docker-run debuild --preserve-env --no-lintian -us -uc \
+ -b --buildinfo-option=-ubuild --changes-option=-ubuild
popd > /dev/null
diff --git a/infra/scripts/docker_build_test_x64.sh b/infra/scripts/docker_build_test_x64.sh
deleted file mode 100755
index 16fcf3fa7..000000000
--- a/infra/scripts/docker_build_test_x64.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-
-[[ "${BASH_SOURCE[0]}" != "${0}" ]] && echo "Please don't source ${BASH_SOURCE[0]}, execute it" && return
-
-CURRENT_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-ROOT_PATH="$CURRENT_PATH/../../"
-
-# mount volume (or directory) for externals
-if [ -n "$EXTERNAL_VOLUME" ]; then
- DOCKER_VOLUMES+=" -v $EXTERNAL_VOLUME:/externals"
- DOCKER_ENV_VARS+=" -e EXTERNAL_VOLUME=/externals"
-else
- echo "It will use default external path"
-fi
-
-# docker image name
-if [[ -z $DOCKER_IMAGE_NAME ]]; then
- echo "It will use default docker image name"
-fi
-
-# Mirror server setting
-if [[ -z $EXTERNAL_DOWNLOAD_SERVER ]]; then
- echo "It will not use mirror server"
-fi
-
-set -e
-
-pushd $ROOT_PATH > /dev/null
-
-export DOCKER_ENV_VARS
-export DOCKER_VOLUMES
-# Disable nnpackage_run build: mismatch between buildtool for CI and installed hdf5
-CMD="export OPTIONS='-DBUILD_NNPACKAGE_RUN=OFF' && \
- export BUILD_TYPE=Release && \
- cp -nv Makefile.template Makefile && \
- make all install build_test_suite"
-./nnfw docker-run bash -c "$CMD"
-
-# Model download server setting
-if [[ -z $MODELFILE_SERVER ]]; then
- echo "Need model file server setting"
- exit 1
-fi
-
-export DOCKER_ENV_VARS=" -e MODELFILE_SERVER=$MODELFILE_SERVER"
-./nnfw docker-run-user ./infra/scripts/test_ubuntu_runtime.sh --backend cpu
-./nnfw docker-run-user ./infra/scripts/test_ubuntu_runtime.sh --interp
-
-popd > /dev/null
diff --git a/infra/scripts/docker_build_tizen_cross.sh b/infra/scripts/docker_build_tizen_cross.sh
deleted file mode 100755
index ee0f183f1..000000000
--- a/infra/scripts/docker_build_tizen_cross.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash
-
-[[ "${BASH_SOURCE[0]}" != "${0}" ]] && echo "Please don't source ${BASH_SOURCE[0]}, execute it" && return
-
-CURRENT_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-ROOT_PATH="$CURRENT_PATH/../../"
-
-# prepare rootfs
-if [ -z "$ROOTFS_DIR" ] || [ ! -d $ROOTFS_DIR ]; then
- echo "It will use default rootfs path"
-else
- DOCKER_VOLUMES+=" -v $ROOTFS_DIR:/opt/rootfs"
- DOCKER_ENV_VARS+=" -e ROOTFS_DIR=/opt/rootfs"
-fi
-
-# mount volume (or directory) for externals
-if [ -n "$EXTERNAL_VOLUME" ]; then
- DOCKER_VOLUMES+=" -v $EXTERNAL_VOLUME:/externals"
- DOCKER_ENV_VARS+=" -e EXTERNAL_VOLUME=/externals"
-else
- echo "It will use default external path"
-fi
-
-# docker image name
-if [[ -z $DOCKER_IMAGE_NAME ]]; then
- echo "It will use default docker image name"
-fi
-
-DOCKER_ENV_VARS+=" -e TARGET_ARCH=armv7l"
-DOCKER_ENV_VARS+=" -e CROSS_BUILD=1"
-DOCKER_ENV_VARS+=" -e TARGET_OS=tizen"
-DOCKER_ENV_VARS+=" -e BUILD_TYPE=release"
-
-# Mirror server setting
-if [[ -z $EXTERNAL_DOWNLOAD_SERVER ]]; then
- echo "It will not use mirror server"
-fi
-
-set -e
-
-pushd $ROOT_PATH > /dev/null
-
-export DOCKER_ENV_VARS
-export DOCKER_VOLUMES
-CMD="export OPTIONS+=' -DGENERATE_RUNTIME_NNAPI_TESTS=ON' && \
- cp -nv Makefile.template Makefile && \
- make all install build_test_suite"
-./nnfw docker-run bash -c "$CMD"
-
-popd > /dev/null
diff --git a/infra/scripts/docker_build_tizen_gbs.sh b/infra/scripts/docker_build_tizen_gbs.sh
deleted file mode 100755
index 2d508f4c7..000000000
--- a/infra/scripts/docker_build_tizen_gbs.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-[[ "${BASH_SOURCE[0]}" != "${0}" ]] && echo "Please don't source ${BASH_SOURCE[0]}, execute it" && return
-
-CURRENT_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-ROOT_PATH="$CURRENT_PATH/../../"
-
-GBS_RPM_DIR=$ROOT_PATH/Product/out/rpm
-mkdir -p $GBS_RPM_DIR
-DOCKER_VOLUMES=" -v $GBS_RPM_DIR:/opt/rpm"
-
-if [[ -z $DOCKER_IMAGE_NAME ]]; then
- echo "It will use default docker image name for tizen gbs build"
- DOCKER_IMAGE_NAME="nnfw_docker_tizen"
-fi
-
-DOCKER_ENV_VARS=" --privileged"
-
-set -e
-
-pushd $ROOT_PATH > /dev/null
-
-CMD="gbs -c $ROOT_PATH/infra/nnfw/config/gbs.conf build \
- -A armv7l --profile=profile.tizen --clean --include-all --define '$GBS_DEFINE' && \
- cp -rf /home/GBS-ROOT/local/repos/tizen/armv7l/RPMS/*.rpm /opt/rpm/"
-
-export DOCKER_ENV_VARS
-export DOCKER_VOLUMES
-./nnfw docker-run bash -c "$CMD"
-
-popd > /dev/null
diff --git a/infra/scripts/docker_collect_nnpkg_resources.sh b/infra/scripts/docker_collect_nnpkg_resources.sh
index 55adaa15d..8a73dd380 100755
--- a/infra/scripts/docker_collect_nnpkg_resources.sh
+++ b/infra/scripts/docker_collect_nnpkg_resources.sh
@@ -28,18 +28,20 @@ else
fi
# prepare tensorflow
-if [ -d $TENSORFLOW_PREFIX ]; then
+if [ -n "$TENSORFLOW_PREFIX" ]; then
DOCKER_OPTS+=" -v $TENSORFLOW_PREFIX:/opt/tensorflow"
CONFIG_OPTIONS+=" -DTENSORFLOW_PREFIX=/opt/tensorflow"
fi
# prepare onnx
-if [ -d $ONNXRUNTIME_PREFIX ]; then
+if [ -n "$ONNXRUNTIME_PREFIX" ]; then
DOCKER_OPTS+=" -v $ONNXRUNTIME_PREFIX:/opt/onnxruntime"
CONFIG_OPTIONS+=" -DONNXRUNTIME_PREFIX=/opt/onnxruntime"
fi
# docker image name
+# - for bionic, use DOCKER_IMAGE_NAME="nnfw/one-devtools:bionic"
+# - for focal, use DOCKER_IMAGE_NAME="nnfw/one-devtools:focal"
if [[ -z $DOCKER_IMAGE_NAME ]]; then
echo "It will use default docker image name"
fi
@@ -59,16 +61,19 @@ pushd $ROOT_PATH > /dev/null
REQUIRED_UNITS=()
# Common Libraries
-REQUIRED_UNITS+=("angkor" "cwrap" "pepper-str" "pepper-strcast" "pp" "stdex")
-REQUIRED_UNITS+=("oops" "safemain" "foder" "arser" "vconone")
+REQUIRED_UNITS+=("angkor" "cwrap" "pepper-str" "pepper-strcast" "pp")
+REQUIRED_UNITS+=("pepper-csv2vec")
+REQUIRED_UNITS+=("oops" "safemain" "foder" "crew" "arser" "vconone")
# Hermes Logging Framework
REQUIRED_UNITS+=("hermes" "hermes-std")
# loco IR and related utilities
REQUIRED_UNITS+=("loco" "locop" "locomotiv" "logo-core" "logo")
+# Compute
+REQUIRED_UNITS+=("luci-compute")
# Circle compiler library (.circle -> .circle)
REQUIRED_UNITS+=("luci")
# Flatbuffer I/O
-REQUIRED_UNITS+=("mio-tflite" "mio-circle")
+REQUIRED_UNITS+=("mio-tflite280" "mio-circle05" "mio-tflite2121" "mio-circle06")
# Tools
REQUIRED_UNITS+=("tflite2circle" "circle2circle" "luci-interpreter")
REQUIRED_UNITS+=("souschef" "tflchef" "circlechef" "circle-verify")
diff --git a/infra/scripts/docker_coverage_report.sh b/infra/scripts/docker_coverage_report.sh
deleted file mode 100755
index 677462d63..000000000
--- a/infra/scripts/docker_coverage_report.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-
-# coverage test data: ${ARCHIVE_PATH}/coverage-data.tar.gz
-
-[[ "${BASH_SOURCE[0]}" != "${0}" ]] && echo "Please don't source ${BASH_SOURCE[0]}, execute it" && return
-
-CURRENT_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-ROOT_PATH="$CURRENT_PATH/../../"
-
-# docker image name
-if [[ -z $DOCKER_IMAGE_NAME ]]; then
- echo "It will use default docker image name"
-fi
-
-NNAS_WORKSPACE=${NNAS_WORKSPACE:-build}
-if [[ -z "${ARCHIVE_PATH}" ]]; then
- ARCHIVE_PATH=${NNAS_WORKSPACE}/archive
-fi
-
-set -e
-
-pushd $ROOT_PATH > /dev/null
-
-tar -zxf ${ARCHIVE_PATH}/coverage-data.tar.gz
-
-CMD="GCOV_PATH=arm-linux-gnueabihf-gcov NNAS_WORKSPACE=Product ./nnas gen-coverage-report runtime compute &&
- tar -zcf coverage/coverage_report.tar.gz coverage/html &&
- python runtime/3rdparty/lcov-to-cobertura-xml/lcov_cobertura.py coverage/coverage.info -o coverage/nnfw_coverage.xml"
-
-./nnfw docker-run-user bash -c "$CMD"
-
-popd > /dev/null
diff --git a/infra/scripts/test_arm_nnpkg.sh b/infra/scripts/test_arm_nnpkg.sh
index d00eb730f..74fae6bd8 100755
--- a/infra/scripts/test_arm_nnpkg.sh
+++ b/infra/scripts/test_arm_nnpkg.sh
@@ -10,7 +10,4 @@ do
NNPackageTest ${BACKEND} "Product/out/test/list/nnpkg_test_list.armv7l-linux.${BACKEND}"
done
-# Interpreter test
-export DISABLE_COMPILE=1
-NNPackageTest "interp" "Product/out/test/list/nnpkg_test_list.noarch.interp"
unset DISABLE_COMPILE
diff --git a/infra/scripts/test_coverage.sh b/infra/scripts/test_coverage.sh
index 12a9942ab..97043ceed 100755
--- a/infra/scripts/test_coverage.sh
+++ b/infra/scripts/test_coverage.sh
@@ -6,7 +6,7 @@
set -eo pipefail
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
-CheckTestPrepared
+pushd $ROOT_PATH > /dev/null
NNAS_WORKSPACE=${NNAS_WORKSPACE:-build}
if [[ -z "${ARCHIVE_PATH}" ]]; then
@@ -14,30 +14,27 @@ if [[ -z "${ARCHIVE_PATH}" ]]; then
echo "Default archive directory including nncc package and resources: ${ARCHIVE_PATH}"
fi
-pushd $ROOT_PATH > /dev/null
-
tar -zxf ${ARCHIVE_PATH}/coverage-suite.tar.gz -C ./
+CheckTestPrepared
+
if [[ ! -e $ROOT_PATH/tests/scripts/build_path_depth.txt ]]; then
echo "Cannot find prefix strip file"
exit 1
fi
export GCOV_PREFIX_STRIP=`cat $ROOT_PATH/tests/scripts/build_path_depth.txt`
-./infra/scripts/test_ubuntu_runtime.sh --backend acl_cl --tflite-loader
+TENSOR_LOGGING=trace_log.txt ./infra/scripts/test_ubuntu_runtime.sh --backend acl_cl --nnapi-frontend
./infra/scripts/test_ubuntu_runtime.sh --backend acl_neon
./infra/scripts/test_ubuntu_runtime.sh --backend cpu
# Enable all logs (mixed backend)
-TENSOR_LOGGING=trace_log.txt ONERT_LOG_ENABLE=1 GRAPH_DOT_DUMP=1 ./infra/scripts/test_ubuntu_runtime_mixed.sh
+ONERT_LOG_ENABLE=1 GRAPH_DOT_DUMP=1 ./infra/scripts/test_ubuntu_runtime_mixed.sh
# Enable trace event (acl_cl default backend)
export TRACE_FILEPATH=trace.json
-TFLiteModelVerification "acl_cl" "Product/out/test/list/frameworktest_list.armv7l.acl_cl.txt" "report/acl_cl/trace"
+TFLiteModelVerification "acl_cl" "Product/out/test/list/tflite_comparator.armv7l.acl_cl.list" "report/acl_cl/trace"
unset TRACE_FILEPATH
-# Interpreter
-./infra/scripts/test_ubuntu_runtime.sh --interp
-
# nnpackage test suite
if [[ -e ${ARCHIVE_PATH}/nnpkg-test-suite.tar.gz ]]; then
tar -zxf ${ARCHIVE_PATH}/nnpkg-test-suite.tar.gz -C ./
diff --git a/infra/scripts/test_ubuntu_npud.sh b/infra/scripts/test_ubuntu_npud.sh
new file mode 100755
index 000000000..3b3304240
--- /dev/null
+++ b/infra/scripts/test_ubuntu_npud.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+set -eo pipefail
+
+CURRENT_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+ROOT_PATH="$(cd ${CURRENT_PATH}/../../ && pwd)"
+
+# Install path on CI
+INSTALL_PATH="$ROOT_PATH/Product/out"
+MODEL_PATH="${INSTALL_PATH}/npud-gtest/models"
+
+# Install dbus configuration file
+DBUS_CONF="${INSTALL_PATH}/share/org.tizen.npud.conf"
+mkdir -p /usr/share/dbus-1/system.d/
+cp ${DBUS_CONF} /usr/share/dbus-1/system.d/
+
+service dbus restart
+
+function TestPrepared()
+{
+ if [[ -z "${MODELFILE}" ]]; then
+ echo "Model file is not set. Try to use default setting."
+ exit 1
+ fi
+
+ mkdir -p ${MODEL_PATH}
+ if [[ "${MODELFILE: -7}" == ".tar.gz" ]]; then
+ curl -o model.tar.gz -kLsSO ${MODELFILE}
+ tar -zxf model.tar.gz -C ${MODEL_PATH}
+ else
+ echo "The file format is not supported."
+ echo "Supported format: tar.gz"
+ exit 1
+ fi
+}
+
+function TestCleanUp()
+{
+ rm -rf ${MODEL_PATH}
+}
+
+function NpudTest()
+{
+ pushd ${ROOT_PATH} > /dev/null
+
+ $INSTALL_PATH/npud-gtest/npud_gtest
+ EXITCODE=$?
+ if [ ${EXITCODE} -ne 0 ]; then
+ exit ${EXITCODE}
+ fi
+
+ popd > /dev/null
+}
+
+TestPrepared
+
+DEVICE_MODULE_PATH=${INSTALL_PATH}/lib GTEST_MODEL_PATH=${MODEL_PATH} NpudTest
+
+TestCleanUp
diff --git a/infra/scripts/test_ubuntu_runtime.sh b/infra/scripts/test_ubuntu_runtime.sh
index f250df5a0..9a98e5bd3 100755
--- a/infra/scripts/test_ubuntu_runtime.sh
+++ b/infra/scripts/test_ubuntu_runtime.sh
@@ -3,11 +3,10 @@
set -eo pipefail
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
+: ${TEST_ARCH:=$(uname -m | tr '[:upper:]' '[:lower:]')}
BACKEND="cpu"
-TEST_ARCH=$(uname -m | tr '[:upper:]' '[:lower:]')
TEST_OS="linux"
TEST_PLATFORM="$TEST_ARCH-$TEST_OS"
-TFLITE_LOADER="0"
LINEAR_ONLY="0"
RUN_INTERP="0"
@@ -17,7 +16,6 @@ function Usage()
echo ""
echo "Options:"
echo " --backend <BACKEND> Runtime backend to test (default: ${BACKEND})"
- echo " --tflite-loader Enable TFLite Loader test"
echo " --linear-only Use Linear executor only"
}
@@ -37,18 +35,10 @@ do
BACKEND=$(echo ${1#*=} | tr '[:upper:]' '[:lower:]')
shift
;;
- --tflite-loader)
- TFLITE_LOADER="1"
- shift
- ;;
--linear-only)
LINEAR_ONLY="1"
shift
;;
- --interp)
- RUN_INTERP="1"
- shift;
- ;;
*)
# Ignore
shift
@@ -58,52 +48,26 @@ done
CheckTestPrepared
-if [ $RUN_INTERP = "1" ]; then
- TEST_PLATFORM="noarch"
- TEST_ARCH="noarch"
- BACKEND="interp"
- echo "[[ Interpreter test ]]"
-else
- echo "[[ ${TEST_PLATFORM}: ${BACKEND} backend test ]]"
-fi
+echo "[[ ${TEST_PLATFORM}: ${BACKEND} backend test ]]"
-UNITTEST_SKIPLIST="Product/out/unittest/nnapi_gtest.skip.${TEST_PLATFORM}.${BACKEND}"
-FRAMEWORK_TESTLIST="Product/out/test/list/frameworktest_list.${TEST_ARCH}.${BACKEND}.txt"
+UNITTEST_SKIPLIST="Product/out/nnapi-gtest/nnapi_gtest.skip.${TEST_PLATFORM}.${BACKEND}"
+TFLITE_TESTLIST="Product/out/test/list/tflite_comparator.${TEST_ARCH}.${BACKEND}.list"
REPORT_BASE="report/${BACKEND}"
EXECUTORS=("Linear" "Dataflow" "Parallel")
if [ $LINEAR_ONLY = "1" ]; then
EXECUTORS=("Linear")
fi
-if [ $RUN_INTERP = "1" ]; then
- EXECUTORS=("Interpreter")
-fi
for EXECUTOR in "${EXECUTORS[@]}";
do
echo "[EXECUTOR]: ${EXECUTOR}"
REPORT_PATH="${REPORT_BASE}/${EXECUTOR}"
- if [ $EXECUTOR = "Interpreter" ]; then
- export DISABLE_COMPILE=1
- BACKEND=""
- else
- export EXECUTOR="${EXECUTOR}"
- fi
+ export EXECUTOR="${EXECUTOR}"
NNAPIGTest "${BACKEND}" "${UNITTEST_SKIPLIST}" "${REPORT_PATH}"
- TFLiteModelVerification "${BACKEND}" "${FRAMEWORK_TESTLIST}" "${REPORT_PATH}"
+ TFLiteModelVerification "${BACKEND}" "${TFLITE_TESTLIST}" "${REPORT_PATH}"
- if [ $EXECUTOR = "Interpreter" ]; then
- unset DISABLE_COMPILE
- else
- unset EXECUTOR
- fi
+ unset EXECUTOR
done
-
-# Current support acl_cl backend testlist only
-# TODO Support more backends
-TFLITE_LOADER_TESTLIST="Product/out/test/list/tflite_loader_list.${TEST_ARCH}.txt"
-if [[ $TFLITE_LOADER = "1" ]]; then
- TFLiteLoaderTest "${BACKEND}" "${TFLITE_LOADER_TESTLIST}" "${REPORT_BASE}/loader/${EXECUTOR}"
-fi
diff --git a/infra/scripts/test_ubuntu_runtime_mixed.sh b/infra/scripts/test_ubuntu_runtime_mixed.sh
index 24fde8896..a6fd2a41d 100755
--- a/infra/scripts/test_ubuntu_runtime_mixed.sh
+++ b/infra/scripts/test_ubuntu_runtime_mixed.sh
@@ -6,7 +6,7 @@ source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
CheckTestPrepared
# TODO Get argument for mix configuration
-TEST_ARCH=$(uname -m | tr '[:upper:]' '[:lower:]')
+: ${TEST_ARCH:=$(uname -m | tr '[:upper:]' '[:lower:]')}
TEST_OS="linux"
# nnfw_api_gtest
@@ -17,8 +17,7 @@ pushd ${ROOT_PATH} > /dev/null
echo ""
echo "==== Run standalone unittest begin ===="
echo ""
-Product/out/test/onert-test prepare-model --model=nnpackage
-Product/out/test/onert-test unittest --unittestdir=Product/out/unittest_standalone
+Product/out/test/onert-test unittest --unittestdir=Product/out/unittest
echo ""
echo "==== Run standalone unittest end ===="
echo ""
@@ -33,14 +32,14 @@ popd > /dev/null
BACKENDS=(acl_cl acl_neon cpu)
# Get the intersect of framework test list files
-TESTLIST_PREFIX="Product/out/test/list/frameworktest_list.${TEST_ARCH}"
-SKIPLIST_PREFIX="Product/out/unittest/nnapi_gtest.skip.${TEST_ARCH}-${TEST_OS}"
-sort $TESTLIST_PREFIX.${BACKENDS[0]}.txt > $TESTLIST_PREFIX.intersect.txt
+TESTLIST_PREFIX="Product/out/test/list/tflite_comparator.${TEST_ARCH}"
+SKIPLIST_PREFIX="Product/out/nnapi-gtest/nnapi_gtest.skip.${TEST_ARCH}-${TEST_OS}"
+sort $TESTLIST_PREFIX.${BACKENDS[0]}.list > $TESTLIST_PREFIX.intersect.list
sort $SKIPLIST_PREFIX.${BACKENDS[0]} > $SKIPLIST_PREFIX.union
for BACKEND in "${BACKENDS[@]:1}"; do
- comm -12 <(sort $TESTLIST_PREFIX.intersect.txt) <(sort $TESTLIST_PREFIX.$BACKEND.txt) > $TESTLIST_PREFIX.intersect.next.txt
+ comm -12 <(sort $TESTLIST_PREFIX.intersect.list) <(sort $TESTLIST_PREFIX.$BACKEND.list) > $TESTLIST_PREFIX.intersect.next.list
comm <(sort $SKIPLIST_PREFIX.union) <(sort $SKIPLIST_PREFIX.$BACKEND) | tr -d "[:blank:]" > $SKIPLIST_PREFIX.union.next
- mv $TESTLIST_PREFIX.intersect.next.txt $TESTLIST_PREFIX.intersect.txt
+ mv $TESTLIST_PREFIX.intersect.next.list $TESTLIST_PREFIX.intersect.list
mv $SKIPLIST_PREFIX.union.next $SKIPLIST_PREFIX.union
done
popd > /dev/null
@@ -56,8 +55,9 @@ echo "GeneratedTests.squeeze_relaxed" >> $SKIPLIST_PREFIX.union
# Run the test
export OP_BACKEND_Conv2D="cpu"
-export OP_BACKEND_MaxPool2D="acl_cl"
-export OP_BACKEND_AvgPool2D="acl_neon"
+export OP_BACKEND_Pool2D="acl_cl"
+export OP_BACKEND_FullyConnected="acl_neon"
export ACL_LAYOUT="NCHW"
-NNAPIGTest "acl_cl;acl_neon;cpu" "Product/out/unittest/nnapi_gtest.skip.${TEST_ARCH}-${TEST_OS}.union" "report/mixed"
-TFLiteModelVerification "acl_cl;acl_neon;cpu" "${TESTLIST_PREFIX}.intersect.txt" "report/mixed"
+export RUY_THREADS=4
+NNAPIGTest "acl_cl;acl_neon;cpu" "Product/out/nnapi-gtest/nnapi_gtest.skip.${TEST_ARCH}-${TEST_OS}.union" "report/mixed"
+TFLiteModelVerification "acl_cl;acl_neon;cpu" "${TESTLIST_PREFIX}.intersect.list" "report/mixed"
diff --git a/infra/scripts/tizen_xu4_test.sh b/infra/scripts/tizen_xu4_test.sh
index 05e55848c..5610756b7 100755
--- a/infra/scripts/tizen_xu4_test.sh
+++ b/infra/scripts/tizen_xu4_test.sh
@@ -25,26 +25,18 @@ function install_model()
{
# download tflite model files
pushd $HOST_HOME
- tests/scripts/models/run_test.sh --download=on --run=off
+ TEMP_PATH=$(mktemp -d)
+ CACHE_PATH=$TEMP_PATH/cache
+ mkdir -p $CACHE_PATH
+ ./nnfw prepare-model --cachedir=$CACHE_PATH
# TODO Since this command removes model file(.zip),
# We must always download the file unlike model file(.tflite).
# Because caching applies only to tflite file.
- find tests -name "*.zip" -exec rm {} \;
- tar -zcf cache.tar.gz -C tests/scripts/models cache
- $SDB_CMD push cache.tar.gz $TEST_ROOT/.
- rm -rf cache.tar.gz
- $SDB_CMD shell tar -zxf $TEST_ROOT/cache.tar.gz -C $TEST_ROOT/Product/out/test/models
-
- # download api test model file for nnfw_api_gtest
- MODEL_CACHE_DIR=$(mktemp -d)
- tests/scripts/models/run_test.sh --download=on --run=off \
- --configdir=tests/scripts/models/nnfw_api_gtest \
- --cachedir=$MODEL_CACHE_DIR
- tar -zcf $MODEL_CACHE_DIR/api_model_test.tar.gz -C $MODEL_CACHE_DIR .
- $SDB_CMD push $MODEL_CACHE_DIR/api_model_test.tar.gz $TEST_ROOT/Product/out/unittest_standalone/nnfw_api_gtest_models/
- $SDB_CMD shell tar -zxf $TEST_ROOT/Product/out/unittest_standalone/nnfw_api_gtest_models/api_model_test.tar.gz \
- -C $TEST_ROOT/Product/out/unittest_standalone/nnfw_api_gtest_models/
- rm -rf $MODEL_CACHE_DIR
+ find $CACHE_PATH -name "*.zip" -exec rm {} \;
+ tar -zcf $TEMP_PATH/cache.tar.gz -C $TEMP_PATH cache
+ $SDB_CMD push $TEMP_PATH/cache.tar.gz $TEST_ROOT/
+ rm -rf $TEMP_PATH
+ $SDB_CMD shell tar -zxf $TEST_ROOT/cache.tar.gz -C $TEST_ROOT/Product/out/test
popd
}
@@ -160,11 +152,10 @@ if [ $RUN_TEST = "0" ]; then
fi
if [ -z "${GCOV_DIR}" ]; then
- ${SDB_CMD} shell /bin/bash -c "IGNORE_MD5=1 ${TEST_ROOT}/infra/scripts/test_ubuntu_runtime.sh --backend acl_cl --tflite-loader"
- ${SDB_CMD} shell /bin/bash -c "IGNORE_MD5=1 ${TEST_ROOT}/infra/scripts/test_ubuntu_runtime.sh --backend acl_neon"
- ${SDB_CMD} shell /bin/bash -c "IGNORE_MD5=1 ${TEST_ROOT}/infra/scripts/test_ubuntu_runtime.sh --backend cpu"
- ${SDB_CMD} shell /bin/bash -c "IGNORE_MD5=1 ${TEST_ROOT}/infra/scripts/test_ubuntu_runtime_mixed.sh"
- ${SDB_CMD} shell /bin/bash -c "IGNORE_MD5=1 ${TEST_ROOT}/infra/scripts/test_ubuntu_runtime.sh --interp"
+ ${SDB_CMD} shell /bin/bash -c "IGNORE_MD5=1 TEST_ARCH=armv7l ${TEST_ROOT}/infra/scripts/test_ubuntu_runtime.sh --backend acl_cl --tflite-loader"
+ ${SDB_CMD} shell /bin/bash -c "IGNORE_MD5=1 TEST_ARCH=armv7l ${TEST_ROOT}/infra/scripts/test_ubuntu_runtime.sh --backend acl_neon"
+ ${SDB_CMD} shell /bin/bash -c "IGNORE_MD5=1 TEST_ARCH=armv7l ${TEST_ROOT}/infra/scripts/test_ubuntu_runtime.sh --backend cpu"
+ ${SDB_CMD} shell /bin/bash -c "IGNORE_MD5=1 TEST_ARCH=armv7l ${TEST_ROOT}/infra/scripts/test_ubuntu_runtime_mixed.sh"
else
mkdir -p ${GCOV_DIR}
rm -rf ${GCOV_DIR}/*
@@ -176,11 +167,10 @@ else
GCOV_DATA_PATH="/opt/usr/nnfw-gcov"
# TODO For coverage check, we run acl_cl and mixed test
- ${SDB_CMD} shell /bin/bash -c "GCOV_PREFIX_STRIP=${GCOV_PREFIX_STRIP} IGNORE_MD5=1 ${TEST_ROOT}/infra/scripts/test_ubuntu_runtime.sh --backend acl_cl --tflite-loader"
- ${SDB_CMD} shell /bin/bash -c "GCOV_PREFIX_STRIP=${GCOV_PREFIX_STRIP} IGNORE_MD5=1 ${TEST_ROOT}/infra/scripts/test_ubuntu_runtime.sh --backend acl_neon"
- ${SDB_CMD} shell /bin/bash -c "GCOV_PREFIX_STRIP=${GCOV_PREFIX_STRIP} IGNORE_MD5=1 ${TEST_ROOT}/infra/scripts/test_ubuntu_runtime.sh --backend cpu"
- ${SDB_CMD} shell /bin/bash -c "GCOV_PREFIX_STRIP=${GCOV_PREFIX_STRIP} IGNORE_MD5=1 ${TEST_ROOT}/infra/scripts/test_ubuntu_runtime_mixed.sh"
- ${SDB_CMD} shell /bin/bash -c "GCOV_PREFIX_STRIP=${GCOV_PREFIX_STRIP} IGNORE_MD5=1 ${TEST_ROOT}/infra/scripts/test_ubuntu_runtime.sh --interp"
+ ${SDB_CMD} shell /bin/bash -c "GCOV_PREFIX_STRIP=${GCOV_PREFIX_STRIP} IGNORE_MD5=1 TEST_ARCH=armv7l ${TEST_ROOT}/infra/scripts/test_ubuntu_runtime.sh --backend acl_cl --tflite-loader"
+ ${SDB_CMD} shell /bin/bash -c "GCOV_PREFIX_STRIP=${GCOV_PREFIX_STRIP} IGNORE_MD5=1 TEST_ARCH=armv7l ${TEST_ROOT}/infra/scripts/test_ubuntu_runtime.sh --backend acl_neon"
+ ${SDB_CMD} shell /bin/bash -c "GCOV_PREFIX_STRIP=${GCOV_PREFIX_STRIP} IGNORE_MD5=1 TEST_ARCH=armv7l ${TEST_ROOT}/infra/scripts/test_ubuntu_runtime.sh --backend cpu"
+ ${SDB_CMD} shell /bin/bash -c "GCOV_PREFIX_STRIP=${GCOV_PREFIX_STRIP} IGNORE_MD5=1 TEST_ARCH=armv7l ${TEST_ROOT}/infra/scripts/test_ubuntu_runtime_mixed.sh"
# More test to check coverage
${SDB_CMD} shell "rm -rf ${GCOV_DATA_PATH} && mkdir -p ${GCOV_DATA_PATH}"
diff --git a/infra/scripts/unittest_compiler_xml.sh b/infra/scripts/unittest_compiler_xml.sh
index 46d3bc813..6e9e8ad7f 100755
--- a/infra/scripts/unittest_compiler_xml.sh
+++ b/infra/scripts/unittest_compiler_xml.sh
@@ -7,7 +7,9 @@ set -eo pipefail
CURRENT_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_PATH="$CURRENT_PATH/../../"
NNCC_WORKSPACE=${NNCC_WORKSPACE:-${ROOT_PATH}build}
-UNITTEST_REPORT_DIR=${NNCC_WORKSPACE}/unittest_compiler_xml
+
+# Use fixed absolute report dir for CI
+UNITTEST_REPORT_DIR=${ROOT_PATH}build/unittest_compiler_xml
for i in "$@"
do
@@ -25,5 +27,10 @@ fi
for TEST_BIN in `find ${NNCC_WORKSPACE}/compiler -type f -executable -name *_test`; do
TEST_NAME="$(basename -- $TEST_BIN)"
- LUGI_LOG=999 $TEST_BIN --gtest_output="xml:$UNITTEST_REPORT_DIR/$TEST_NAME.xml"
+ TEST_DIR="$(dirname $TEST_BIN)"
+
+ # Execute on test directory to find related file
+ pushd $TEST_DIR > /dev/null
+ LUGI_LOG=999 ./$TEST_NAME --gtest_output="xml:$UNITTEST_REPORT_DIR/$TEST_NAME.xml"
+ popd > /dev/null
done