summaryrefslogtreecommitdiff
path: root/scripts
diff options
context:
space:
mode:
Diffstat (limited to 'scripts')
-rw-r--r--scripts/command/build7
-rwxr-xr-xscripts/command/common.sh2
-rwxr-xr-xscripts/command/docker_build.sh10
-rwxr-xr-xscripts/command/docker_build_cross_arm_ubuntu.sh38
-rwxr-xr-xscripts/command/docker_build_cross_arm_ubuntu_without_aclbuild.sh62
-rwxr-xr-xscripts/command/docker_build_tizen_cross.sh48
-rwxr-xr-xscripts/command/docker_build_ubuntu_svace.sh87
-rwxr-xr-xscripts/command/docker_coverage_report.sh38
-rwxr-xr-xscripts/command/docker_cross_test_coverage_build.sh63
-rwxr-xr-xscripts/command/docker_gbs_build.sh36
-rwxr-xr-xscripts/command/docker_run.sh24
-rwxr-xr-xscripts/command/docker_run_test.sh50
-rwxr-xr-xscripts/command/format-checker.sh89
-rw-r--r--scripts/command/gbs.conf21
-rwxr-xr-xscripts/command/gen_coverage_report.sh56
-rw-r--r--scripts/command/imported_url.txt3
-rwxr-xr-xscripts/command/lcov-to-covertura-xml.sh414
-rwxr-xr-xscripts/command/tizen_xu4_test.sh135
-rw-r--r--scripts/docker/Dockerfile14
-rw-r--r--scripts/docker/Dockerfile_tizen8
20 files changed, 1205 insertions, 0 deletions
diff --git a/scripts/command/build b/scripts/command/build
new file mode 100644
index 000000000..5219001c3
--- /dev/null
+++ b/scripts/command/build
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+# NOTE 'run' sets NNFW_PROJECT_PATH and invokes this script
+
+# TODO Support command-line options
+# TODO Implement build steps inside this script
+make -C "${NNFW_PROJECT_PATH}"
diff --git a/scripts/command/common.sh b/scripts/command/common.sh
new file mode 100755
index 000000000..ec2ca7bb8
--- /dev/null
+++ b/scripts/command/common.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+export DOCKER_IMAGE_NAME=nnfw_docker
diff --git a/scripts/command/docker_build.sh b/scripts/command/docker_build.sh
new file mode 100755
index 000000000..c5545f6c7
--- /dev/null
+++ b/scripts/command/docker_build.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+SCRIPT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+source $SCRIPT_ROOT/common.sh
+
+docker build --build-arg http_proxy="$http_proxy" \
+ --build-arg https_proxy="$https_proxy" \
+ -t $DOCKER_IMAGE_NAME \
+ - < $SCRIPT_ROOT/../docker/Dockerfile
diff --git a/scripts/command/docker_build_cross_arm_ubuntu.sh b/scripts/command/docker_build_cross_arm_ubuntu.sh
new file mode 100755
index 000000000..fb3ac81c5
--- /dev/null
+++ b/scripts/command/docker_build_cross_arm_ubuntu.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+SCRIPT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+source $SCRIPT_ROOT/common.sh
+
+HOST_HOME=$SCRIPT_ROOT/../..
+DOCKER_HOME=/home
+
+GIT_SSL_NO_VERIFY=1
+
+DOCKER_VOLUMES+=" -v $HOST_HOME:$DOCKER_HOME"
+
+DOCKER_ENV_VARS+=" -e http_proxy"
+DOCKER_ENV_VARS+=" -e no_proxy"
+DOCKER_ENV_VARS+=" -e GIT_SSL_NO_VERIFY"
+
+DOCKER_RUN_OPTS="--rm"
+DOCKER_RUN_OPTS+=" -w $DOCKER_HOME"
+DOCKER_RUN_OPTS+=" -it"
+
+CMD="export TARGET_ARCH=armv7l && export CROSS_BUILD=1 && export BENCHMARK_ACL_BUILD=1 && make acl && make && make install"
+
+docker run $DOCKER_RUN_OPTS $DOCKER_ENV_VARS $DOCKER_VOLUMES $DOCKER_IMAGE_NAME sh -c "$CMD"
+BUILD_RESULT=$?
+
+# Newly created files during above docker run can have different ownership.
+# This may cause some problems, for example, some jenkins slaves or developers
+# can't remove built files due to lack of permission.
+# To address this issue, let's change owner of all files
+# in NNFW to owner of NNFW.
+NNFW_OWNER_UID=$(stat -c "%u" $HOST_HOME)
+NNFW_OWNER_GID=$(stat -c "%g" $HOST_HOME)
+
+CMD="chown -R $NNFW_OWNER_UID:$NNFW_OWNER_GID $DOCKER_HOME"
+docker run $DOCKER_RUN_OPTS $DOCKER_ENV_VARS $DOCKER_VOLUMES $DOCKER_IMAGE_NAME $CMD
+
+exit $BUILD_RESULT
diff --git a/scripts/command/docker_build_cross_arm_ubuntu_without_aclbuild.sh b/scripts/command/docker_build_cross_arm_ubuntu_without_aclbuild.sh
new file mode 100755
index 000000000..dcaaa97a8
--- /dev/null
+++ b/scripts/command/docker_build_cross_arm_ubuntu_without_aclbuild.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+SCRIPT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+source $SCRIPT_ROOT/common.sh
+
+HOST_HOME=$SCRIPT_ROOT/../..
+DOCKER_HOME=/home
+
+GIT_SSL_NO_VERIFY=1
+
+DOCKER_VOLUMES+=" -v $HOST_HOME:$DOCKER_HOME"
+
+DOCKER_ENV_VARS+=" -e http_proxy"
+DOCKER_ENV_VARS+=" -e no_proxy"
+DOCKER_ENV_VARS+=" -e GIT_SSL_NO_VERIFY"
+
+DOCKER_ENV_VARS+=" -e TARGET_ARCH=armv7l"
+DOCKER_ENV_VARS+=" -e CROSS_BUILD=1"
+DOCKER_ENV_VARS+=" -e ROOTFS_DIR=/opt/rootfs"
+DOCKER_ENV_VARS+=" -e EXT_ACL_FOLDER=/opt/libarmcl"
+
+DOCKER_RUN_OPTS="--rm"
+DOCKER_RUN_OPTS+=" -w $DOCKER_HOME"
+
+# prepare armcl library
+if [[ ! -d $ARMCL_DIR ]]; then
+ echo "cannot find armcl"
+ exit 1
+fi
+
+# prepare rootfs
+if [[ ! -d $ROOTFS_DIR ]]; then
+ echo "cannot find rootfs"
+ exit 1
+fi
+
+DOCKER_VOLUMES+=" -v $ROOTFS_DIR:/opt/rootfs"
+DOCKER_VOLUMES+=" -v $ARMCL_DIR:/opt/libarmcl"
+
+if [ -n "$DOCKER_INTERACTIVE" ]; then
+ DOCKER_RUN_OPTS+=" -it"
+ CMD="/bin/bash"
+else
+ CMD="make external_acl && export BENCHMARK_ACL_BUILD=1 && make && make install && make build_test_suite"
+fi
+
+docker run $DOCKER_RUN_OPTS $DOCKER_ENV_VARS $DOCKER_VOLUMES $DOCKER_IMAGE_NAME sh -c "$CMD"
+BUILD_RESULT=$?
+
+# Newly created files during above docker run can have different ownership.
+# This may cause some problems, for example, some jenkins slaves or developers
+# can't remove built files due to lack of permission.
+# To address this issue, let's change owner of all files
+# in NNFW to owner of NNFW.
+NNFW_OWNER_UID=$(stat -c "%u" $HOST_HOME)
+NNFW_OWNER_GID=$(stat -c "%g" $HOST_HOME)
+
+CMD="chown -R $NNFW_OWNER_UID:$NNFW_OWNER_GID $DOCKER_HOME"
+docker run $DOCKER_RUN_OPTS $DOCKER_ENV_VARS $DOCKER_VOLUMES $DOCKER_IMAGE_NAME $CMD
+
+exit $BUILD_RESULT
diff --git a/scripts/command/docker_build_tizen_cross.sh b/scripts/command/docker_build_tizen_cross.sh
new file mode 100755
index 000000000..a37a58cd9
--- /dev/null
+++ b/scripts/command/docker_build_tizen_cross.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+# default DOCKER_IMAGE_NAME=nnfw_docker
+DOCKER_IMAGE_NAME=${DOCKER_IMAGE_NAME:-nnfw_docker}
+echo "Using docker image $DOCKER_IMAGE_NAME"
+
+SCRIPT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+HOST_HOME=$SCRIPT_ROOT/../..
+DOCKER_HOME=/home
+
+GIT_SSL_NO_VERIFY=1
+
+DOCKER_VOLUMES+=" -v $HOST_HOME:$DOCKER_HOME"
+
+DOCKER_ENV_VARS+=" -e http_proxy"
+DOCKER_ENV_VARS+=" -e no_proxy"
+DOCKER_ENV_VARS+=" -e GIT_SSL_NO_VERIFY"
+
+DOCKER_ENV_VARS+=" -e TARGET_ARCH=armv7l"
+DOCKER_ENV_VARS+=" -e CROSS_BUILD=1"
+DOCKER_ENV_VARS+=" -e ROOTFS_DIR=/opt/rootfs"
+DOCKER_ENV_VARS+=" -e TARGET_OS=tizen"
+
+DOCKER_RUN_OPTS="--rm"
+DOCKER_RUN_OPTS+=" -w $DOCKER_HOME"
+
+# prepare rootfs
+if [[ ! -d $ROOTFS_DIR ]]; then
+ echo "cannot find rootfs"
+ exit 1
+fi
+
+DOCKER_VOLUMES+=" -v $ROOTFS_DIR/:/opt/rootfs"
+
+CMD="make && make install && make build_test_suite"
+
+docker run $DOCKER_RUN_OPTS $DOCKER_ENV_VARS $DOCKER_VOLUMES $DOCKER_IMAGE_NAME sh -c "$CMD"
+BUILD_RESULT=$?
+
+# restore host ownership of files created inside the container build
+NNFW_OWNER_UID=$(stat -c "%u" $HOST_HOME)
+NNFW_OWNER_GID=$(stat -c "%g" $HOST_HOME)
+
+CMD="chown -R $NNFW_OWNER_UID:$NNFW_OWNER_GID $DOCKER_HOME"
+docker run $DOCKER_RUN_OPTS $DOCKER_ENV_VARS $DOCKER_VOLUMES $DOCKER_IMAGE_NAME sh -c "$CMD"
+
+exit $BUILD_RESULT
diff --git a/scripts/command/docker_build_ubuntu_svace.sh b/scripts/command/docker_build_ubuntu_svace.sh
new file mode 100755
index 000000000..a17e4da38
--- /dev/null
+++ b/scripts/command/docker_build_ubuntu_svace.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+
+SCRIPT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+DOCKER_IMAGE_NAME=${DOCKER_IMAGE_NAME:-nnfw_docker}
+
+HOST_HOME=$SCRIPT_ROOT/../..
+DOCKER_HOME=/home
+
+GIT_SSL_NO_VERIFY=1
+
+DOCKER_VOLUMES+=" -v $HOST_HOME:$DOCKER_HOME"
+
+DOCKER_ENV_VARS+=" -e http_proxy"
+DOCKER_ENV_VARS+=" -e no_proxy"
+DOCKER_ENV_VARS+=" -e GIT_SSL_NO_VERIFY"
+
+DOCKER_ENV_VARS+=" -e TARGET_ARCH=armv7l"
+DOCKER_ENV_VARS+=" -e CROSS_BUILD=1"
+DOCKER_ENV_VARS+=" -e ROOTFS_DIR=/opt/rootfs"
+DOCKER_ENV_VARS+=" -e EXT_ACL_FOLDER=/opt/libarmcl"
+
+DOCKER_RUN_OPTS="--rm"
+DOCKER_RUN_OPTS+=" -w $DOCKER_HOME"
+
+TMP_DIR=$HOST_HOME/tmp
+
+if [ ! -d $SVACE_ANALYZER_DIR ]; then
+ echo "cannot find svace-analyzer"
+ exit 1
+fi
+
+which $SVACE_ANALYZER_DIR/bin/svace
+if [[ $? -ne 0 ]]; then
+ echo "cannot find svace-analyzer"
+ exit 1
+fi
+
+pushd $HOST_HOME
+
+# prepare armcl library
+if [[ ! -d $ARMCL_DIR ]]; then
+ echo "cannot find armcl"
+ exit 1
+fi
+
+# prepare rootfs
+if [[ ! -d $ROOTFS_DIR ]]; then
+ echo "cannot find rootfs"
+ exit 1
+fi
+
+# prepare svace
+if [[ ! -f $SVACE_POLICY_FILE ]]; then
+ echo "cannot find svace policy"
+ exit 1
+fi
+
+DOCKER_VOLUMES+=" -v $SVACE_ANALYZER_DIR:/opt/svace-analyzer"
+DOCKER_VOLUMES+=" -v $ROOTFS_DIR:/opt/rootfs"
+DOCKER_VOLUMES+=" -v $ARMCL_DIR:/opt/libarmcl"
+
+if [ -n "$DOCKER_INTERACTIVE" ]; then
+ DOCKER_RUN_OPTS+=" -it"
+ CMD="/bin/bash"
+else
+ CMD="make external_acl tflite && /opt/svace-analyzer/bin/svace init && /opt/svace-analyzer/bin/svace build make runtime testbuild"
+fi
+
+docker run $DOCKER_RUN_OPTS $DOCKER_ENV_VARS $DOCKER_VOLUMES $DOCKER_IMAGE_NAME sh -c "$CMD"
+BUILD_RESULT=$?
+
+# Newly created files during above docker run can have different ownership.
+# This may cause some problems, for example, some jenkins slaves or developers
+# can't remove built files due to lack of permission.
+# To address this issue, let's change owner of all files
+# in NNFW to owner of NNFW.
+NNFW_OWNER_UID=$(stat -c "%u" $HOST_HOME)
+NNFW_OWNER_GID=$(stat -c "%g" $HOST_HOME)
+
+CMD="chown -R $NNFW_OWNER_UID:$NNFW_OWNER_GID $DOCKER_HOME"
+docker run $DOCKER_RUN_OPTS $DOCKER_ENV_VARS $DOCKER_VOLUMES $DOCKER_IMAGE_NAME $CMD
+
+$SVACE_ANALYZER_DIR/bin/svace analyze --warning $SVACE_POLICY_FILE
+
+popd
+
+exit $BUILD_RESULT
diff --git a/scripts/command/docker_coverage_report.sh b/scripts/command/docker_coverage_report.sh
new file mode 100755
index 000000000..577f5db37
--- /dev/null
+++ b/scripts/command/docker_coverage_report.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+DOCKER_IMAGE_NAME=${DOCKER_IMAGE_NAME:-nnfw_docker}
+
+SCRIPT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+HOST_HOME=$SCRIPT_ROOT/../..
+DOCKER_HOME=/home
+
+GIT_SSL_NO_VERIFY=1
+
+DOCKER_VOLUMES+=" -v $HOST_HOME:$DOCKER_HOME"
+
+DOCKER_ENV_VARS+=" -e http_proxy"
+DOCKER_ENV_VARS+=" -e no_proxy"
+DOCKER_ENV_VARS+=" -e GIT_SSL_NO_VERIFY"
+
+DOCKER_RUN_OPTS="--rm"
+DOCKER_RUN_OPTS+=" -w $DOCKER_HOME"
+
+
+docker run $DOCKER_RUN_OPTS $DOCKER_ENV_VARS $DOCKER_VOLUMES $DOCKER_IMAGE_NAME sh -c "./scripts/command/gen_coverage_report.sh"
+
+BUILD_RESULT=$?
+
+# Newly created files during above docker run can have different ownership.
+# This may cause some problems, for example, some jenkins slaves or developers
+# can't remove built files due to lack of permission.
+# To address this issue, let's change owner of all files
+# in NNFW to owner of NNFW.
+NNFW_OWNER_UID=$(stat -c "%u" $HOST_HOME)
+NNFW_OWNER_GID=$(stat -c "%g" $HOST_HOME)
+
+CMD="chown -R $NNFW_OWNER_UID:$NNFW_OWNER_GID $DOCKER_HOME"
+docker run $DOCKER_RUN_OPTS $DOCKER_ENV_VARS $DOCKER_VOLUMES $DOCKER_IMAGE_NAME $CMD
+
+exit $BUILD_RESULT
+
diff --git a/scripts/command/docker_cross_test_coverage_build.sh b/scripts/command/docker_cross_test_coverage_build.sh
new file mode 100755
index 000000000..f954c481c
--- /dev/null
+++ b/scripts/command/docker_cross_test_coverage_build.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+DOCKER_IMAGE_NAME=${DOCKER_IMAGE_NAME:-nnfw_docker}
+
+SCRIPT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+HOST_HOME=$SCRIPT_ROOT/../..
+DOCKER_HOME=/home
+
+GIT_SSL_NO_VERIFY=1
+
+DOCKER_VOLUMES+=" -v $HOST_HOME:$DOCKER_HOME"
+
+DOCKER_ENV_VARS+=" -e http_proxy"
+DOCKER_ENV_VARS+=" -e no_proxy"
+DOCKER_ENV_VARS+=" -e GIT_SSL_NO_VERIFY"
+
+DOCKER_ENV_VARS+=" -e TARGET_ARCH=armv7l"
+DOCKER_ENV_VARS+=" -e CROSS_BUILD=1"
+DOCKER_ENV_VARS+=" -e ROOTFS_DIR=/opt/rootfs"
+DOCKER_ENV_VARS+=" -e EXT_ACL_FOLDER=/opt/libarmcl"
+DOCKER_ENV_VARS+=" -e COVERAGE_BUILD=1"
+
+DOCKER_RUN_OPTS="--rm"
+DOCKER_RUN_OPTS+=" -w $DOCKER_HOME"
+
+# prepare armcl library
+if [[ ! -d $ARMCL_DIR ]]; then
+ echo "cannot find armcl"
+ exit 1
+fi
+
+# prepare rootfs
+if [[ ! -d $ROOTFS_DIR ]]; then
+ echo "cannot find rootfs"
+ exit 1
+fi
+
+DOCKER_VOLUMES+=" -v $ROOTFS_DIR:/opt/rootfs"
+DOCKER_VOLUMES+=" -v $ARMCL_DIR:/opt/libarmcl"
+
+if [ -n "$DOCKER_INTERACTIVE" ]; then
+ DOCKER_RUN_OPTS+=" -it"
+ CMD="/bin/bash"
+else
+ CMD="make external_acl && make && make install && make build_coverage_suite"
+fi
+
+docker run $DOCKER_RUN_OPTS $DOCKER_ENV_VARS $DOCKER_VOLUMES $DOCKER_IMAGE_NAME sh -c "$CMD"
+BUILD_RESULT=$?
+
+# Newly created files during above docker run can have different ownership.
+# This may cause some problems, for example, some jenkins slaves or developers
+# can't remove built files due to lack of permission.
+# To address this issue, let's change owner of all files
+# in NNFW to owner of NNFW.
+NNFW_OWNER_UID=$(stat -c "%u" $HOST_HOME)
+NNFW_OWNER_GID=$(stat -c "%g" $HOST_HOME)
+
+CMD="chown -R $NNFW_OWNER_UID:$NNFW_OWNER_GID $DOCKER_HOME"
+docker run $DOCKER_RUN_OPTS $DOCKER_ENV_VARS $DOCKER_VOLUMES $DOCKER_IMAGE_NAME $CMD
+
+exit $BUILD_RESULT
diff --git a/scripts/command/docker_gbs_build.sh b/scripts/command/docker_gbs_build.sh
new file mode 100755
index 000000000..03979e6ee
--- /dev/null
+++ b/scripts/command/docker_gbs_build.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+MY_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+NNFW_ROOT=$MY_PATH/../..
+DOCKER_NNFW_HOME=/home/nnfw
+DOCKER_RPM_HOME=/home/rpm
+
+if [ "${GBS_RPM_DIR}" == "" ];
+then
+ GBS_RPM_DIR=$NNFW_ROOT/Product/out/rpm
+ mkdir -p ${GBS_RPM_DIR}
+fi
+
+if [ -z ${DOCKER_IMAGE} ];
+then
+ # use default docker image
+ DOCKER_IMAGE=nnfw_docker_tizen:latest
+fi
+
+
+DOCKER_VOLUMES+=" -v ${GBS_RPM_DIR}:${DOCKER_RPM_HOME} -v $NNFW_ROOT:${DOCKER_NNFW_HOME}"
+DOCKER_RUN_OPTS+=" --rm"
+DOCKER_RUN_OPTS+=" -w ${DOCKER_NNFW_HOME}"
+
+CMD="gbs -c ${DOCKER_NNFW_HOME}/scripts/command/gbs.conf build -A armv7l --profile=profile.tizen --clean --include-all --define '${GBS_DEFINE}' &&
+ cp -rf /home/GBS-ROOT/local/repos/tizen/armv7l/RPMS/*.rpm ${DOCKER_RPM_HOME}/."
+docker run $DOCKER_RUN_OPTS $DOCKER_VOLUMES ${DOCKER_ENV_VARS:-} ${DOCKER_IMAGE} sh -c "$CMD"
+BUILD_RESULT=$?
+
+# restore host ownership of files created inside the container build
+NNFW_OWNER_UID=$(stat -c "%u" $NNFW_ROOT)
+NNFW_OWNER_GID=$(stat -c "%g" $NNFW_ROOT)
+
+CMD="chown -R $NNFW_OWNER_UID:$NNFW_OWNER_GID ${DOCKER_NNFW_HOME} && chown -R $NNFW_OWNER_UID:$NNFW_OWNER_GID ${DOCKER_RPM_HOME}"
+docker run $DOCKER_RUN_OPTS $DOCKER_ENV_VARS $DOCKER_VOLUMES $DOCKER_IMAGE sh -c "$CMD"
+exit $BUILD_RESULT
diff --git a/scripts/command/docker_run.sh b/scripts/command/docker_run.sh
new file mode 100755
index 000000000..f4834f2ff
--- /dev/null
+++ b/scripts/command/docker_run.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+SCRIPT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+source $SCRIPT_ROOT/common.sh
+
+HOST_HOME=$SCRIPT_ROOT/../..
+DOCKER_HOME=/home
+
+GIT_SSL_NO_VERIFY=1
+
+DOCKER_VOLUMES+=" -v $HOST_HOME:$DOCKER_HOME"
+
+DOCKER_ENV_VARS+=" -e http_proxy"
+DOCKER_ENV_VARS+=" -e no_proxy"
+DOCKER_ENV_VARS+=" -e GIT_SSL_NO_VERIFY"
+
+DOCKER_RUN_OPTS="--rm"
+DOCKER_RUN_OPTS+=" -w $DOCKER_HOME"
+DOCKER_RUN_OPTS+=" -it"
+
+CMD="/bin/bash"
+
+docker run $DOCKER_RUN_OPTS $DOCKER_ENV_VARS $DOCKER_VOLUMES $DOCKER_IMAGE_NAME $CMD
diff --git a/scripts/command/docker_run_test.sh b/scripts/command/docker_run_test.sh
new file mode 100755
index 000000000..75edd3428
--- /dev/null
+++ b/scripts/command/docker_run_test.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+# default DOCKER_IMAGE_NAME=nnfw_docker
+DOCKER_IMAGE_NAME=${DOCKER_IMAGE_NAME:-nnfw_docker}
+echo "Using docker image $DOCKER_IMAGE_NAME"
+
+if [ -z "`docker images | grep $DOCKER_IMAGE_NAME`" ]; then
+ echo "Need docker image!"
+ exit 1
+fi
+
+HOST_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../.." && pwd )"
+
+DOCKER_PATH=/home/npuci/nnfw
+
+export GIT_SSL_NO_VERIFY=1
+
+DOCKER_VOLUMES=" -v /dev/null:/dev/raw1394"
+DOCKER_VOLUMES+=" -v $HOST_PATH:$DOCKER_PATH"
+
+DOCKER_ENV_VARS+=" -e http_proxy"
+DOCKER_ENV_VARS+=" -e no_proxy"
+DOCKER_ENV_VARS+=" -e GIT_SSL_NO_VERIFY"
+
+DOCKER_RUN_OPTS="--rm"
+DOCKER_RUN_OPTS+=" -w $DOCKER_PATH"
+
+CMD="make install"
+
+if [ "$DOCKER_INTERACTIVE" ]; then
+ DOCKER_RUN_OPTS+=" -it"
+ CMD="/bin/bash"
+fi
+
+docker run $DOCKER_RUN_OPTS $DOCKER_ENV_VARS $DOCKER_VOLUMES $DOCKER_IMAGE_NAME $CMD
+
+BUILD_RESULT=$?
+
+# Newly created files during above docker run can have different ownership.
+# This may cause some problems, for example, some jenkins slaves or developers
+# can't remove built files due to lack of permission.
+# To address this issue, let's change owner of all files
+# in NPU_Compiler to owner of NPU_Compiler.
+NPU_COMPILER_OWNER_UID=$(stat -c "%u" $HOST_PATH)
+NPU_COMPILER_OWNER_GID=$(stat -c "%g" $HOST_PATH)
+
+CMD="chown -R $NPU_COMPILER_OWNER_UID:$NPU_COMPILER_OWNER_GID $DOCKER_PATH"
+docker run $DOCKER_RUN_OPTS $DOCKER_ENV_VARS $DOCKER_VOLUMES $DOCKER_IMAGE_NAME $CMD
+
+exit $BUILD_RESULT
diff --git a/scripts/command/format-checker.sh b/scripts/command/format-checker.sh
new file mode 100755
index 000000000..5186b7476
--- /dev/null
+++ b/scripts/command/format-checker.sh
@@ -0,0 +1,89 @@
+#!/bin/bash
+
+function check_tools() {
+ which clang-format-3.9
+ if [[ $? -ne 0 ]]; then
+ echo "Error: clang-format-3.9 is not available."
+ echo " Please install clang-format-3.9."
+ exit 1
+ fi
+
+ which yapf
+ if [[ $? -ne 0 ]]; then
+ echo "Error: yapf is not available."
+ echo " Please install yapf."
+ exit 1
+ fi
+}
+
+function check_cpp_files() {
+ DIRECTORIES_TO_BE_TESTED=$1
+
+ # Check c++ files
+ CPP_FILES_IN_COMPILER=$(find "${DIRECTORIES_TO_BE_TESTED[@]}" -iname '*.h' -o -iname '*.cpp' -o -iname '*.cc')
+
+ if [[ ${#CPP_FILES_IN_COMPILER} -eq 0 ]]; then
+ echo "No cpp files to be checked"
+ return
+ fi
+
+ CPP_FILES_TO_BE_TESTED=$(git ls-files $CPP_FILES_IN_COMPILER)
+ if [[ ${#CPP_FILES_TO_BE_TESTED} -eq 0 ]]; then
+ echo "No changed cpp files to be checked"
+ return
+ fi
+
+ clang-format-3.9 -i $CPP_FILES_TO_BE_TESTED
+}
+
+function check_python_files() {
+ DIRECTORIES_TO_BE_TESTED=$1
+
+ # Check python files
+ PYTHON_FILES_IN_COMPILER=$(find "${DIRECTORIES_TO_BE_TESTED[@]}" -iname '*.py')
+
+ if [[ ${#PYTHON_FILES_IN_COMPILER} -eq 0 ]]; then
+ echo "No python files to be checked"
+ return
+ fi
+
+ PYTHON_FILES_TO_BE_TESTED=$(git ls-files $PYTHON_FILES_IN_COMPILER)
+ if [[ ${#PYTHON_FILES_TO_BE_TESTED} -eq 0 ]]; then
+ echo "No changed python files to be checked"
+ return
+ fi
+
+ yapf -i --style='{based_on_style: pep8, column_limit: 90}' $PYTHON_FILES_TO_BE_TESTED
+}
+
+echo "Make sure commit all changes before running this checker."
+
+check_tools
+
+DIRECTORIES_TO_BE_TESTED=()
+
+for DIR_TO_BE_TESTED in $(find -name '.FORMATCHECKED' -exec dirname {} \;); do
+ DIRECTORIES_TO_BE_TESTED+=("$DIR_TO_BE_TESTED")
+done
+
+if [[ ${#DIRECTORIES_TO_BE_TESTED[@]} -eq 0 ]]; then
+ echo "No directories to be checked"
+ exit 0
+fi
+
+check_cpp_files $DIRECTORIES_TO_BE_TESTED
+check_python_files $DIRECTORIES_TO_BE_TESTED
+
+git diff > format.patch
+PATCHFILE_SIZE=$(stat -c%s format.patch)
+if [[ $PATCHFILE_SIZE -ne 0 ]]; then
+ echo "[FAILED] Format checker failed and update code to follow convention."
+ echo " You can find changes in format.patch"
+ exit 1
+else
+ echo "[PASSED] Format checker succeed."
+ exit 0
+fi
+
+echo "Error: Something went wrong."
+exit 1
diff --git a/scripts/command/gbs.conf b/scripts/command/gbs.conf
new file mode 100644
index 000000000..af555c399
--- /dev/null
+++ b/scripts/command/gbs.conf
@@ -0,0 +1,21 @@
+[general]
+# Current profile name; it should match one of the profile section names below
+profile = profile.tizen
+
+[profile.tizen]
+user=obs_viewer
+passwdx = QlpoOTFBWSZTWWV18UwAAAKDgAAAkiCZgCAAMQZMQQDJ6jQwAvxdyRThQkGV18Uw
+obs = obs.tizen
+repos = repo.tizen_base,repo.tizen_mobile
+buildroot = /home/GBS-ROOT/
+
+[obs.tizen]
+url = http://api.tizen.org
+
+[repo.tizen_mobile]
+url = http://download.tizen.org/snapshots/tizen/unified/latest/repos/standard/packages/
+
+[repo.tizen_base]
+url = http://download.tizen.org/snapshots/tizen/base/latest/repos/standard/packages/
+
+
diff --git a/scripts/command/gen_coverage_report.sh b/scripts/command/gen_coverage_report.sh
new file mode 100755
index 000000000..32177919a
--- /dev/null
+++ b/scripts/command/gen_coverage_report.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+# This file is based on https://github.sec.samsung.net/STAR/nncc/pull/80
+
+SCRIPT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+NNFW_ROOT=$SCRIPT_ROOT/../..
+
+LCOV_PATH=$(command -v lcov)
+GENHTML_PATH=$(command -v genhtml)
+
+if [[ -z "${LCOV_PATH}" ]]; then
+ echo "ERROR: 'lcov' is not found"
+ exit 255
+fi
+
+if [[ -z "${GENHTML_PATH}" ]]; then
+ echo "ERROR: 'genhtml' is not found"
+ exit 255
+fi
+
+OUTPUT_PATH="$1"
+
+if [[ -z "${OUTPUT_PATH}" ]]; then
+ OUTPUT_PATH="$NNFW_ROOT/coverage"
+fi
+
+if [[ -e "${OUTPUT_PATH}" ]]; then
+ echo "ERROR: '${OUTPUT_PATH}' already exists"
+ exit 255
+fi
+
+mkdir -p "${OUTPUT_PATH}"
+
+RAW_COVERAGE_INFO_PATH="${OUTPUT_PATH}/coverage.raw.info"
+LIBS_COVERAGE_INFO_PATH="${OUTPUT_PATH}/coverage.libs.info"
+INCLUDE_COVERAGE_INFO_PATH="${OUTPUT_PATH}/coverage.include.info"
+RUNTIMES_COVERAGE_INFO_PATH="${OUTPUT_PATH}/coverage.runtimes.info"
+TOOLS_COVERAGE_INFO_PATH="${OUTPUT_PATH}/coverage.tools.info"
+FINAL_COVERAGE_INFO_PATH="${OUTPUT_PATH}/coverage.info"
+HTML_PATH="${OUTPUT_PATH}/html"
+COVERTURA_PATH="${OUTPUT_PATH}/nnfw_coverage.xml"
+
+"${LCOV_PATH}" -c -d "${NNFW_ROOT}" -o "${RAW_COVERAGE_INFO_PATH}"
+"${LCOV_PATH}" -e "${RAW_COVERAGE_INFO_PATH}" -o "${LIBS_COVERAGE_INFO_PATH}" '/home/libs/*'
+"${LCOV_PATH}" -e "${RAW_COVERAGE_INFO_PATH}" -o "${INCLUDE_COVERAGE_INFO_PATH}" '/home/include/*'
+"${LCOV_PATH}" -e "${RAW_COVERAGE_INFO_PATH}" -o "${RUNTIMES_COVERAGE_INFO_PATH}" '/home/runtimes/*'
+"${LCOV_PATH}" -e "${RAW_COVERAGE_INFO_PATH}" -o "${TOOLS_COVERAGE_INFO_PATH}" '/home/tools/*'
+"${LCOV_PATH}" -a "${LIBS_COVERAGE_INFO_PATH}" -a "${INCLUDE_COVERAGE_INFO_PATH}" \
+ -a "${RUNTIMES_COVERAGE_INFO_PATH}" -a "${TOOLS_COVERAGE_INFO_PATH}" \
+ -o "${FINAL_COVERAGE_INFO_PATH}"
+"${LCOV_PATH}" -r "${FINAL_COVERAGE_INFO_PATH}" -o "${FINAL_COVERAGE_INFO_PATH}" '/home/runtimes/tests/*'
+"${LCOV_PATH}" -r "${FINAL_COVERAGE_INFO_PATH}" -o "${FINAL_COVERAGE_INFO_PATH}" '/home/runtimes/nn/depend/*'
+"${GENHTML_PATH}" "${FINAL_COVERAGE_INFO_PATH}" --output-directory "${HTML_PATH}"
+
+tar -zcf "${OUTPUT_PATH}"/coverage_report.tar.gz "${HTML_PATH}"
+$SCRIPT_ROOT/lcov-to-covertura-xml.sh "${FINAL_COVERAGE_INFO_PATH}" -o "${COVERTURA_PATH}"
diff --git a/scripts/command/imported_url.txt b/scripts/command/imported_url.txt
new file mode 100644
index 000000000..b3a27cff5
--- /dev/null
+++ b/scripts/command/imported_url.txt
@@ -0,0 +1,3 @@
+# This file contains URLs of files which are imported from a public origin.
+
+1. lcov-to-covertura-xml.sh : https://github.com/eriwen/lcov-to-cobertura-xml
diff --git a/scripts/command/lcov-to-covertura-xml.sh b/scripts/command/lcov-to-covertura-xml.sh
new file mode 100755
index 000000000..7aae6d115
--- /dev/null
+++ b/scripts/command/lcov-to-covertura-xml.sh
@@ -0,0 +1,414 @@
+#!/usr/bin/env python
+
+# Copyright 2011-2012 Eric Wendelin
+#
+# This is free software, licensed under the Apache License, Version 2.0,
+# available in the accompanying LICENSE.txt file.
+
+"""
+Converts lcov line coverage output to Cobertura-compatible XML for CI
+"""
+
+import re
+import sys
+import os
+import time
+import subprocess
+from xml.dom import minidom
+from optparse import OptionParser
+
+from distutils.spawn import find_executable
+
+CPPFILT = "c++filt"
+HAVE_CPPFILT = False
+
+if find_executable(CPPFILT) is not None:
+ HAVE_CPPFILT = True
+
+VERSION = '1.6'
+__all__ = ['LcovCobertura']
+
+
+class Demangler(object):
+ def __init__(self):
+ self.pipe = subprocess.Popen(
+ CPPFILT, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+
+ def demangle(self, name):
+ self.pipe.stdin.write(name + "\n")
+ return self.pipe.stdout.readline().rstrip()
+
+
+class LcovCobertura(object):
+ """
+ Converts code coverage report files in lcov format to Cobertura's XML
+ report format so that CI servers like Jenkins can aggregate results and
+ determine build stability etc.
+
+ >>> from lcov_cobertura import LcovCobertura
+ >>> LCOV_INPUT = 'your lcov input'
+ >>> converter = LcovCobertura(LCOV_INPUT)
+ >>> cobertura_xml = converter.convert()
+ >>> print(cobertura_xml)
+ """
+
+ def __init__(self, lcov_data, base_dir='.', excludes=None, demangle=False):
+ """
+ Create a new :class:`LcovCobertura` object using the given `lcov_data`
+ and `options`.
+
+ :param lcov_data: Path to LCOV data file
+ :type lcov_data: string
+ :param base_dir: Path upon which to base all sources
+ :type base_dir: string
+ :param excludes: list of regexes to packages as excluded
+ :type excludes: [string]
+ :param demangle: whether to demangle function names using c++filt
+ :type demangle: bool
+ """
+
+ if not excludes:
+ excludes = []
+ self.lcov_data = lcov_data
+ self.base_dir = base_dir
+ self.excludes = excludes
+ if demangle:
+ demangler = Demangler()
+ self.format = demangler.demangle
+ else:
+ self.format = lambda x: x
+
+ def convert(self):
+ """
+ Convert lcov file to cobertura XML using options from this instance.
+ """
+ coverage_data = self.parse()
+ return self.generate_cobertura_xml(coverage_data)
+
+ def parse(self):
+ """
+ Generate a data structure representing it that can be serialized in any
+ logical format.
+ """
+
+ coverage_data = {
+ 'packages': {},
+ 'summary': {'lines-total': 0, 'lines-covered': 0,
+ 'branches-total': 0, 'branches-covered': 0},
+ 'timestamp': str(int(time.time()))
+ }
+ package = None
+ current_file = None
+ file_lines_total = 0
+ file_lines_covered = 0
+ file_lines = {}
+ file_methods = {}
+ file_branches_total = 0
+ file_branches_covered = 0
+
+ for line in self.lcov_data.split('\n'):
+ if line.strip() == 'end_of_record':
+ if current_file is not None:
+ package_dict = coverage_data['packages'][package]
+ package_dict['lines-total'] += file_lines_total
+ package_dict['lines-covered'] += file_lines_covered
+ package_dict['branches-total'] += file_branches_total
+ package_dict['branches-covered'] += file_branches_covered
+ file_dict = package_dict['classes'][current_file]
+ file_dict['lines-total'] = file_lines_total
+ file_dict['lines-covered'] = file_lines_covered
+ file_dict['lines'] = dict(file_lines)
+ file_dict['methods'] = dict(file_methods)
+ file_dict['branches-total'] = file_branches_total
+ file_dict['branches-covered'] = file_branches_covered
+ coverage_data['summary']['lines-total'] += file_lines_total
+ coverage_data['summary']['lines-covered'] += file_lines_covered
+ coverage_data['summary']['branches-total'] += file_branches_total
+ coverage_data['summary']['branches-covered'] += file_branches_covered
+
+ line_parts = line.split(':', 1)
+ input_type = line_parts[0]
+
+ if input_type == 'SF':
+ # Get file name
+ file_name = line_parts[-1].strip()
+ relative_file_name = os.path.relpath(file_name, self.base_dir)
+ package = '.'.join(relative_file_name.split(os.path.sep)[0:-1])
+ class_name = '.'.join(relative_file_name.split(os.path.sep))
+ if package not in coverage_data['packages']:
+ coverage_data['packages'][package] = {
+ 'classes': {}, 'lines-total': 0, 'lines-covered': 0,
+ 'branches-total': 0, 'branches-covered': 0
+ }
+ coverage_data['packages'][package]['classes'][
+ relative_file_name] = {
+ 'name': class_name, 'lines': {}, 'lines-total': 0,
+ 'lines-covered': 0, 'branches-total': 0,
+ 'branches-covered': 0
+ }
+ package = package
+ current_file = relative_file_name
+ file_lines_total = 0
+ file_lines_covered = 0
+ file_lines.clear()
+ file_methods.clear()
+ file_branches_total = 0
+ file_branches_covered = 0
+ elif input_type == 'DA':
+ # DA:2,0
+ (line_number, line_hits) = line_parts[-1].strip().split(',')
+ line_number = int(line_number)
+ if line_number not in file_lines:
+ file_lines[line_number] = {
+ 'branch': 'false', 'branches-total': 0,
+ 'branches-covered': 0
+ }
+ file_lines[line_number]['hits'] = line_hits
+ # Increment lines total/covered for class and package
+ try:
+ if int(line_hits) > 0:
+ file_lines_covered += 1
+ except:
+ pass
+ file_lines_total += 1
+ elif input_type == 'BRDA':
+ # BRDA:1,1,2,0
+ (line_number, block_number, branch_number, branch_hits) = line_parts[-1].strip().split(',')
+ line_number = int(line_number)
+ if line_number not in file_lines:
+ file_lines[line_number] = {
+ 'branch': 'true', 'branches-total': 0,
+ 'branches-covered': 0, 'hits': 0
+ }
+ file_lines[line_number]['branch'] = 'true'
+ file_lines[line_number]['branches-total'] += 1
+ file_branches_total += 1
+ if branch_hits != '-' and int(branch_hits) > 0:
+ file_lines[line_number]['branches-covered'] += 1
+ file_branches_covered += 1
+ elif input_type == 'BRF':
+ file_branches_total = int(line_parts[1])
+ elif input_type == 'BRH':
+ file_branches_covered = int(line_parts[1])
+ elif input_type == 'FN':
+ # FN:5,(anonymous_1)
+ function_line, function_name = line_parts[-1].strip().split(',')
+ file_methods[function_name] = [function_line, '0']
+ elif input_type == 'FNDA':
+ # FNDA:0,(anonymous_1)
+ (function_hits, function_name) = line_parts[-1].strip().split(',')
+ if function_name not in file_methods:
+ file_methods[function_name] = ['0', '0']
+ file_methods[function_name][-1] = function_hits
+
+ # Exclude packages
+ excluded = [x for x in coverage_data['packages'] for e in self.excludes
+ if re.match(e, x)]
+ for package in excluded:
+ del coverage_data['packages'][package]
+
+ # Compute line coverage rates
+ for package_data in list(coverage_data['packages'].values()):
+ package_data['line-rate'] = self._percent(
+ package_data['lines-total'],
+ package_data['lines-covered'])
+ package_data['branch-rate'] = self._percent(
+ package_data['branches-total'],
+ package_data['branches-covered'])
+
+ return coverage_data
+
+    def generate_cobertura_xml(self, coverage_data):
+        """
+        Given parsed coverage data, return a String cobertura XML representation.
+
+        :param coverage_data: Nested dict representing coverage information.
+        :type coverage_data: dict
+        """
+
+        # Create a document typed against the Cobertura DTD so downstream
+        # consumers (CI dashboards, coverage plugins) recognise the format.
+        dom_impl = minidom.getDOMImplementation()
+        doctype = dom_impl.createDocumentType("coverage", None,
+                                              "http://cobertura.sourceforge.net/xml/coverage-04.dtd")
+        document = dom_impl.createDocument(None, "coverage", doctype)
+        root = document.documentElement
+        summary = coverage_data['summary']
+        # Overall totals on the <coverage> root; rate attributes are 0..1
+        # fractions rendered as strings by _percent().
+        self._attrs(root, {
+            'branch-rate': self._percent(summary['branches-total'],
+                                         summary['branches-covered']),
+            'branches-covered': str(summary['branches-covered']),
+            'branches-valid': str(summary['branches-total']),
+            'complexity': '0',
+            'line-rate': self._percent(summary['lines-total'],
+                                       summary['lines-covered']),
+            'lines-covered': str(summary['lines-covered']),
+            'lines-valid': str(summary['lines-total']),
+            'timestamp': coverage_data['timestamp'],
+            'version': '2.0.3'
+        })
+
+        # <sources> holds the base directory that class filenames are
+        # relative to.
+        sources = self._el(document, 'sources', {})
+        source = self._el(document, 'source', {})
+        source.appendChild(document.createTextNode(self.base_dir))
+        sources.appendChild(source)
+
+        root.appendChild(sources)
+
+        packages_el = self._el(document, 'packages', {})
+
+        packages = coverage_data['packages']
+        for package_name, package_data in list(packages.items()):
+            package_el = self._el(document, 'package', {
+                'line-rate': package_data['line-rate'],
+                'branch-rate': package_data['branch-rate'],
+                'name': package_name,
+                'complexity': '0',
+            })
+            classes_el = self._el(document, 'classes', {})
+            # In lcov terms each "class" is really one source file.
+            for class_name, class_data in list(package_data['classes'].items()):
+                class_el = self._el(document, 'class', {
+                    'branch-rate': self._percent(class_data['branches-total'],
+                                                 class_data['branches-covered']),
+                    'complexity': '0',
+                    'filename': class_name,
+                    'line-rate': self._percent(class_data['lines-total'],
+                                               class_data['lines-covered']),
+                    'name': class_data['name']
+                })
+
+                # Process methods: each function is reduced to one synthetic
+                # <line> whose hit count comes from the lcov FN/FNDA records.
+                methods_el = self._el(document, 'methods', {})
+                for method_name, (line, hits) in list(class_data['methods'].items()):
+                    method_el = self._el(document, 'method', {
+                        'name': self.format(method_name),
+                        'signature': '',
+                        'line-rate': '1.0' if int(hits) > 0 else '0.0',
+                        'branch-rate': '1.0' if int(hits) > 0 else '0.0',
+                    })
+                    method_lines_el = self._el(document, 'lines', {})
+                    method_line_el = self._el(document, 'line', {
+                        'hits': hits,
+                        'number': line,
+                        'branch': 'false',
+                    })
+                    method_lines_el.appendChild(method_line_el)
+                    method_el.appendChild(method_lines_el)
+                    methods_el.appendChild(method_el)
+
+                # Process lines, emitted in ascending line-number order.
+                lines_el = self._el(document, 'lines', {})
+                lines = list(class_data['lines'].keys())
+                lines.sort()
+                for line_number in lines:
+                    line_el = self._el(document, 'line', {
+                        'branch': class_data['lines'][line_number]['branch'],
+                        'hits': str(class_data['lines'][line_number]['hits']),
+                        'number': str(line_number)
+                    })
+                    # Branch lines additionally carry the Cobertura
+                    # "NN% (covered/total)" condition-coverage attribute.
+                    if class_data['lines'][line_number]['branch'] == 'true':
+                        total = int(class_data['lines'][line_number]['branches-total'])
+                        covered = int(class_data['lines'][line_number]['branches-covered'])
+                        percentage = int((covered * 100.0) / total)
+                        line_el.setAttribute('condition-coverage',
+                                             '{0}% ({1}/{2})'.format(
+                                                 percentage, covered, total))
+                    lines_el.appendChild(line_el)
+
+                class_el.appendChild(methods_el)
+                class_el.appendChild(lines_el)
+                classes_el.appendChild(class_el)
+            package_el.appendChild(classes_el)
+            packages_el.appendChild(package_el)
+        root.appendChild(packages_el)
+
+        return document.toprettyxml()
+
+ def _el(self, document, name, attrs):
+ """
+ Create an element within document with given name and attributes.
+
+ :param document: Document element
+ :type document: Document
+ :param name: Element name
+ :type name: string
+ :param attrs: Attributes for element
+ :type attrs: dict
+ """
+ return self._attrs(document.createElement(name), attrs)
+
+ def _attrs(self, element, attrs):
+ """
+ Set attributes on given element.
+
+ :param element: DOM Element
+ :type element: Element
+ :param attrs: Attributes for element
+ :type attrs: dict
+ """
+ for attr, val in list(attrs.items()):
+ element.setAttribute(attr, val)
+ return element
+
+ def _percent(self, lines_total, lines_covered):
+ """
+ Get the percentage of lines covered in the total, with formatting.
+
+ :param lines_total: Total number of lines in given module
+ :type lines_total: number
+ :param lines_covered: Number of lines covered by tests in module
+ :type lines_covered: number
+ """
+
+ if lines_total == 0:
+ return '0.0'
+ return str(float(float(lines_covered) / float(lines_total)))
+
+
+def main(argv=None):
+ """
+ Converts LCOV coverage data to Cobertura-compatible XML for reporting.
+
+ Usage:
+ lcov_cobertura.py lcov-file.dat
+ lcov_cobertura.py lcov-file.dat -b src/dir -e test.lib -o path/out.xml
+
+ By default, XML output will be written to ./coverage.xml
+ """
+ if argv is None:
+ argv = sys.argv
+ parser = OptionParser()
+ parser.usage = ('lcov_cobertura.py lcov-file.dat [-b source/dir] '
+ '[-e <exclude packages regex>] [-o output.xml] [-d]')
+ parser.description = 'Converts lcov output to cobertura-compatible XML'
+ parser.add_option('-b', '--base-dir', action='store',
+ help='Directory where source files are located',
+ dest='base_dir', default='.')
+ parser.add_option('-e', '--excludes',
+ help='Comma-separated list of regexes of packages to exclude',
+ action='append', dest='excludes', default=[])
+ parser.add_option('-o', '--output',
+ help='Path to store cobertura xml file',
+ action='store', dest='output', default='coverage.xml')
+ parser.add_option('-d', '--demangle',
+ help='Demangle C++ function names using %s' % CPPFILT,
+ action='store_true', dest='demangle', default=False)
+ (options, args) = parser.parse_args(args=argv)
+
+ if options.demangle and not HAVE_CPPFILT:
+ raise RuntimeError("C++ filter executable (%s) not found!" % CPPFILT)
+
+ if len(args) != 2:
+ print(main.__doc__)
+ sys.exit(1)
+
+ try:
+ with open(args[1], 'r') as lcov_file:
+ lcov_data = lcov_file.read()
+ lcov_cobertura = LcovCobertura(lcov_data, options.base_dir, options.excludes, options.demangle)
+ cobertura_xml = lcov_cobertura.convert()
+ with open(options.output, mode='wt') as output_file:
+ output_file.write(cobertura_xml)
+ except IOError:
+ sys.stderr.write("Unable to convert %s to Cobertura XML" % args[1])
+
+# Allow use both as an importable module and as a command-line tool.
+if __name__ == '__main__':
+    main()
diff --git a/scripts/command/tizen_xu4_test.sh b/scripts/command/tizen_xu4_test.sh
new file mode 100755
index 000000000..bfebc352d
--- /dev/null
+++ b/scripts/command/tizen_xu4_test.sh
@@ -0,0 +1,135 @@
+#!/bin/bash
+
+SCRIPT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+NNFW_ROOT=$SCRIPT_ROOT/../..
+if [ -z "$TEST_ROOT" ]; then
+ TEST_ROOT=/opt/usr/nnfw-test
+fi
+
+# Print command-line usage for this script to stdout.
+function Usage()
+{
+    cat <<'USAGE_END'
+Usage: ./tizen_xu4_test.sh --rpm-dir=path/to/rpm-dir --unittest --verification
+Usage: ./tizen_xu4_test.sh --test-suite-path=path/to/test-suite.tar.gz --unittest --verification
+--rpm-dir : directory containing nnfw.rpm and nnfw-test.rpm
+--test-suite-path : filepath to test-suite.tar.gz
+--unittest : run unittest
+--verification : run verification
+--framework : run framework
+USAGE_END
+}
+
+
+#######################################
+# Push the RPMs in $RPM_DIR to the device, install them, and push the
+# downloaded tflite model cache.
+# Globals: RPM_DIR, SDB_CMD, TEST_ROOT, NNFW_ROOT (all read)
+#######################################
+function prepare_rpm_test()
+{
+    echo "======= Test with rpm packages(gbs build) ======="
+    # install nnfw nnfw-test rpms (quote paths so spaces don't split words)
+    for file in "$RPM_DIR"/*
+    do
+        $SDB_CMD push "$file" "$TEST_ROOT"
+        $SDB_CMD shell rpm -Uvh "$TEST_ROOT/$(basename "$file")" --force --nodeps
+    done
+
+    # download tflite model files
+    pushd "$NNFW_ROOT"
+    tests/framework/run_test.sh --download=on
+    tar -zcf cache.tar.gz tests/framework/cache
+    $SDB_CMD push cache.tar.gz "$TEST_ROOT"/.
+    # plain file: -f is sufficient, no recursive delete needed
+    rm -f cache.tar.gz
+    $SDB_CMD shell tar -zxf "$TEST_ROOT"/cache.tar.gz -C "$TEST_ROOT"
+    # BUG FIX: restore the caller's working directory (pushd had no popd)
+    popd
+}
+
+#######################################
+# Push and extract the cross-built test suite on the device, then push the
+# downloaded tflite model cache.
+# Globals: TEST_SUITE_PATH, SDB_CMD, TEST_ROOT, NNFW_ROOT (all read)
+#######################################
+function prepare_suite_test()
+{
+    echo "======= Test with test-suite(cross build) ======="
+    # install test-suite (quote paths so spaces don't split words)
+    $SDB_CMD push "$TEST_SUITE_PATH" "$TEST_ROOT/$(basename "$TEST_SUITE_PATH")"
+    $SDB_CMD shell tar -zxf "$TEST_ROOT/$(basename "$TEST_SUITE_PATH")" -C "$TEST_ROOT"
+
+    # download tflite model files
+    pushd "$NNFW_ROOT"
+    tests/framework/run_test.sh --download=on
+    tar -zcf cache.tar.gz tests/framework/cache
+    $SDB_CMD push cache.tar.gz "$TEST_ROOT"/.
+    # plain file: -f is sufficient, no recursive delete needed
+    rm -f cache.tar.gz
+    $SDB_CMD shell tar -zxf "$TEST_ROOT"/cache.tar.gz -C "$TEST_ROOT"
+    # BUG FIX: restore the caller's working directory (pushd had no popd)
+    popd
+}
+
+
+# Parse command argv
+for i in "$@"
+do
+    case "$i" in
+        -h|--help|help)
+            Usage
+            exit 1
+            ;;
+        --rpm-dir=*)
+            RPM_DIR=${i#*=}
+            ;;
+        --test-suite-path=*)
+            TEST_SUITE_PATH=${i#*=}
+            ;;
+        --unittest)
+            UNITTEST=on
+            ;;
+        --verification)
+            VERIFICATION=on
+            ;;
+        --framework)
+            FRAMEWORK=on
+            ;;
+    esac
+    # BUG FIX: removed the per-iteration 'shift' that was here. "$@" is
+    # expanded once before the loop starts, so the shift never affected
+    # iteration and only silently clobbered the positional parameters.
+done
+
+
+N=`sdb devices 2>/dev/null | wc -l`
+
+# exit if no device found
+if [[ $N -le 1 ]]; then
+ echo "No device found."
+ exit 1;
+fi
+
+NUM_DEV=$(($N-1))
+echo "device list"
+DEVICE_LIST=`sdb devices 2>/dev/null`
+echo "$DEVICE_LIST" | tail -n"$NUM_DEV"
+
+if [ -z "$SERIAL" ]; then
+ SERIAL=`echo "$DEVICE_LIST" | tail -n1 | awk '{print $1}'`
+fi
+SDB_CMD="sdb -s $SERIAL "
+
+# root on, remount as rw
+$SDB_CMD root on
+$SDB_CMD shell mount -o rw,remount /
+
+SCRIPT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+ROOT=$SCRIPT_ROOT/../
+
+if [ -z "$RPM_DIR" ] && [ -z "$TEST_SUITE_PATH" ]; then
+ echo "Please provide --rpm-dir or --test-suite-path"
+ exit 255
+fi
+
+if [ ! -z "$RPM_DIR" ]; then
+ prepare_rpm_test
+else
+ prepare_suite_test
+fi
+
+# run unittest
+if [ "$UNITTEST" == "on" ]; then
+ $SDB_CMD shell $TEST_ROOT/tools/test_driver/test_driver.sh --unittest --artifactpath=$TEST_ROOT
+fi
+
+# run framework test
+if [ "$FRAMEWORK" == "on" ]; then
+ $SDB_CMD shell $TEST_ROOT/tools/test_driver/test_driver.sh --frameworktest --artifactpath=$TEST_ROOT
+fi
+
+# run verification
+if [ "$VERIFICATION" == "on" ]; then
+ $SDB_CMD shell $TEST_ROOT/tools/test_driver/test_driver.sh --verification --artifactpath=$TEST_ROOT
+fi
+
diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile
new file mode 100644
index 000000000..cde173a5c
--- /dev/null
+++ b/scripts/docker/Dockerfile
@@ -0,0 +1,14 @@
+FROM ubuntu:16.04
+
+# Accept proxy settings at build time (docker build --build-arg http_proxy=...).
+# FIX: a bare 'ENV http_proxy $http_proxy' references an undeclared variable
+# and always expands to an empty string; ARG makes the value injectable.
+ARG http_proxy
+ARG https_proxy
+ENV http_proxy $http_proxy
+ENV https_proxy $https_proxy
+
+# Single apt layer: one 'update' paired with one 'install' avoids the stale
+# package-index problem of repeated update/install pairs and reduces layers.
+RUN apt-get update && apt-get --yes --force-yes install \
+    build-essential \
+    scons cmake \
+    gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf \
+    libboost-all-dev \
+    git \
+    gcc-5-arm-linux-gnueabi g++-5-arm-linux-gnueabi \
+    lcov \
+    clang-format-3.9 python-pip
+RUN pip install yapf
diff --git a/scripts/docker/Dockerfile_tizen b/scripts/docker/Dockerfile_tizen
new file mode 100644
index 000000000..1c39380b0
--- /dev/null
+++ b/scripts/docker/Dockerfile_tizen
@@ -0,0 +1,8 @@
+FROM ubuntu:16.04
+
+# Accept proxy settings at build time; ARG makes them injectable via
+# --build-arg (a bare ENV of an undeclared variable expands to empty).
+ARG http_proxy
+ARG https_proxy
+ENV http_proxy $http_proxy
+ENV https_proxy $https_proxy
+
+# Register the Tizen tools repository.
+# FIX: dropped the useless 'cat' -- 'echo ... | cat >> file' is equivalent
+# to appending directly.
+RUN echo 'deb [trusted=yes] http://download.tizen.org/tools/latest-release/Ubuntu_16.04/ /' >> /etc/apt/sources.list
+
+RUN apt-get update && apt-get --yes --force-yes install gbs