author     Jiyoung Yun <jy910.yun@samsung.com>  2017-04-13 14:17:19 +0900
committer  Jiyoung Yun <jy910.yun@samsung.com>  2017-04-13 14:17:19 +0900
commit     a56e30c8d33048216567753d9d3fefc2152af8ac (patch)
tree       7e5d979695fc4a431740982eb1cfecc2898b23a5 /tests/scripts
parent     4b11dc566a5bbfa1378d6266525c281b028abcc8 (diff)
download   coreclr-a56e30c8d33048216567753d9d3fefc2152af8ac.tar.gz
           coreclr-a56e30c8d33048216567753d9d3fefc2152af8ac.tar.bz2
           coreclr-a56e30c8d33048216567753d9d3fefc2152af8ac.zip
Imported Upstream version 2.0.0.11353 (tag: upstream/2.0.0.11353)
Diffstat (limited to 'tests/scripts')
-rwxr-xr-x               tests/scripts/arm32_ci_script.sh      215
-rwxr-xr-x               tests/scripts/arm32_ci_test.sh        124
-rw-r--r--               tests/scripts/arm64_post_build.py       2
-rw-r--r--               tests/scripts/build_illink.cmd         44
-rwxr-xr-x               tests/scripts/build_illink.sh          65
-rw-r--r--               tests/scripts/format.py                10
-rwxr-xr-x [-rw-r--r--]  tests/scripts/lst_creator.py          469
-rwxr-xr-x               tests/scripts/optdata/bootstrap.py     85
-rw-r--r--               tests/scripts/optdata/project.json     12
-rwxr-xr-x               tests/scripts/perf-prep.sh             54
-rw-r--r--               tests/scripts/run-corefx-tests.py      39
-rw-r--r--               tests/scripts/run-throughput-perf.py  400
-rw-r--r--               tests/scripts/run-xunit-perf.cmd       40
-rwxr-xr-x               tests/scripts/run-xunit-perf.sh        31
-rwxr-xr-x               tests/scripts/x86_ci_script.sh         58
15 files changed, 1369 insertions, 279 deletions
diff --git a/tests/scripts/arm32_ci_script.sh b/tests/scripts/arm32_ci_script.sh
index 6745e63145..3124cc89ce 100755
--- a/tests/scripts/arm32_ci_script.sh
+++ b/tests/scripts/arm32_ci_script.sh
@@ -27,9 +27,14 @@ function usage {
echo ' <path>/platform/rootfs-t30.ext4 should exist'
echo ' --mountPath=<path> : The desired path for mounting the emulator rootfs (without ending /)'
echo ' This path is created if not already present'
- echo ' --buildConfig=<config> : The value of config should be either Debug or Release'
+ echo ' --buildConfig=<config> : The value of config should be either Debug, Checked or Release'
echo ' Any other value is not accepted'
echo 'Optional Arguments:'
+ echo ' --mode=<mode> : docker or emulator (default)'
+ echo ' --arm : Build using hard ABI'
+ echo ' --armel : Build using softfp ABI (default)'
+ echo ' --linuxCodeName=<name> : Code name for Linux: For arm, trusty (default) and xenial. For armel, tizen'
+ echo ' --skipRootFS : Skip building rootfs'
echo ' --skipTests : Presenting this option skips testing the generated binaries'
echo ' If this option is not presented, then tests are run by default'
echo ' using the other test related options'
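For reference, a docker-mode run with the new options might be invoked roughly as follows; the chosen code names, configurations and the decision to skip tests are illustrative, not taken from this patch:

    ./tests/scripts/arm32_ci_script.sh --mode=docker --arm --linuxCodeName=xenial --buildConfig=Checked
    ./tests/scripts/arm32_ci_script.sh --mode=docker --armel --linuxCodeName=tizen --buildConfig=Release --skipTests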
@@ -148,7 +153,9 @@ function handle_exit {
echo 'The script is exited. Cleaning environment..'
- clean_env
+ if [ "$__ciMode" == "emulator" ]; then
+ clean_env
+ fi
}
trap handle_exit EXIT
@@ -229,6 +236,64 @@ function cross_build_coreclr {
fi
}
+#Cross builds coreclr using Docker
+function cross_build_coreclr_with_docker {
+ __currentWorkingDirectory=`pwd`
+
+ # Check build configuration and choose Docker image
+ __dockerEnvironmentVariables=""
+ if [ "$__buildArch" == "arm" ]; then
+ # TODO: For arm, we are going to embed RootFS inside Docker image.
+ case $__linuxCodeName in
+ trusty)
+ __dockerImage=" microsoft/dotnet-buildtools-prereqs:ubuntu-14.04-cross-0cd4667-20172211042239"
+ __skipRootFS=1
+ __dockerEnvironmentVariables+=" -e ROOTFS_DIR=/crossrootfs/arm"
+ __runtimeOS="ubuntu.14.04"
+ ;;
+ xenial)
+ __dockerImage=" microsoft/dotnet-buildtools-prereqs:ubuntu-16.04-cross-ef0ac75-20175511035548"
+ __skipRootFS=1
+ __dockerEnvironmentVariables+=" -e ROOTFS_DIR=/crossrootfs/arm"
+ __runtimeOS="ubuntu.16.04"
+ ;;
+ *)
+ exit_with_error "ERROR: $__linuxCodeName is not a supported linux name for $__buildArch" false
+ ;;
+ esac
+ elif [ "$__buildArch" == "armel" ]; then
+ # For armel Tizen, we are going to construct RootFS on the fly.
+ case $__linuxCodeName in
+ tizen)
+ __dockerImage=" t2wish/dotnetcore:ubuntu1404_cross_prereqs_v4"
+ __runtimeOS="tizen.4.0.0"
+ ;;
+ *)
+ echo "ERROR: $__linuxCodeName is not a supported linux name for $__buildArch"
+ exit_with_error "ERROR: $__linuxCodeName is not a supported linux name for $__buildArch" false
+ ;;
+ esac
+ else
+ exit_with_error "ERROR: unknown buildArch $__buildArch" false
+ fi
+ __dockerCmd="sudo docker run ${__dockerEnvironmentVariables} --privileged -i --rm -v $__currentWorkingDirectory:/opt/code -w /opt/code $__dockerImage"
+
+ if [ $__skipRootFS == 0 ]; then
+ # Build rootfs
+ __buildRootfsCmd="./cross/build-rootfs.sh $__buildArch $__linuxCodeName --skipunmount"
+
+ (set +x; echo "Build RootFS for $__buildArch $__linuxCodeName")
+ $__dockerCmd $__buildRootfsCmd
+ sudo chown -R $(id -u -n) cross/rootfs
+ fi
+
+ # Cross building coreclr with rootfs in Docker
+ (set +x; echo "Start cross build coreclr for $__buildArch $__linuxCodeName")
+ __buildCmd="./build.sh $__buildArch cross $__verboseFlag $__skipMscorlib $__buildConfig -rebuild clang3.9"
+ $__dockerCmd $__buildCmd
+ sudo chown -R $(id -u -n) ./bin
+}
+
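For the arm/trusty branch above (no verbose flag, no skipmscorlib, a Release build), the command this function assembles expands to roughly the following sketch; the bind-mount source is whatever `pwd` returns:

    sudo docker run -e ROOTFS_DIR=/crossrootfs/arm --privileged -i --rm \
        -v $(pwd):/opt/code -w /opt/code \
        microsoft/dotnet-buildtools-prereqs:ubuntu-14.04-cross-0cd4667-20172211042239 \
        ./build.sh arm cross Release -rebuild clang3.9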
#Copy the needed files to the emulator to run tests
function copy_to_emulator {
@@ -246,11 +311,6 @@ function copy_to_emulator {
__testNativeBinDirBase="$__ARMEmulCoreclr/$__testNativeBinDirBase"
sudo cp -R "./$__coreClrBinDirBase" "$__ARMRootfsCoreclrPath/$__coreClrBinDirBase"
- if [ ! -z "$__mscorlibDir" ]; then
- sudo cp "$__mscorlibDir/mscorlib.dll" "$__ARMRootfsCoreclrPath/$__coreClrBinDirBase/"
- else
- sudo cp "./$__coreClrBinDirBase/mscorlib.dll" "$__ARMRootfsCoreclrPath/$__coreClrBinDirBase/"
- fi
__coreClrBinDirBase="$__ARMEmulCoreclr/$__coreClrBinDirBase"
__mscorlibDirBase="$__coreClrBinDirBase"
@@ -297,7 +357,47 @@ function run_tests {
EOF
}
+function run_tests_using_docker {
+ __currentWorkingDirectory=`pwd`
+
+ # Configure docker
+ __dockerEnvironmentVariables=""
+ if [ "$__buildArch" == "arm" ]; then
+ case $__linuxCodeName in
+ trusty)
+ __dockerImage=" microsoft/dotnet-buildtools-prereqs:ubuntu1404_cross_prereqs_v3"
+ __skipRootFS=1
+ __dockerEnvironmentVariables=" -e ROOTFS_DIR=/crossrootfs/arm"
+ ;;
+ xenial)
+ __dockerImage=" microsoft/dotnet-buildtools-prereqs:ubuntu1604_cross_prereqs_v3"
+ __skipRootFS=1
+ __dockerEnvironmentVariables=" -e ROOTFS_DIR=/crossrootfs/arm"
+ ;;
+ *)
+ exit_with_error "ERROR: $__linuxCodeName is not a supported linux name for $__buildArch" false
+ ;;
+ esac
+ elif [ "$__buildArch" == "armel" ]; then
+ case $__linuxCodeName in
+ tizen)
+ __dockerImage=" t2wish/dotnetcore:ubuntu1404_cross_prereqs_v3"
+ ;;
+ *)
+ exit_with_error "ERROR: $__linuxCodeName is not a supported linux name for $__buildArch" false
+ ;;
+ esac
+ else
+ exit_with_error "ERROR: unknown buildArch $__buildArch" false
+ fi
+ __dockerCmd="sudo docker run ${__dockerEnvironmentVariables} --privileged -i --rm -v $__currentWorkingDirectory:/opt/code -w /opt/code $__dockerImage"
+ __testCmd="./tests/scripts/arm32_ci_test.sh --abi=${__buildArch} --buildConfig=${__buildConfig}"
+
+ $__dockerCmd $__testCmd
+}
+
#Define script variables
+__ciMode="emulator"
__ARMEmulRootfs=/mnt/arm-emulator-rootfs
__ARMEmulPath=
__ARMRootfsMountPath=
@@ -312,6 +412,8 @@ __testDirFile=
__verboseFlag=
__buildOS="Linux"
__buildArch="armel"
+__linuxCodeName="tizen"
+__skipRootFS=0
__buildDirName=
__initialGitHead=`git rev-parse --verify HEAD`
@@ -327,10 +429,13 @@ do
;;
--buildConfig=*)
__buildConfig="$(echo ${arg#*=} | awk '{print tolower($0)}')"
- if [[ "$__buildConfig" != "debug" && "$__buildConfig" != "release" ]]; then
- exit_with_error "--buildConfig can be only Debug or Release" true
+ if [[ "$__buildConfig" != "debug" && "$__buildConfig" != "release" && "$__buildConfig" != "checked" ]]; then
+ exit_with_error "--buildConfig can be Debug, Checked or Release" true
fi
;;
+ --mode=*)
+ __ciMode=${arg#*=}
+ ;;
--skipTests)
__skipTests=1
;;
@@ -355,6 +460,19 @@ do
--testDirFile=*)
__testDirFile=${arg#*=}
;;
+ --arm)
+ __buildArch="arm"
+ ;;
+ --armel)
+ __buildArch="armel"
+ __linuxCodeName="tizen"
+ ;;
+ --linuxCodeName=*)
+ __linuxCodeName=${arg#*=}
+ ;;
+ --skipRootFS)
+ __skipRootFS=1
+ ;;
-h|--help)
usage
;;
@@ -372,26 +490,24 @@ if [[ $(git status -s) != "" ]]; then
exit 1
fi
-#Check if the compulsory arguments have been presented to the script and if the input paths exist
-exit_if_empty "$__ARMEmulPath" "--emulatorPath is a mandatory argument, not provided" true
-exit_if_empty "$__ARMRootfsMountPath" "--mountPath is a mandatory argument, not provided" true
exit_if_empty "$__buildConfig" "--buildConfig is a mandatory argument, not provided" true
-exit_if_path_absent "$__ARMEmulPath/platform/rootfs-t30.ext4" "Path specified in --emulatorPath does not have the rootfs" false
+if [ "$__ciMode" == "emulator" ]; then
+ #Check if the compulsory arguments have been presented to the script and if the input paths exist
+ exit_if_empty "$__ARMEmulPath" "--emulatorPath is a mandatory argument, not provided" true
+ exit_if_empty "$__ARMRootfsMountPath" "--mountPath is a mandatory argument, not provided" true
+ exit_if_path_absent "$__ARMEmulPath/platform/rootfs-t30.ext4" "Path specified in --emulatorPath does not have the rootfs" false
+    # Testing is not available in emulator mode.
+ __skipTests=1
+fi
+__coreFxBinDir="./bin/CoreFxBinDir" # TODO-cleanup: Just for testing....
#Check if the optional arguments are present in the case that testing is to be done
if [ $__skipTests == 0 ]; then
exit_if_empty "$__testRootDir" "Testing requested, but --testRootDir not provided" true
exit_if_path_absent "$__testRootDir" "Path specified in --testRootDir does not exist" false
- exit_if_empty "$__coreFxNativeBinDir" "Testing requested but --coreFxNativeBinDir not provided" true
- exit_if_path_absent "$__coreFxNativeBinDir" "Path specified in --coreFxNativeBinDir does not exist" false
-
exit_if_empty "$__coreFxBinDir" "Testing requested, but --coreFxBinDir not provided" true
- while IFS=';' read -ra coreFxBinDirectories; do
- for currDir in "${coreFxBinDirectories[@]}"; do
- exit_if_path_absent "$currDir" "Path specified in --coreFxBinDir, $currDir does not exist" false
- done
- done <<< "$__coreFxBinDir"
+ exit_if_path_absent "$__coreFxBinDir" "Path specified in --coreFxBinDir does not exist" false
exit_if_empty "$__testDirFile" "Testing requested, but --testDirFile not provided" true
exit_if_path_absent "$__testDirFile" "Path specified in --testDirFile does not exist" false
@@ -408,6 +524,8 @@ fi
#Change build configuration to the capitalized form to create build product paths correctly
if [[ "$__buildConfig" == "release" ]]; then
__buildConfig="Release"
+elif [[ "$__buildConfig" == "checked" ]]; then
+ __buildConfig="Checked"
else
__buildConfig="Debug"
fi
@@ -433,37 +551,50 @@ set -e
## Begin cross build
(set +x; echo "Git HEAD @ $__initialGitHead")
-#Mount the emulator
-(set +x; echo 'Mounting emulator...')
-mount_emulator
+if [ "$__ciMode" == "docker" ]; then
+ # Complete the cross build using Docker
+ (set +x; echo 'Building coreclr...')
+ cross_build_coreclr_with_docker
+else
+ #Mount the emulator
+ (set +x; echo 'Mounting emulator...')
+ mount_emulator
-#Clean the emulator
-(set +x; echo 'Cleaning emulator...')
-clean_emulator
+ #Clean the emulator
+ (set +x; echo 'Cleaning emulator...')
+ clean_emulator
-#Complete the cross build
-(set +x; echo 'Building coreclr...')
-cross_build_coreclr
+ #Complete the cross build
+ (set +x; echo 'Building coreclr...')
+ cross_build_coreclr
+fi
#If tests are to be skipped end the script here, else continue
if [ $__skipTests == 1 ]; then
exit 0
fi
-## Tests are going to be performed in an emulated environment
-
-#Copy the needed files to the emulator before entering the emulated environment
-(set +x; echo 'Setting up emulator to run tests...')
-copy_to_emulator
-
-#Enter the emulated mode and run the tests
-(set +x; echo 'Running tests...')
-run_tests
+__unittestResult=0
+## Begin CoreCLR test
+if [ "$__ciMode" == "docker" ]; then
+ run_tests_using_docker
+ __unittestResult=$?
+else
+ ## Tests are going to be performed in an emulated environment
+ #Copy the needed files to the emulator before entering the emulated environment
+ (set +x; echo 'Setting up emulator to run tests...')
+ copy_to_emulator
-#Clean the environment
-(set +x; echo 'Cleaning environment...')
-clean_env
+ #Enter the emulated mode and run the tests
+ (set +x; echo 'Running tests...')
+ run_tests
+ __unittestResult=$?
+ #Clean the environment
+ (set +x; echo 'Cleaning environment...')
+ clean_env
+fi
(set +x; echo 'Build and test complete')
+exit $__unittestResult
diff --git a/tests/scripts/arm32_ci_test.sh b/tests/scripts/arm32_ci_test.sh
new file mode 100755
index 0000000000..33a951e324
--- /dev/null
+++ b/tests/scripts/arm32_ci_test.sh
@@ -0,0 +1,124 @@
+#!/bin/bash
+
+set -x
+
+function usage {
+ echo 'ARM Test Script'
+ echo '$ ./tests/scripts/arm32_ci_test.sh'
+ echo ' --abi=arm'
+ echo ' --buildConfig=Release'
+ echo 'Required Arguments:'
+ echo ' --abi=<abi> : arm (default) or armel'
+ echo ' --buildConfig=<config> : Release (default), Checked, or Debug'
+}
+
+# Display error message and exit
+function exit_with_error {
+ set +x
+
+ local errorMessage="$1"
+ local printUsage=$2
+
+ echo "ERROR: $errorMessage"
+ if [ "$printUsage" == "true" ]; then
+ echo ''
+ usage
+ fi
+ exit 1
+}
+
+# Exit if the input path does not exist
+function exit_if_path_absent {
+ local path="$1"
+ local errorMessage="$2"
+ local printUsage=$3
+
+ if [ ! -f "$path" -a ! -d "$path" ]; then
+ exit_with_error "$errorMessage" $printUsage
+ fi
+}
+
+__abi="arm"
+__buildConfig="Release"
+
+# Parse command line arguments
+for arg in "$@"
+do
+ case $arg in
+ --abi=*)
+ __abi=${arg#*=}
+ if [[ "$__abi" != "arm" && "$__abi" != "armel" ]]; then
+ exit_with_error "--abi can be either arm or armel" true
+ fi
+ ;;
+ --buildConfig=*)
+ __buildConfig=${arg#*=}
+ if [[ "$__buildConfig" != "Debug" && "$__buildConfig" != "Release" && "$__buildConfig" != "Checked" ]]; then
+ exit_with_error "--buildConfig can be Debug, Checked or Release" true
+ fi
+ ;;
+ -v|--verbose)
+ __verboseFlag="verbose"
+ ;;
+ -h|--help)
+ usage
+ exit 0
+ ;;
+ *)
+ exit_with_error "$arg not a recognized argument" true
+ ;;
+ esac
+done
+__buildDirName="Linux.${__abi}.${__buildConfig}"
+
+CORECLR_DIR=/opt/code
+ARM_CHROOT_HOME_DIR=/home/coreclr
+
+if [ -z "${ROOTFS_DIR}" ]; then
+ __ROOTFS_DIR=${CORECLR_DIR}/cross/rootfs/${__abi}
+else
+ __ROOTFS_DIR=${ROOTFS_DIR}
+fi
+
+if [ "$__abi" == "armel" ]; then
+ # TODO: Make use of a single Tizen rootfs for build and test
+
+ # TODO-cleanup: the latest docker image already has mic installed.
+ # Prepare Tizen (armel) environment
+ #echo "deb http://download.tizen.org/tools/latest-release/Ubuntu_14.04 /" >> /etc/apt/sources.list
+ #apt-get update
+ #apt-get -y -qq --force-yes install mic
+
+ pushd ${CORECLR_DIR}/cross/armel/tizen
+ mic --non-interactive create fs --pack-to=tizen.tar.gz tizen-dotnet.ks
+ if [ -d ${__ROOTFS_DIR} ]; then
+ mv ${__ROOTFS_DIR} ${__ROOTFS_DIR}_build
+ fi
+ mkdir -p ${__ROOTFS_DIR}
+ tar -zxf mic-output/tizen.tar.gz -C ${__ROOTFS_DIR}
+ apt-get update
+ apt-get -y -qq --force-yes install --reinstall qemu binfmt-support qemu-user-static
+ __qemuARM=$(which qemu-arm-static)
+ cp $__qemuARM ${CORECLR_DIR}/cross/rootfs/armel/usr/bin/
+ cp $__qemuARM ${__ROOTFS_DIR}/usr/bin/
+ popd
+fi
+
+# Mount
+mkdir -p ${__ROOTFS_DIR}${ARM_CHROOT_HOME_DIR}
+mount -t proc /proc ${__ROOTFS_DIR}/proc
+mount -o bind /dev ${__ROOTFS_DIR}/dev
+mount -o bind /dev/pts ${__ROOTFS_DIR}/dev/pts
+mount -o bind /sys ${__ROOTFS_DIR}/sys
+mount -o bind ${CORECLR_DIR} ${__ROOTFS_DIR}${ARM_CHROOT_HOME_DIR}
+
+chroot ${__ROOTFS_DIR} /bin/bash -x <<EOF
+ cd ${ARM_CHROOT_HOME_DIR}
+ ./tests/runtest.sh --sequential\
+ --coreClrBinDir=${ARM_CHROOT_HOME_DIR}/bin/Product/${__buildDirName} \
+ --mscorlibDir=${ARM_CHROOT_HOME_DIR}/bin/Product/${__buildDirName} \
+ --testNativeBinDir=${ARM_CHROOT_HOME_DIR}/bin/obj/${__buildDirName}/tests \
+ --coreFxBinDir=${ARM_CHROOT_HOME_DIR}/bin/CoreFxBinDir \
+ --testRootDir=${ARM_CHROOT_HOME_DIR}/bin/tests/Windows_NT.x64.${__buildConfig} \
+ --testDirFile=${ARM_CHROOT_HOME_DIR}/tests/testsRunningInsideARM.txt
+EOF
diff --git a/tests/scripts/arm64_post_build.py b/tests/scripts/arm64_post_build.py
index 49f5f6a452..4ed8032fc2 100644
--- a/tests/scripts/arm64_post_build.py
+++ b/tests/scripts/arm64_post_build.py
@@ -34,7 +34,7 @@ from collections import defaultdict
g_arm64ci_path = os.path.join(os.environ["USERPROFILE"], "bin")
g_dotnet_url = "https://go.microsoft.com/fwlink/?LinkID=831469"
-g_test_url = "https://clrjit.blob.core.windows.net/arm64ci/CoreCLRTests-2c0a2c05ba82460a8d8a4b1e2d98e908e59d5d54.zip"
+g_test_url = "https://clrjit.blob.core.windows.net/arm64ci/CoreCLR-Pri1Testing.zip"
g_x64_client_url = "https://clrjit.blob.core.windows.net/arm64ci/x64_client.zip"
################################################################################
diff --git a/tests/scripts/build_illink.cmd b/tests/scripts/build_illink.cmd
new file mode 100644
index 0000000000..726bd44c10
--- /dev/null
+++ b/tests/scripts/build_illink.cmd
@@ -0,0 +1,44 @@
+@if not defined _echo @echo off
+setlocal
+set rid=win10-x64
+
+:Arg_Loop
+if "%1" == "" goto ArgsDone
+if /i "%1" == "-?" goto Usage
+if /i "%1" == "-h" goto Usage
+if /i "%1" == "-help" goto Usage
+if /i "%1" == "clone" (set doClone=1&shift&goto Arg_Loop)
+if /i "%1" == "x64" (set rid=win10-x64&shift&goto Arg_Loop)
+if /i "%1" == "x86" (set rid=win10-x86&shift&goto Arg_Loop)
+
+goto Usage
+
+:ArgsDone
+
+if defined doClone (
+ git clone --recursive https://github.com/mono/linker
+)
+
+pushd linker\corebuild
+call restore.cmd -r %rid%
+cd ..\linker
+..\corebuild\Tools\dotnetcli\dotnet.exe publish -r %rid% -c netcore_Release
+popd
+
+echo Built %cd%\linker\linker\bin\netcore_Release\netcoreapp2.0\%rid%\publish\illink.exe
+
+:Done
+exit /b 0
+
+:Usage
+echo.
+echo.Build ILLINKer for CoreCLR testing
+echo.
+echo.Usage:
+echo build_illink.cmd [clone] [setenv] [arch]
+echo.Where:
+echo.-? -h -help: view this message.
+echo.clone : Clone the repository https://github.com/mono/linker
+echo.arch : The architecture to build: x64 (default) or x86
+echo.
+goto Done
diff --git a/tests/scripts/build_illink.sh b/tests/scripts/build_illink.sh
new file mode 100755
index 0000000000..73bbd1a9fd
--- /dev/null
+++ b/tests/scripts/build_illink.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+
+function print_usage {
+ echo ''
+ echo 'Build ILLINKer for CoreCLR testing'
+ echo ''
+ echo 'Optional arguments:'
+ echo ' -?|-h|--help : Show usage information.'
+ echo ' --clone : Clone the repository https://github.com/mono/linker'
+ echo ' --arch : The architecture to build (default: x64)'
+ echo ' --os : The OS to build for; combined with --arch to form the runtime id (default: ubuntu, e.g. ubuntu.16.04)'
+ echo ''
+}
+
+# Argument variables
+EXIT_CODE_SUCCESS=0
+clone=
+setenv=
+os='ubuntu'
+arch='x64'
+
+for i in "$@"
+do
+ case $i in
+ -?|-h|--help)
+ print_usage
+ exit $EXIT_CODE_SUCCESS
+ ;;
+
+ --clone)
+ clone=1
+ ;;
+
+ --arch=*)
+ arch=${i#*=}
+ ;;
+
+ --os=*)
+ os=${i#*=}
+ ;;
+
+ *)
+ echo "Unknown switch: $i"
+ print_usage
+ exit $EXIT_CODE_SUCCESS
+ ;;
+ esac
+done
+
+rid="$os-$arch"
+
+if [ ! -z "$clone" ]; then
+ git clone --recursive https://github.com/mono/linker
+fi
+
+pushd linker/corebuild
+./restore.sh -r $rid
+cd ../linker
+../corebuild/Tools/dotnetcli/dotnet publish -r $rid -c netcore_Release
+popd
+
+dir=$(pwd)
+output="$dir/linker/linker/bin/netcore_Release/netcoreapp2.0/$rid/publish/illink"
+echo Built $output
+
+exit $EXIT_CODE_SUCCESS
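A hypothetical invocation of this script from a CoreCLR checkout, producing the runtime identifier ubuntu.16.04-x64 (the values are illustrative):

    ./tests/scripts/build_illink.sh --clone --os=ubuntu.16.04 --arch=x64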
diff --git a/tests/scripts/format.py b/tests/scripts/format.py
index 9736c033b2..4c84cb532b 100644
--- a/tests/scripts/format.py
+++ b/tests/scripts/format.py
@@ -84,13 +84,13 @@ def main(argv):
print("Downloading .Net CLI")
if platform == 'Linux':
- dotnetcliUrl = "https://go.microsoft.com/fwlink/?LinkID=809129"
+ dotnetcliUrl = "https://go.microsoft.com/fwlink/?linkid=839628"
dotnetcliFilename = os.path.join(dotnetcliPath, 'dotnetcli-jitutils.tar.gz')
elif platform == 'OSX':
- dotnetcliUrl = "https://go.microsoft.com/fwlink/?LinkID=809128"
+ dotnetcliUrl = "https://go.microsoft.com/fwlink/?linkid=839641"
dotnetcliFilename = os.path.join(dotnetcliPath, 'dotnetcli-jitutils.tar.gz')
elif platform == 'Windows_NT':
- dotnetcliUrl = "https://go.microsoft.com/fwlink/?LinkID=809126"
+ dotnetcliUrl = "https://go.microsoft.com/fwlink/?linkid=839634"
dotnetcliFilename = os.path.join(dotnetcliPath, 'dotnetcli-jitutils.zip')
else:
print('Unknown os ', os)
@@ -160,7 +160,7 @@ def main(argv):
# Run bootstrap
- my_env["PATH"] += os.pathsep + dotnetcliPath
+ my_env["PATH"] = dotnetcliPath + os.pathsep + my_env["PATH"]
if platform == 'Linux' or platform == 'OSX':
print("Running bootstrap")
proc = subprocess.Popen(['bash', bootstrapPath], env=my_env)
@@ -173,7 +173,7 @@ def main(argv):
returncode = 0
jitutilsBin = os.path.join(coreclr, "jitutils", "bin")
- my_env["PATH"] += os.pathsep + jitutilsBin
+ my_env["PATH"] = jitutilsBin + os.pathsep + my_env["PATH"]
current_dir = os.getcwd()
if not os.path.isdir(jitutilsBin):
diff --git a/tests/scripts/lst_creator.py b/tests/scripts/lst_creator.py
index 7f477ff2bc..deebd2fa04 100644..100755
--- a/tests/scripts/lst_creator.py
+++ b/tests/scripts/lst_creator.py
@@ -7,227 +7,398 @@
##
# Title :lst_creator.py
#
-# Script to create a working list file from the test overlay directory. This
+# Script to create a working list file from the test overlay directory. This
# will be used by smarty to run tests.
#
################################################################################
+import argparse
+import datetime
import os
-import os.path
+import re
import sys
+from collections import defaultdict
+
################################################################################
-# Globals
+# Argument Parser
################################################################################
-g_debug = False
-g_name = ""
-g_old_list_file = ""
+DESCRIPTION = """Python script to help create/update the arm64 lstFile
+ """
+
+PARSER = argparse.ArgumentParser(description=DESCRIPTION)
-def print_debug(str):
- if g_debug is True:
- print str
+PARSER.add_argument("--test", dest="testing", action="store_true", default=False)
+PARSER.add_argument("-lst_file", dest="old_list_file", nargs='?', default=None)
+PARSER.add_argument("-test_dir", dest="test_dir", nargs='?', default=None)
+PARSER.add_argument("-commit_hash", dest="commit_hash", nargs='?', default=None)
+PARSER.add_argument("-failures_csv", dest="failures_csv", nargs='?', default=None)
+PARSER.add_argument("--unset_new", dest="unset_new", action="store_true", default=False)
+ARGS = PARSER.parse_args(sys.argv[1:])
################################################################################
-# Requires the OSS test overlay directory to be built and passed.
-#
-# find_tests
+# Helper Functions
################################################################################
-def find_tests(base_dir, cat = None, dir = None):
- # Begin walking this folder recursively.
- # Look for any file that ends with .cmd
- # we will be returning a list of these files.
+def create_list_file(file_name, metadata):
+ """ Create a lstFile given a set of metadata input
- subdir_list = []
- cmd_list = []
+ Args:
+ file_name (str): Location to write the lstFile
+ metadata ({ str: { str: str } }): Dictionary mapping test name to
+ : a dictionary of key/value
+ : attributes.
- def traverse_dir(dir, cat, cat_dir, add_cat = False):
- if os.path.basename(dir) == cat_dir:
- add_cat = True
+ """
- dir = os.path.abspath(dir)
+ current_time = datetime.datetime.now()
+ current_time_str = current_time.strftime("%d-%b-%Y %H:%M:%S%z")
- for filename in os.listdir(dir):
- print_debug(filename)
+ metadata = [metadata[item] for item in metadata]
+ metadata = sorted(metadata, key=lambda item: item[1])
- filename = os.path.join(dir, filename)
- print_debug("Full Path: " + filename)
+ new_metadata = [item for item in metadata if item[1] == -1]
+ old_metadata = [item for item in metadata if item[1] != -1]
- if os.path.isfile(filename) and filename.split(".")[-1] == "cmd":
- if add_cat is True:
- cmd_list.append((filename, cat))
- else:
- cmd_list.append((filename, g_name))
+ with open(file_name, "w") as file_handle:
+ file_handle.write("## This list file has been produced automatically. Any changes" + os.pathsep)
+ file_handle.write("## are subject to being overwritten when reproducing this file." + os.pathsep))
+ file_handle.write("## " + os.pathsep))
+ file_handle.write("## Last Updated: %s%s" % (current_time_str, os.pathsep))
+ file_handle.write("## Commit: %s%s" % (ARGS.commit_hash, os.pathsep))
+ file_handle.write("## " + os.pathsep))
- elif os.path.isdir(filename):
- traverse_dir(filename, cat, cat_dir, add_cat)
+ order = ["RelativePath", "WorkingDir", "Expected",
+ "MaxAllowedDurationSeconds", "Categories", "HostStyle"]
- traverse_dir(base_dir, cat, dir)
+ def write_metadata(data, count=None):
+ for item in data:
+ test_name = item[0]["RelativePath"]
+ if item[1] != -1:
+ count = item[1]
- return cmd_list
+ item = item[0]
-################################################################################
-# Main
-################################################################################
+ # Get the test name.
+ title = "[%s_%d]" % (test_name.split("\\")[-1], count)
+ count += 1
-if __name__ == "__main__":
- print "Starting lst_creator"
- print "- - - - - - - - - - - - - - - - - - - - - - - - - - - -"
- print
+ file_handle.write("%s\n" % title)
- if len(sys.argv) < 4:
- print "Error, incorrect number of arguments."
- print "Ex usage: python lst_creator <root_oss_test_dir> <lst file name> <optional cat name> <old list file location>"
- print "Tests must be built!"
- exit(1)
+ attribute_str = ""
+ for key in order:
+ attribute_str += "%s=%s%s" % (key, item[key], os.linesep)
- if not os.path.isdir(sys.argv[1]):
- print "Error argument passed is not a valid directory."
- exit(1)
+ file_handle.write(attribute_str + os.linesep)
- g_name = sys.argv[3]
+ write_metadata(old_metadata)
+ write_metadata(new_metadata, old_metadata[-1][1] + 1)
- cat, dirname = None, None
-
- if len(sys.argv) > 4:
- if sys.argv[4] == "-D":
- cat = sys.argv[5]
- dirname = sys.argv[6]
+def create_metadata(tests):
+ """ Given a set of tests create the metadata around them
- elif sys.argv[4] != "-D":
- g_old_list_file = sys.argv[4]
+ Args:
+ tests ([str]): List of tests for which to determine metadata
- if not os.path.isfile(g_old_list_file):
+ Returns:
+ test_metadata ({ str: { str: str } }): Dictionary mapping test name to
+ : a dictionary of key/value
+ : attributes.
- print "Error, old list file must be valid."
- exit(1)
+ """
+ test_metadata = defaultdict(lambda: None)
- cmd_list = find_tests(sys.argv[1], cat, dirname)
+ failures_csv = ARGS.failures_csv
- print "Found " + str(len(cmd_list)) + " tests to add."
- print
+ failure_information = defaultdict(lambda: None)
+
+ if failures_csv is not None:
+ lines = []
+ assert(os.path.isfile(failures_csv))
+
+ with open(failures_csv, "r") as file_handle:
+ lines = file_handle.readlines()
+
+ try:
+ for line in lines:
+ split = line.split(",")
+ relative_path = split[0].replace("/", "\\")
+ category = split[1]
+
+ failure_information[relative_path] = category.strip()
+ except:
+ raise Exception("Error. CSV format expects: relativepath,category")
+
+ for test in tests:
+ working_directory = os.path.dirname(test).replace("/", "\\")
+
+ # Make sure the tests use the Windows \ separator.
+ relative_path = test.replace("/", "\\")
+ max_duration = "600"
+ categories = "EXPECTED_PASS"
+ expected = "0"
+ host_style = "0"
+
+ metadata = defaultdict(lambda: None)
+ metadata["RelativePath"] = relative_path
+ metadata["WorkingDir"] = working_directory
+ metadata["MaxAllowedDurationSeconds"] = max_duration
+ metadata["HostStyle"] = host_style
+ metadata["Expected"] = expected
+ metadata["Categories"] = categories
- if g_old_list_file is not "":
- print "Updating the old list file"
+ if failure_information[relative_path] is not None:
+ metadata["Categories"] = failure_information[relative_path]
- else:
- print "Creating the lst file."
+ test_metadata[relative_path] = metadata
- unique_output = dict()
- largest_value = 0
+ return test_metadata
- # If there was an old list file passed. Parse it for all the old tests.
+def get_all_tests(base_dir):
+ """ Find all of the tests in the enlistment
- if g_old_list_file is not "":
- old_list_file_lines = []
+ Args:
+ base_dir (str): Directory to start traversing from
- with open(sys.argv[4]) as lst_file_handle:
+ Returns:
+ test_list ([str]): List of the tests. Note this is defined to be every
+ : cmd file under the base_dir.
- old_list_file_lines = lst_file_handle.readlines()
+ Note:
+ To find the tests correctly you must build the tests correctly and
+ pass that directory. This method will NOT check to make sure that
+ this has been done correctly.
- for line in old_list_file_lines:
- split_line = line.split("[")
+ This is a recursive method.
- # We only need the test names
- # which come in as [ testname_number ]
- if len(split_line) == 1:
- continue
+ """
- # This is a test name, start splitting
+ def get_all_tests_helper(working_dir):
+ """ Helper function to recursively get all tests.
+ """
- split_line = split_line[1].split("]")
- split_line = split_line[0].split("_")
+ assert os.path.isdir(working_dir)
- if largest_value < int(split_line[-1]):
- largest_value = int(split_line[-1])
+ items = os.listdir(working_dir)
+ items = [os.path.join(working_dir, item) for item in items]
+ dirs = [item for item in items if os.path.isdir(item)]
+ tests = [item for item in items if ".cmd" in item]
- test_name = "_".join(split_line[:-1])
- if len(test_name.split("exe")) == 1:
- # Error, name is not an exe.
- print "Error"
+ for item in dirs:
+ tests += get_all_tests_helper(item)
- sys.exit(1)
+ return tests
- unique_output[test_name] = True
+ # Recursively get all of the tests in the directory.
+ tests = get_all_tests_helper(base_dir)
- print str(len(unique_output)) + " tests found in the old lstFile."
+ # Find the correct base directory for the tests.
+ common_prefix = os.path.commonprefix(tests)
- output = []
+ if common_prefix is not None:
+ tests = [test.replace(common_prefix, "") for test in tests]
- repeat_count = 0
- count = largest_value
+ return tests
- for line in cmd_list:
- path, cat = line[0], line[1]
+def log(message):
+ """ Log a debug message. This is to be used when the --test option is passed
+ """
- # get the relative path
- prefix = os.path.commonprefix([path, sys.argv[1]])
- rel_path = os.path.relpath(path, prefix)
+ if ARGS.testing is True:
+ print message
- cmd_contents = None
- with open(path) as cmd_file_handle:
- cmd_contents = cmd_file_handle.readlines()
+def parse_lst_file(lst_file):
+ """Parse a lstFile given.
- expected_exit_code_line = None
+ Args:
+ lst_file(str): location of the lstFile
- for cmd_line in cmd_contents:
- if cmd_line.find("CLRTestExpectedExitCode") != -1:
- expected_exit_code_line = cmd_line
- break
+ Returns:
+ test_metadata (defaultdict(lambda: None)): Key is test name.
- if expected_exit_code_line is None:
- print "Error, cmd file missing contents. Skipping, however, the test suite was not built correctly."
- print path
+ """
+
+ assert os.path.isfile(lst_file)
+
+ contents = None
+ with open(lst_file) as file_handle:
+ contents = file_handle.read()
+
+ split = re.split("\[(.*?)\]", contents)
+
+ unique_name = None
+ test_metadata = defaultdict(lambda: None)
+ for item in split:
+ if len(item) == 0 or item[0] == "#":
continue
- expected = expected_exit_code_line[expected_exit_code_line.find("CLRTestExpectedExitCode") + (len("CLRTestExpectedExitCode") + 1):].strip()
- max_allowed_duration = 600
- categories = cat
- build_type = "CoreSys"
- relative_path = rel_path[:rel_path.find("cmd")] + "exe"
- working_dir = os.path.dirname(rel_path)
- test_name = os.path.basename(relative_path)
+ if unique_name is None:
+ unique_name = item
+ else:
+ index = int(unique_name.split("_")[-1])
+ metadata = defaultdict(lambda: None)
- try:
- if unique_output[test_name] == True:
- repeat_count += 1
+ attributes = item.split(os.linesep)
+ for attribute in attributes:
+ # Skip the removed new lines.
+ if len(attribute) == 0:
+ continue
- continue
+ pair = attribute.split("=")
+ key = pair[0].strip()
+ value = pair[1].strip()
- except:
- output.append("[" + test_name + "_" + str(count) + "]" + "\n")
+ metadata[key] = value
- count = count + 1
+ # Relative path is unique, while the test name alone is not.
+ unique_name = metadata["RelativePath"]
+ test_metadata[unique_name] = (metadata, index)
+ unique_name = None
- output.append("RelativePath=" + os.path.relpath(relative_path) + "\n")
- output.append("WorkingDir=" + os.path.relpath(working_dir) + "\n")
- output.append("Expected=" + expected + "\n")
- output.append("MaxAllowedDurationSeconds=" + str(max_allowed_duration) + "\n")
- output.append("Categories=" + categories + "\n")
- output.append("HostStyle=Any")
- output.append("\n")
+ return test_metadata
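For orientation, the sections this parser reads (and that create_list_file writes back out) look roughly like the following made-up entry; the name, index and path are invented, while the attribute keys and default values mirror the code above:

    [SomeTest.cmd_1234]
    RelativePath=JIT\Regression\SomeTest.cmd
    WorkingDir=JIT\Regression
    Expected=0
    MaxAllowedDurationSeconds=600
    Categories=EXPECTED_PASS
    HostStyle=0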
- print
- print "Writing out lst file."
+################################################################################
+# Main
+################################################################################
+
+def main(args):
+ """ Main method
+ Args:
+ args ([str]): the arguments passed to the program.
+
+ """
- if repeat_count > 0:
- print "Found " + str(repeat_count) + " old tests."
-
- # If we found repeats then we open file to append not write.
+ # Assign all of the passed variables.
+ test_dir = args.test_dir
+ old_list_file = args.old_list_file
+ commit_hash = args.commit_hash
+ unset_new = args.unset_new
- with open(g_old_list_file, 'a') as list_file_handle:
- list_file_handle.write("\n")
+ if commit_hash is None:
+ print "Error please provide a commit hash."
+ sys.exit(1)
- for line in output:
- list_file_handle.write(line)
+ if test_dir is None or not os.path.isdir(test_dir):
+ print "Error the test directory passed is not a valid directory."
+ sys.exit(1)
+ tests = get_all_tests(test_dir)
+ print "Found %d tests in the test directory." % (len(tests))
+ print
- else:
- with open(sys.argv[2], 'w') as list_file_handle:
+ old_test_metadata = None
+ # If we are updating an old lstFile. Get all of the tests from that
+ # lstFile and their metadata.
+ if old_list_file is not None:
+ old_test_metadata = parse_lst_file(old_list_file)
+
+ print "Found %d tests in the old lstFile." % (len(old_test_metadata))
+
+ test_metadata = create_metadata(tests)
+
+ if old_test_metadata is not None:
+ # If the new information has been changed, we will need to update
+ # the lstFile.
+
+ new_test_count = 0
+ update_count = 0
+ for test_name in test_metadata:
+ new_metadata = test_metadata[test_name]
+ old_metadata = old_test_metadata[test_name]
+
+ attributes = None
+ if old_test_metadata[test_name] is None:
+ new_test_count += 1
+ new_metadata["Categories"] += ";NEW"
+ old_test_metadata[test_name] = (new_metadata, -1)
+
+ else:
+ index = old_metadata[1]
+ old_metadata = old_metadata[0]
+ attributes = set(old_metadata.keys() + new_metadata.keys())
+
+ # Make sure we go through all attributes of both sets.
+ # If an attribute exists in one set but not the other it will
+ # be None. If the new metadata has a new attribute, write this
+ # into the old metadata. If the old metadata has an attribute
+ # that does not exist in the new set. Do not remove it.
+
+ overwritten = False
+
+ for attribute in attributes:
+ if attribute == "MaxAllowedDurationSeconds":
+ continue
+ if attribute == "Categories":
+ new_split = new_metadata["Categories"].split(";")
+ old_split = old_metadata["Categories"].split(";")
+
+ if unset_new:
+ if "NEW" in old_split:
+ old_split.remove("NEW")
+
+ # If an old test is marked as a failing test. Make
+ # sure that we carry that information along.
+ if "EXPECTED_PASS" in new_split and "EXPECTED_FAIL" in old_split:
+ new_split.remove("EXPECTED_PASS")
+
+ # If it used to be marked as pass but it is now failing. Make sure
+ # we remove the old category.
+ elif "EXPECTED_FAIL" in new_split and "EXPECTED_PASS" in old_split:
+ old_split.remove("EXPECTED_PASS")
+
+ joined_categories = set(old_split + new_split)
+
+ overwritten = True
+ ordered_categories = []
+ for item in old_split:
+ if item in joined_categories:
+ ordered_categories.append(item)
+ joined_categories.remove(item)
+
+ old_metadata[attribute] = ";".join(ordered_categories)
+ old_metadata[attribute] = old_metadata[attribute] + ";" + ";".join(joined_categories) if len(joined_categories) > 0 else old_metadata[attribute]
+ old_test_metadata[test_name] = (old_metadata, index)
+
+ elif new_metadata[attribute] != old_metadata[attribute]:
+ # If the old information is not the same as the new
+ # information, keep the new information. overwrite the old
+ # metadata.
+ if new_metadata[attribute] is not None:
+ overwritten = True
+ old_metadata[attribute] = new_metadata[attribute]
+
+ old_test_metadata[test_name] = (old_metadata, index)
+
+ if overwritten:
+ update_count += 1
+
+ tests_removed = 0
+ tests_to_remove = []
+ for old_test_name in old_test_metadata:
+ # Remove all old unreferenced tests
+ if old_test_name not in test_metadata:
+ tests_to_remove.append(old_test_name)
+ tests_removed += 1
+
+ for test_name in tests_to_remove:
+ old_test_metadata.pop(test_name)
+
+ print "Added %d tests." % new_test_count
+ print "Removed %d tests." % tests_removed
+ print "Finished join. %d tests updated." % update_count
+
+ test_metadata = old_test_metadata
+
+ # Overwrite the old file if provided, else use the generic name Tests.lst
+ lst_file = "Tests.lst" if old_list_file is None else old_list_file
+
+ # Write out the new lstFile
+ create_list_file(lst_file, test_metadata)
- list_file_handle.write("##=== Test Definitions ===============================\n")
+################################################################################
+################################################################################
- for line in output:
- list_file_handle.write(line)
+if __name__ == "__main__":
+ main(ARGS)
diff --git a/tests/scripts/optdata/bootstrap.py b/tests/scripts/optdata/bootstrap.py
new file mode 100755
index 0000000000..1cf55fa70c
--- /dev/null
+++ b/tests/scripts/optdata/bootstrap.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+
+"""
+ This script prepares the local source tree to be built with
+ custom optdata. Simply run this script and follow the
+ instructions to inject manually created optdata into the build.
+"""
+
+import argparse
+import json
+import os
+from os import path
+import shutil
+import subprocess
+import sys
+
+# Display the docstring if the user passes -h|--help
+argparse.ArgumentParser(description=__doc__).parse_args()
+
+SCRIPT_ROOT = path.dirname(path.realpath(__file__))
+REPO_ROOT = path.realpath(path.join(SCRIPT_ROOT, '..', '..', '..'))
+
+NUGET_SRC_DIR = path.join(REPO_ROOT, 'src', '.nuget')
+assert path.exists(NUGET_SRC_DIR), \
+ "Expected %s to exist; please check whether REPO_ROOT is really %s" % (NUGET_SRC_DIR, REPO_ROOT)
+
+ORIGIN_FILE = path.join(SCRIPT_ROOT, 'project.json')
+TARGET_FILE = path.join(NUGET_SRC_DIR, 'optdata', 'project.json')
+
+ARCH_LIST = ['x64', 'x86']
+TOOL_LIST = ['IBC', 'PGO']
+
+def get_buildos():
+ """Returns the Build_OS component used by the build system."""
+ if os.name == 'nt':
+ return 'Windows_NT'
+ else:
+ sysname = os.uname()[0]
+ return 'OSX' if sysname.lower() == 'Darwin'.lower() else sysname
+
+def get_optdata_version(tool):
+ """Returns the version string specified in project.json for the given tool."""
+ package_name = 'optimization.%s.CoreCLR' % (tool)
+ with open(ORIGIN_FILE) as json_file:
+ return json.load(json_file)['dependencies'][package_name]
+
+def get_optdata_dir(tool, arch):
+ """Returns an absolute path to the directory that should contain optdata given a tool,arch"""
+ package_name = 'optimization.%s-%s.%s.CoreCLR' % (get_buildos(), arch, tool)
+ package_version = get_optdata_version(tool)
+ return path.join(REPO_ROOT, 'packages', package_name, package_version, 'data')
+
+def check_for_unstaged_changes(file_path):
+ """Returns whether a file in git has untracked changes."""
+ if not path.exists(file_path):
+ return False
+ try:
+ subprocess.check_call(['git', 'diff', '--quiet', '--', file_path])
+ return False
+ except subprocess.CalledProcessError:
+ return True
+
+def main():
+ """Entry point"""
+ if check_for_unstaged_changes(TARGET_FILE):
+ print("ERROR: You seem to have unstaged changes to %s that would be overwritten."
+ % (TARGET_FILE))
+ print("Please clean, commit, or stash them before running this script.")
+ return 1
+
+ if not path.exists(path.dirname(TARGET_FILE)):
+ os.makedirs(path.dirname(TARGET_FILE))
+ shutil.copyfile(ORIGIN_FILE, TARGET_FILE)
+
+ print("Bootstrapping optdata is complete.")
+ for tool in TOOL_LIST:
+ for arch in ARCH_LIST:
+ optdata_dir = get_optdata_dir(tool, arch)
+ print(" * Copy %s %s files into: %s" % (arch, tool, optdata_dir))
+ print("NOTE: Make sure to add 'skiprestoreoptdata' as a switch on the build command line!")
+
+ return 0
+
+if __name__ == '__main__':
+ sys.exit(main())
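Based on the instructions the script prints, a typical workflow would be roughly the following; the copy step and the exact build arguments are illustrative, assuming the build script accepts the skiprestoreoptdata switch mentioned in the note above:

    python tests/scripts/optdata/bootstrap.py
    # copy the hand-crafted IBC/PGO files into the packages/.../data directories listed by the script
    ./build.sh x64 Release skiprestoreoptdata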
diff --git a/tests/scripts/optdata/project.json b/tests/scripts/optdata/project.json
new file mode 100644
index 0000000000..ae8f9463c4
--- /dev/null
+++ b/tests/scripts/optdata/project.json
@@ -0,0 +1,12 @@
+{
+ "dependencies": {
+ "optimization.IBC.CoreCLR": "99.99.99-test",
+ "optimization.PGO.CoreCLR": "99.99.99-test"
+ },
+ "frameworks": {
+ "netstandard": {}
+ },
+ "runtimes": {
+ "win7-x64": {}
+ }
+}
diff --git a/tests/scripts/perf-prep.sh b/tests/scripts/perf-prep.sh
index effdc0a615..4468dbb6f7 100755
--- a/tests/scripts/perf-prep.sh
+++ b/tests/scripts/perf-prep.sh
@@ -11,6 +11,8 @@ function print_usage {
echo ''
echo 'Required arguments:'
echo ' --branch=<path> : branch where coreclr/corefx/test bits are copied from (e.g. dotnet_coreclr).'
+ echo 'Optional arguments:'
+ echo ' --throughput : if we are running setup for a throughput run.'
}
# Exit code constants
@@ -20,6 +22,7 @@ readonly EXIT_CODE_SUCCESS=0 # Script ran normally.
perfArch="x64"
perfConfig="Release"
perfBranch=
+throughput=0
for i in "$@"
do
@@ -31,6 +34,9 @@ do
--branch=*)
perfBranch=${i#*=}
;;
+ -t|--throughput)
+ throughput=1
+ ;;
*)
echo "Unknown switch: $i"
print_usage
@@ -57,22 +63,38 @@ unzip -q -o benchview.zip -d ./tests/scripts/Microsoft.BenchView.JSONFormat
python3.5 --version
python3.5 ./tests/scripts/Microsoft.BenchView.JSONFormat/tools/machinedata.py
-# Set up the copies
-# Coreclr build containing the tests and mscorlib
-curl https://ci.dot.net/job/$perfBranch/job/master/job/release_windows_nt/lastSuccessfulBuild/artifact/bin/tests/tests.zip -o tests.zip
+if [ $throughput -eq 1 ]; then
+ # Clone corefx
+ if [ -d "_" ]; then
+ rm -r -f _
+ fi
+ mkdir _
+ git clone https://github.com/dotnet/corefx.git _/fx
+ cd _/fx
+
+ # Checkout the specific commit we want
+ git checkout f0b9e238c08f62a1db90ef0378980ac771204d35
+
+ # Build
+ ./build.sh -release
+else
+ # Set up the copies
+ # Coreclr build containing the tests and mscorlib
+ curl https://ci.dot.net/job/$perfBranch/job/master/job/release_windows_nt/lastSuccessfulBuild/artifact/bin/tests/tests.zip -o tests.zip
-# Corefx components. We now have full stack builds on all distros we test here, so we can copy straight from CoreFX jobs.
-mkdir corefx
-curl https://ci.dot.net/job/dotnet_corefx/job/master/job/ubuntu14.04_release/lastSuccessfulBuild/artifact/bin/build.tar.gz -o ./corefx/build.tar.gz
+ # Corefx components. We now have full stack builds on all distros we test here, so we can copy straight from CoreFX jobs.
+ mkdir corefx
+ curl https://ci.dot.net/job/dotnet_corefx/job/master/job/ubuntu14.04_release/lastSuccessfulBuild/artifact/bin/build.tar.gz -o ./corefx/build.tar.gz
-# Unpack the corefx binaries
-pushd corefx > /dev/null
-tar -xf build.tar.gz
-rm build.tar.gz
-popd > /dev/null
+ # Unpack the corefx binaries
+ pushd corefx > /dev/null
+ tar -xf build.tar.gz
+ rm build.tar.gz
+ popd > /dev/null
-# Unzip the tests first. Exit with 0
-mkdir bin
-mkdir bin/tests
-unzip -q -o tests.zip -d ./bin/tests/Windows_NT.$perfArch.$perfConfig || exit 0
-echo "unzip tests to ./bin/tests/Windows_NT.$perfArch.$perfConfig"
+ # Unzip the tests first. Exit with 0
+ mkdir bin
+ mkdir bin/tests
+ unzip -q -o tests.zip -d ./bin/tests/Windows_NT.$perfArch.$perfConfig || exit 0
+ echo "unzip tests to ./bin/tests/Windows_NT.$perfArch.$perfConfig"
+fi
diff --git a/tests/scripts/run-corefx-tests.py b/tests/scripts/run-corefx-tests.py
index f0111da19c..2340a64d00 100644
--- a/tests/scripts/run-corefx-tests.py
+++ b/tests/scripts/run-corefx-tests.py
@@ -167,19 +167,6 @@ def nth_dirname(path, n):
return path
-
-def dotnet_rid_os(dotnet_path):
- """ Determine the OS identifier from the RID as reported by dotnet
- Args:
- dotnet_path (str): path to folder containing dotnet(.exe)
- Returns:
- rid_os (str): OS component of RID as reported by dotnet
- """
- dotnet_info = subprocess.check_output([os.path.join(dotnet_path, 'dotnet'), '--info'])
- m = re.search('^\s*RID:\s+([^-]*)-(\S*)\s*$', dotnet_info, re.MULTILINE)
- return m.group(1)
-
-
def log(message):
""" Print logging information
Args:
@@ -263,37 +250,21 @@ def main(args):
# Determine the RID to specify to the corefx build scripts. This seems to
# be way harder than it ought to be.
- if testing:
- rid_os = dotnet_rid_os('')
- else:
- if Is_windows:
- rid_os = "win7"
- else:
- rid_os = dotnet_rid_os(os.path.join(clr_root, 'Tools', 'dotnetcli'))
-
# Gather up some arguments to pass to both build and build-tests.
- config_args = '-Release -RuntimeOS=%s -ArchGroup=%s' % (rid_os, arch)
+ config_args = '-Release -os:%s -buildArch:%s' % (clr_os, arch)
# Run the primary (non-test) corefx build
- command = ' '.join(('build.cmd' if Is_windows else './build.sh', config_args))
+ command = ' '.join(('build.cmd' if Is_windows else './build.sh',
+ config_args,
+ '-- /p:CoreCLROverridePath=%s' % core_root))
+
log(command)
returncode = 0 if testing else os.system(command)
if returncode != 0:
sys.exit(returncode)
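On Linux x64, for example, the command logged above would look roughly like the following; the override path is illustrative:

    ./build.sh -Release -os:Linux -buildArch:x64 -- /p:CoreCLROverridePath=/home/user/coreclr/bin/Product/Linux.x64.Release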
- # Copy the coreclr runtime we wish to run tests against. This is the recommended
- # hack until a full-stack test solution is ready. This assumes there is a single
- # directory under <fx_root>/bin/runtime into which we copy coreclr binaries. We
- # assume the appropriate coreclr has already been built.
-
- fx_runtime_dir = os.path.join(fx_root, 'bin', 'runtime')
- overlay_dest = os.path.join(fx_runtime_dir, os.listdir(fx_runtime_dir)[0])
- log('[overlay] %s -> %s' % (core_root, overlay_dest))
- if not testing:
- distutils.dir_util.copy_tree(core_root, overlay_dest)
-
# Build the build-tests command line.
if Is_windows:
diff --git a/tests/scripts/run-throughput-perf.py b/tests/scripts/run-throughput-perf.py
new file mode 100644
index 0000000000..ee6e4a3c58
--- /dev/null
+++ b/tests/scripts/run-throughput-perf.py
@@ -0,0 +1,400 @@
+#!/usr/bin/env python
+#
+# Licensed to the .NET Foundation under one or more agreements.
+# The .NET Foundation licenses this file to you under the MIT license.
+# See the LICENSE file in the project root for more information.
+#
+##########################################################################
+##########################################################################
+#
+# Module: run-throughput-perf.py
+#
+# Notes: runs throughput testing for coreclr and uploads the timing results
+# to benchview
+#
+#
+##########################################################################
+##########################################################################
+
+import argparse
+import distutils.dir_util
+import os
+import re
+import shutil
+import subprocess
+import sys
+import time
+import timeit
+import stat
+import csv
+
+##########################################################################
+# Globals
+##########################################################################
+
+# List of dlls we want to exclude
+dll_exclude_list = {
+ 'Windows_NT': [
+ # Require Newtonsoft.Json
+ "Microsoft.DotNet.ProjectModel.dll",
+ "Microsoft.Extensions.DependencyModel.dll",
+ # Require System.Security.Principal.Windows
+ "System.Net.Requests.dll",
+ "System.Net.Security.dll",
+ "System.Net.Sockets.dll"
+ ],
+ 'Linux' : [
+ # Required System.Runtime.WindowsRuntime
+ "System.Runtime.WindowsRuntime.UI.Xaml.dll"
+ ]
+}
+
+jit_list = {
+ 'Windows_NT': {
+ 'x64': 'clrjit.dll',
+ 'x86': 'clrjit.dll',
+ 'x86jit32': 'compatjit.dll'
+ },
+ 'Linux': {
+ 'x64': 'libclrjit.so'
+ }
+}
+
+os_group_list = {
+ 'Windows_NT': 'Windows_NT',
+ 'Ubuntu14.04': 'Linux'
+}
+
+python_exe_list = {
+ 'Windows_NT': 'py',
+ 'Linux': 'python3.5'
+}
+
+##########################################################################
+# Argument Parser
+##########################################################################
+
+description = 'Tool to collect throughput performance data'
+
+parser = argparse.ArgumentParser(description=description)
+
+parser.add_argument('-arch', dest='arch', default='x64')
+parser.add_argument('-configuration', dest='build_type', default='Release')
+parser.add_argument('-run_type', dest='run_type', default='rolling')
+parser.add_argument('-os', dest='operating_system', default='Windows_NT')
+parser.add_argument('-clr_root', dest='clr_root', default=None)
+parser.add_argument('-assembly_root', dest='assembly_root', default=None)
+parser.add_argument('-benchview_path', dest='benchview_path', default=None)
+
+##########################################################################
+# Helper Functions
+##########################################################################
+
+def validate_args(args):
+ """ Validate all of the arguments parsed.
+ Args:
+ args (argparser.ArgumentParser): Args parsed by the argument parser.
+ Returns:
+ (arch, operating_system, os_group, build_type, run_type, clr_root, assembly_root, benchview_path)
+ (str, str, str, str, str, str, str, str)
+ Notes:
+ If the arguments are valid then return them all in a tuple. If not, raise
+ an exception stating x argument is incorrect.
+ """
+
+ arch = args.arch
+ build_type = args.build_type
+ run_type = args.run_type
+ operating_system = args.operating_system
+ clr_root = args.clr_root
+ assembly_root = args.assembly_root
+ benchview_path = args.benchview_path
+
+ def validate_arg(arg, check):
+ """ Validate an individual arg
+ Args:
+ arg (str|bool): argument to be validated
+ check (lambda: x-> bool): test that returns either True or False
+ : based on whether the check passes.
+
+ Returns:
+ is_valid (bool): Is the argument valid?
+ """
+
+ helper = lambda item: item is not None and check(item)
+
+ if not helper(arg):
+ raise Exception('Argument: %s is not valid.' % (arg))
+
+ valid_archs = {'Windows_NT': ['x86', 'x64', 'x86jit32'], 'Linux': ['x64']}
+ valid_build_types = ['Release']
+ valid_run_types = ['rolling', 'private']
+ valid_os = ['Windows_NT', 'Ubuntu14.04']
+
+ arch = next((a for a in valid_archs if a.lower() == arch.lower()), arch)
+ build_type = next((b for b in valid_build_types if b.lower() == build_type.lower()), build_type)
+
+ validate_arg(operating_system, lambda item: item in valid_os)
+
+ os_group = os_group_list[operating_system]
+
+ validate_arg(arch, lambda item: item in valid_archs[os_group])
+ validate_arg(build_type, lambda item: item in valid_build_types)
+ validate_arg(run_type, lambda item: item in valid_run_types)
+
+ if clr_root is None:
+ raise Exception('--clr_root must be set')
+ else:
+ clr_root = os.path.normpath(clr_root)
+ validate_arg(clr_root, lambda item: os.path.isdir(clr_root))
+
+ if assembly_root is None:
+ raise Exception('--assembly_root must be set')
+ else:
+ assembly_root = os.path.normpath(assembly_root)
+ validate_arg(assembly_root, lambda item: os.path.isdir(assembly_root))
+
+ if not benchview_path is None:
+ benchview_path = os.path.normpath(benchview_path)
+ validate_arg(benchview_path, lambda item: os.path.isdir(benchview_path))
+
+ args = (arch, operating_system, os_group, build_type, run_type, clr_root, assembly_root, benchview_path)
+
+ # Log configuration
+ log('Configuration:')
+ log(' arch: %s' % arch)
+ log(' os: %s' % operating_system)
+ log(' os_group: %s' % os_group)
+ log(' build_type: %s' % build_type)
+ log(' run_type: %s' % run_type)
+ log(' clr_root: %s' % clr_root)
+ log(' assembly_root: %s' % assembly_root)
+ if not benchview_path is None:
+ log('benchview_path : %s' % benchview_path)
+
+ return args
+
+def nth_dirname(path, n):
+ """ Find the Nth parent directory of the given path
+ Args:
+ path (str): path name containing at least N components
+ n (int): num of basenames to remove
+ Returns:
+ outpath (str): path with the last n components removed
+ Notes:
+ If n is 0, path is returned unmodified
+ """
+
+ assert n >= 0
+
+ for i in range(0, n):
+ path = os.path.dirname(path)
+
+ return path
+
+def del_rw(action, name, exc):
+ os.chmod(name, stat.S_IWRITE)
+ os.remove(name)
+
+def log(message):
+ """ Print logging information
+ Args:
+ message (str): message to be printed
+ """
+
+ print('[%s]: %s' % (sys.argv[0], message))
+
+def generateCSV(dll_name, dll_runtimes):
+ """ Write throuput performance data to a csv file to be consumed by measurement.py
+ Args:
+ dll_name (str): the name of the dll
+ dll_runtimes (float[]): A list of runtimes for each iteration of the performance test
+ """
+
+ csv_file_name = "throughput-%s.csv" % (dll_name)
+ csv_file_path = os.path.join(os.getcwd(), csv_file_name)
+
+ with open(csv_file_path, 'w') as csvfile:
+ output_file = csv.writer(csvfile, delimiter=',', lineterminator='\n')
+
+ for iteration in dll_runtimes:
+ output_file.writerow(["default", "coreclr-crossgen-tp", dll_name, iteration])
+
+ return csv_file_name
+
+def runIterations(dll_name, dll_path, iterations, crossgen_path, jit_path, assemblies_path):
+ """ Run throughput testing for a given dll
+ Args:
+ dll_name: the name of the dll
+ dll_path: the path to the dll
+ iterations: the number of times to run crossgen on the dll
+ crossgen_path: the path to crossgen
+ jit_path: the path to the jit
+ assemblies_path: the path to the assemblies that may be needed for the crossgen run
+ Returns:
+ dll_elapsed_times: a list of the elapsed times for the dll
+ """
+
+ dll_elapsed_times = []
+
+ # Set up arguments for running crossgen
+ run_args = [crossgen_path,
+ '/JITPath',
+ jit_path,
+ '/Platform_Assemblies_Paths',
+ assemblies_path,
+ dll_path
+ ]
+
+ log(" ".join(run_args))
+
+ # timeit.default_timer() returns seconds, so multiply by the multiplier to convert the elapsed time to milliseconds
+ multiplier = 1000
+
+ for iteration in range(0,iterations):
+ proc = subprocess.Popen(run_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ start_time = timeit.default_timer()
+ (out, err) = proc.communicate()
+ end_time = timeit.default_timer()
+
+ if proc.returncode == 0:
+ # Calculate the runtime
+ elapsed_time = (end_time - start_time) * multiplier
+ dll_elapsed_times.append(elapsed_time)
+ else:
+ log("Error in %s" % (dll_name))
+ log(err.decode("utf-8"))
+
+ return dll_elapsed_times
+
+##########################################################################
+# Main
+##########################################################################
+
+def main(args):
+ global dll_exclude_list
+ global jit_list
+ global os_group_list
+ global python_exe_list
+
+ architecture, operating_system, os_group, build_type, run_type, clr_root, assembly_root, benchview_path = validate_args(args)
+ arch = architecture
+
+ if architecture == 'x86jit32':
+ arch = 'x86'
+
+ current_dir = os.getcwd()
+ jit = jit_list[os_group][architecture]
+ crossgen = 'crossgen'
+
+ if os_group == 'Windows_NT':
+ crossgen += '.exe'
+
+ # Make sandbox
+ sandbox_path = os.path.join(clr_root, "sandbox")
+ if os.path.isdir(sandbox_path):
+ shutil.rmtree(sandbox_path, onerror=del_rw)
+
+ os.makedirs(sandbox_path)
+ os.chdir(sandbox_path)
+
+ # Set up paths
+ bin_path = os.path.join(clr_root, 'bin', 'Product', os_group + '.' + arch + '.' + build_type)
+
+ crossgen_path = os.path.join(bin_path,crossgen)
+ jit_path = os.path.join(bin_path, jit)
+
+ iterations = 6
+
+ python_exe = python_exe_list[os_group]
+
+ # Run throughput testing
+ for dll_file_name in os.listdir(assembly_root):
+ # Find all framework dlls in the assembly_root dir, which we will crossgen
+ if (dll_file_name.endswith(".dll") and
+ (not ".ni." in dll_file_name) and
+ ("Microsoft" in dll_file_name or "System" in dll_file_name) and
+ (not dll_file_name in dll_exclude_list[os_group])):
+ dll_name = dll_file_name.replace(".dll", "")
+ dll_path = os.path.join(assembly_root, dll_file_name)
+ dll_elapsed_times = runIterations(dll_file_name, dll_path, iterations, crossgen_path, jit_path, assembly_root)
+
+ if len(dll_elapsed_times) != 0:
+ if not benchview_path is None:
+ # Generate the csv file
+ csv_file_name = generateCSV(dll_name, dll_elapsed_times)
+ shutil.copy(csv_file_name, clr_root)
+
+ # For each benchmark, call measurement.py
+ measurement_args = [python_exe,
+ os.path.join(benchview_path, "measurement.py"),
+ "csv",
+ os.path.join(os.getcwd(), csv_file_name),
+ "--metric",
+ "execution_time",
+ "--unit",
+ "milliseconds",
+ "--better",
+ "desc",
+ "--drop-first-value",
+ "--append"]
+ log(" ".join(measurement_args))
+ proc = subprocess.Popen(measurement_args)
+ proc.communicate()
+ else:
+ # Write output to console if we are not publishing
+ log("%s" % (dll_name))
+ log("Duration: [%s]" % (", ".join(str(x) for x in dll_elapsed_times)))
+
+ # Upload the data
+ if benchview_path is not None:
+ # Call submission.py
+ submission_args = [python_exe,
+ os.path.join(benchview_path, "submission.py"),
+ "measurement.json",
+ "--build",
+ os.path.join(clr_root, "build.json"),
+ "--machine-data",
+ os.path.join(clr_root, "machinedata.json"),
+ "--metadata",
+ os.path.join(clr_root, "submission-metadata.json"),
+ "--group",
+ "CoreCLR-throughput",
+ "--type",
+ run_type,
+ "--config-name",
+ build_type,
+ "--config",
+ "Configuration",
+ build_type,
+ "--config",
+ "OS",
+ operating_system,
+ "--arch",
+ architecture,
+ "--machinepool",
+ "PerfSnake"
+ ]
+ log(" ".join(submission_args))
+ proc = subprocess.Popen(submission_args)
+ proc.communicate()
+
+ # Call upload.py
+ upload_args = [python_exe,
+ os.path.join(benchview_path, "upload.py"),
+ "submission.json",
+ "--container",
+ "coreclr"
+ ]
+ log(" ".join(upload_args))
+ proc = subprocess.Popen(upload_args)
+ proc.communicate()
+
+ os.chdir(current_dir)
+
+ return 0
+
+if __name__ == "__main__":
+ args = parser.parse_args(sys.argv[1:])
+ main(args)
diff --git a/tests/scripts/run-xunit-perf.cmd b/tests/scripts/run-xunit-perf.cmd
index 060ba33171..bad1f93a40 100644
--- a/tests/scripts/run-xunit-perf.cmd
+++ b/tests/scripts/run-xunit-perf.cmd
@@ -4,17 +4,26 @@
@setlocal
@echo off
+Setlocal EnableDelayedExpansion
rem Set defaults for the file extension, architecture and configuration
set CORECLR_REPO=%CD%
set TEST_FILE_EXT=exe
set TEST_ARCH=x64
+set TEST_ARCHITECTURE=x64
set TEST_CONFIG=Release
goto :ARGLOOP
+
:SETUP
+IF /I [%TEST_ARCHITECTURE%] == [x86jit32] (
+ set TEST_ARCH=x86
+) ELSE (
+ set TEST_ARCH=%TEST_ARCHITECTURE%
+)
+
set CORECLR_OVERLAY=%CORECLR_REPO%\bin\tests\Windows_NT.%TEST_ARCH%.%TEST_CONFIG%\Tests\Core_Root
set RUNLOG=%CORECLR_REPO%\bin\Logs\perfrun.log
@@ -35,9 +44,9 @@ pushd sandbox
@rem stage stuff we need
@rem xunit and perf
-xcopy /sy %CORECLR_REPO%\packages\Microsoft.DotNet.xunit.performance.runner.Windows\1.0.0-alpha-build0040\tools\* . > %RUNLOG%
-xcopy /sy %CORECLR_REPO%\packages\Microsoft.DotNet.xunit.performance.analysis\1.0.0-alpha-build0040\tools\* . >> %RUNLOG%
-xcopy /sy %CORECLR_REPO%\packages\xunit.console.netcore\1.0.2-prerelease-00177\runtimes\any\native\* . >> %RUNLOG%
+"%CORECLR_REPO%\Tools\dotnetcli\dotnet.exe" restore "%CORECLR_REPO%\tests\src\Common\PerfHarness\project.json"
+"%CORECLR_REPO%\Tools\dotnetcli\dotnet.exe" publish "%CORECLR_REPO%\tests\src\Common\PerfHarness\project.json" -c Release -o %CORECLR_REPO%\sandbox
+xcopy /sy %CORECLR_REPO%\packages\Microsoft.Diagnostics.Tracing.TraceEvent\1.0.0-alpha-experimental\lib\native\* . >> %RUNLOG%
xcopy /sy %CORECLR_REPO%\bin\tests\Windows_NT.%TEST_ARCH%.%TEST_CONFIG%\Tests\Core_Root\* . >> %RUNLOG%
@rem find and stage the tests
@@ -56,7 +65,7 @@ if not [%BENCHVIEW_PATH%] == [] (
--config-name "%TEST_CONFIG%" ^
--config Configuration "%TEST_CONFIG%" ^
--config OS "Windows_NT" ^
- --arch "%TEST_ARCH%" ^
+ --arch "%TEST_ARCHITECTURE%" ^
--machinepool "PerfSnake"
py "%BENCHVIEW_PATH%\upload.py" submission.json --container coreclr
)
@@ -78,16 +87,21 @@ xcopy /s %BENCHDIR%*.txt . >> %RUNLOG%
set CORE_ROOT=%CORECLR_REPO%\sandbox
-xunit.performance.run.exe %BENCHNAME%.%TEST_FILE_EXT% -runner xunit.console.netcore.exe -runnerhost corerun.exe -verbose -runid %PERFOUT% > %BENCHNAME%.out
+@rem setup additional environment variables
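+@rem The file passed via -testEnv is a .cmd script that sets test-specific variables, for example
+@rem (hypothetical contents): set COMPlus_JITMinOpts=1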
+if DEFINED TEST_ENV (
+ if EXIST !TEST_ENV! (
+ call !TEST_ENV!
+ )
+)
-xunit.performance.analysis.exe %PERFOUT%.xml -xml %XMLOUT% > %BENCHNAME%-analysis.out
+corerun.exe PerfHarness.dll %WORKSPACE%\sandbox\%BENCHNAME%.%TEST_FILE_EXT% --perf:runid Perf > %BENCHNAME%.out
@rem optionally generate results for benchview
if not [%BENCHVIEW_PATH%] == [] (
- py "%BENCHVIEW_PATH%\measurement.py" xunit "perf-%BENCHNAME%.xml" --better desc --drop-first-value --append
+ py "%BENCHVIEW_PATH%\measurement.py" xunit "Perf-%BENCHNAME%.xml" --better desc --drop-first-value --append
REM Save off the results to the root directory for recovery later in Jenkins
- xcopy perf-%BENCHNAME%*.xml %CORECLR_REPO%\
- xcopy perf-%BENCHNAME%*.etl %CORECLR_REPO%\
+ xcopy Perf-%BENCHNAME%*.xml %CORECLR_REPO%\
+ xcopy Perf-%BENCHNAME%*.etl %CORECLR_REPO%\
) else (
type %XMLOUT% | findstr "test name"
type %XMLOUT% | findstr Duration
@@ -121,7 +135,13 @@ shift
goto :ARGLOOP
)
IF /I [%1] == [-arch] (
-set TEST_ARCH=%2
+set TEST_ARCHITECTURE=%2
+shift
+shift
+goto :ARGLOOP
+)
+IF /I [%1] == [-testEnv] (
+set TEST_ENV=%2
shift
shift
goto :ARGLOOP
diff --git a/tests/scripts/run-xunit-perf.sh b/tests/scripts/run-xunit-perf.sh
index 27f84c2011..6f49bf6e0a 100755
--- a/tests/scripts/run-xunit-perf.sh
+++ b/tests/scripts/run-xunit-perf.sh
@@ -200,9 +200,6 @@ function create_core_overlay {
if [ ! -d "$coreClrBinDir" ]; then
exit_with_error "$errorSource" "Directory specified by --coreClrBinDir does not exist: $coreClrBinDir"
fi
- if [ ! -f "$mscorlibDir/mscorlib.dll" ]; then
- exit_with_error "$errorSource" "mscorlib.dll was not found in: $mscorlibDir"
- fi
if [ -z "$coreFxBinDir" ]; then
exit_with_error "$errorSource" "One of --coreOverlayDir or --coreFxBinDir must be specified." "$printUsage"
fi
@@ -217,12 +214,7 @@ function create_core_overlay {
cp -f -v "$coreFxBinDir"/* "$coreOverlayDir/" 2>/dev/null
cp -f -v "$coreClrBinDir/"* "$coreOverlayDir/" 2>/dev/null
- cp -f -v "$mscorlibDir/mscorlib.dll" "$coreOverlayDir/"
cp -n -v "$testDependenciesDir"/* "$coreOverlayDir/" 2>/dev/null
- if [ -f "$coreOverlayDir/mscorlib.ni.dll" ]; then
- # Test dependencies come from a Windows build, and mscorlib.ni.dll would be the one from Windows
- rm -f "$coreOverlayDir/mscorlib.ni.dll"
- fi
}
function precompile_overlay_assemblies {
@@ -235,20 +227,15 @@ function precompile_overlay_assemblies {
for fileToPrecompile in ${filesToPrecompile}
do
local filename=${fileToPrecompile}
- # Precompile any assembly except mscorlib since we already have its NI image available.
- if [[ "$filename" != *"mscorlib.dll"* ]]; then
- if [[ "$filename" != *"mscorlib.ni.dll"* ]]; then
- echo Precompiling $filename
- $overlayDir/crossgen /Platform_Assemblies_Paths $overlayDir $filename 2>/dev/null
- local exitCode=$?
- if [ $exitCode == -2146230517 ]; then
- echo $filename is not a managed assembly.
- elif [ $exitCode != 0 ]; then
- echo Unable to precompile $filename.
- else
- echo Successfully precompiled $filename
- fi
- fi
+ echo Precompiling $filename
+ $overlayDir/crossgen /Platform_Assemblies_Paths $overlayDir $filename 2>/dev/null
+ local exitCode=$?
+ if [ $exitCode == -2146230517 ]; then
+ echo $filename is not a managed assembly.
+ elif [ $exitCode != 0 ]; then
+ echo Unable to precompile $filename.
+ else
+ echo Successfully precompiled $filename
fi
done
else
diff --git a/tests/scripts/x86_ci_script.sh b/tests/scripts/x86_ci_script.sh
new file mode 100755
index 0000000000..1f82c3010c
--- /dev/null
+++ b/tests/scripts/x86_ci_script.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+
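+# Expected invocation (run from the repository root), e.g.:
+# ./tests/scripts/x86_ci_script.sh --buildConfig=Release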
+#Parse command line arguments
+__buildConfig=
+for arg in "$@"
+do
+ case $arg in
+ --buildConfig=*)
+ __buildConfig="$(echo ${arg#*=} | awk '{print tolower($0)}')"
+ if [[ "$__buildConfig" != "debug" && "$__buildConfig" != "release" && "$__buildConfig" != "checked" ]]; then
+ echo "ERROR: --buildConfig can only be Debug, Checked or Release"
+ exit 1
+ fi
+ ;;
+ *)
+ ;;
+ esac
+done
+
+#Check if there are any uncommitted changes in the source directory, as git adds and removes patches
+if [[ $(git status -s) != "" ]]; then
+ echo 'ERROR: There are uncommitted changes. To avoid losing these changes, commit them and try again.'
+ echo ''
+ git status
+ exit 1
+fi
+
+#Change build configuration to the capitalized form to create build product paths correctly
+if [[ "$__buildConfig" == "release" ]]; then
+ __buildConfig="Release"
+elif [[ "$__buildConfig" == "checked" ]]; then
+ __buildConfig="Checked"
+else
+ __buildConfig="Debug"
+fi
+__buildDirName="Linux.x86.$__buildConfig"
+
+set -x
+set -e
+
+__dockerImage="hseok82/dotnet-buildtools-prereqs:ubuntu1604_cross_prereqs_v3_x86"
+
+# Begin cross build
+# We cannot build the nuget package yet, so skipnuget is passed to build.sh
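+# ROOTFS_DIR points the cross build at the x86 rootfs inside the container image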
+__dockerEnvironmentSet="-e ROOTFS_DIR=/crossrootfs/x86"
+__currentWorkingDir=`pwd`
+__dockerCmd="docker run -i --rm ${__dockerEnvironmentSet} -v $__currentWorkingDir:/opt/code -w /opt/code $__dockerImage"
+__buildCmd="./build.sh x86 cross skipnuget $__buildConfig"
+$__dockerCmd $__buildCmd
+
+# Begin PAL test
+__dockerImage="hseok82/dotnet-buildtools-prereqs:ubuntu1604_x86_test"
+__dockerCmd="docker run -i --rm -v $__currentWorkingDir:/opt/code -w /opt/code $__dockerImage"
+__palTestCmd="./src/pal/tests/palsuite/runpaltests.sh /opt/code/bin/obj/${__buildDirName} /opt/code/bin/paltestout"
+$__dockerCmd $__palTestCmd
+
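+# docker runs the build as root, so files under bin/ end up root-owned; give them back to the current user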
+sudo chown -R $(id -u -n) bin/
+
+(set +x; echo 'Build complete')