 tests/buildtest.cmd                             |  23
 tests/runtest.cmd                               |  24
 tests/runtest.proj                              |   8
 tests/runtest.sh (mode 100644 -> 100755)        | 647
 tests/src/CLRTest.Execute.Bash.targets          |  26
 tests/src/CLRTest.Execute.Batch.targets         |   2
 tests/src/managed/Compilation/Compilation.cs    |   4
 tests/testsFailingOutsideWindows.txt (new)      |  76
 tests/testsUnsupportedOutsideWindows.txt (new)  |  11
 9 files changed, 708 insertions(+), 113 deletions(-)
diff --git a/tests/buildtest.cmd b/tests/buildtest.cmd
index a73a67fbff..4d97c798e3 100644
--- a/tests/buildtest.cmd
+++ b/tests/buildtest.cmd
@@ -166,6 +166,23 @@ set _buildprefix=
set _buildpostfix=
set _buildappend=
call :build %1
+
+set CORE_ROOT=%__TestBinDir%\Tests\Core_Root
+echo.
+echo Creating test overlay...
+
+:: Log build command line
+set _buildprefix=echo
+set _buildpostfix=^> "%__TestManagedBuildLog%"
+set _buildappend=^>
+call :CreateTestOverlay %1
+
+:: Build
+set _buildprefix=
+set _buildpostfix=
+set _buildappend=
+call :CreateTestOverlay %1
+
exit /b %ERRORLEVEL%
:build
@@ -174,6 +191,12 @@ exit /b %ERRORLEVEL%
IF ERRORLEVEL 1 echo Test build failed. Refer to !__TestManagedBuildLog! for details && exit /b 1
exit /b 0
+:CreateTestOverlay
+
+%_buildprefix% %_msbuildexe% "%__ProjectFilesDir%\runtest.proj" /t:CreateTestOverlay /nologo /maxcpucount /verbosity:minimal /nodeReuse:false /fileloggerparameters:Verbosity=normal;LogFile="%__TestManagedBuildLog%";Append %* %_buildpostfix%
+IF ERRORLEVEL 1 echo Failed to create the test overlay. Refer to !__TestManagedBuildLog! for details && exit /b 1
+exit /b 0
+
:Usage
echo.
echo Usage:
diff --git a/tests/runtest.cmd b/tests/runtest.cmd
index 1c324333c8..f08af94110 100644
--- a/tests/runtest.cmd
+++ b/tests/runtest.cmd
@@ -27,7 +27,7 @@ if /i "%1" == "vs2015" (set __VSVersion=%1&shift&goto Arg_Loop)
if /i "%1" == "/?" (goto Usage)
-set Core_Root=%1
+set CORE_ROOT=%1
shift
:ArgsDone
:: Check prerequisites
@@ -70,16 +70,16 @@ if not defined __LogsDir set __LogsDir=%__ProjectFilesDir%..\bin\Logs
:: Default global test environment variables
if not defined XunitTestBinBase set XunitTestBinBase=%__TestWorkingDir%\
if not defined XunitTestReportDirBase set XunitTestReportDirBase=%XunitTestBinBase%\Reports\
-if defined Core_Root goto :CheckTestEnv
+if defined CORE_ROOT goto :CheckTestEnv
set noCore_RootSet=true
-set Core_Root=%__BinDir%
+set CORE_ROOT=%__BinDir%
:CheckTestEnv
::Check if the test Binaries are built
if not exist %XunitTestBinBase% echo Error: Ensure the Test Binaries are built and are present at %XunitTestBinBase%, Run - buildtest.cmd %__BuildArch% %__BuildType% to build the tests first. && exit /b 1
-if "%Core_Root%" == "" echo Error: Ensure you have done a successful build of the Product and Run - runtest BuildArch BuildType {path to product binaries}. && exit /b 1
-if not exist %Core_Root%\coreclr.dll echo Error: Ensure you have done a successful build of the Product and %Core_Root% contains runtime binaries. && exit /b 1
+if "%CORE_ROOT%" == "" echo Error: Ensure you have done a successful build of the Product and Run - runtest BuildArch BuildType {path to product binaries}. && exit /b 1
+if not exist %CORE_ROOT%\coreclr.dll echo Error: Ensure you have done a successful build of the Product and %CORE_ROOT% contains runtime binaries. && exit /b 1
if not "%__Exclude%"=="" (if not exist %__Exclude% echo Error: Exclusion .targets file not found && exit /b 1)
if not "%__TestEnv%"=="" (if not exist %__TestEnv% echo Error: Test Environment script not found && exit /b 1)
if not exist %__LogsDir% md %__LogsDir%
@@ -90,7 +90,7 @@ set __TestRunBuildLog=%__LogsDir%\TestRunResults_%__BuildOS%__%__BuildArch%__%__
set __TestRunHtmlLog=%__LogsDir%\TestRun_%__BuildOS%__%__BuildArch%__%__BuildType%.html
set __TestRunXmlLog=%__LogsDir%\TestRun_%__BuildOS%__%__BuildArch%__%__BuildType%.xml
-echo Core_Root that will be used is: %Core_Root%
+echo CORE_ROOT that will be used is: %CORE_ROOT%
echo Starting The Test Run ...
if "%__SkipWrapperGeneration%"=="true" goto :preptests
@@ -121,12 +121,12 @@ set _buildprefix=
set _buildpostfix=
set _buildappend=
if not "%noCore_RootSet%"=="true" goto :runtests
-set Core_Root=%XunitTestBinBase%\Tests\Core_Root
-echo Using Default Core_Root as %Core_Root%
-echo Copying Built binaries from %__BinDir% to %Core_Root%
-if exist %Core_Root% rd /s /q %Core_Root%
-md %Core_Root%
-xcopy /s %__BinDir% %Core_Root%
+set CORE_ROOT=%XunitTestBinBase%\Tests\Core_Root
+echo Using Default CORE_ROOT as %CORE_ROOT%
+echo Copying Built binaries from %__BinDir% to %CORE_ROOT%
+if exist %CORE_ROOT% rd /s /q %CORE_ROOT%
+md %CORE_ROOT%
+xcopy /s %__BinDir% %CORE_ROOT%
call :runtests
if ERRORLEVEL 1 (
echo Test Run failed. Refer to the following"
diff --git a/tests/runtest.proj b/tests/runtest.proj
index 97ab648f73..96f1c05d56 100644
--- a/tests/runtest.proj
+++ b/tests/runtest.proj
@@ -306,6 +306,11 @@ public class $([System.String]::Copy('%(AllCMDs.FullPath)').Replace("$(_CMDDIR)"
<Import Project="tests.targets" />
<Import Project="publishdependency.targets" />
+ <Target Name="CreateTestOverlay">
+ <MSBuild Projects="$(MSBuildProjectFile)"
+ Targets="CopyDependecyToCoreRoot"/>
+ </Target>
+
<Target Name="Build">
<!-- Default for building -->
<MSBuild Projects="$(MSBuildProjectFile)"
@@ -315,7 +320,8 @@ public class $([System.String]::Copy('%(AllCMDs.FullPath)').Replace("$(_CMDDIR)"
<!-- Execution -->
- <MSBuild Projects="$(MSBuildProjectFile)" Targets="CopyDependecyToCoreRoot"
+ <MSBuild Projects="$(MSBuildProjectFile)"
+ Targets="CreateTestOverlay"
Condition=" '$(NoRun)'!='true' "/>
<MSBuild Projects="$(MSBuildProjectFile)" Targets="RunTests"
diff --git a/tests/runtest.sh b/tests/runtest.sh
index 00847c6466..6333873266 100644..100755
--- a/tests/runtest.sh
+++ b/tests/runtest.sh
@@ -1,13 +1,44 @@
#!/usr/bin/env bash
function print_usage {
- echo ""
- echo "CoreCLR test runner script."
- echo "Arguments:"
- echo " -v, --verbose : Show output from each test."
- echo " --testDirFile=<path> : Run tests only in the directories specified by the file at <path>."
- echo " The file should specify one directory per line."
- echo ""
+ echo ''
+ echo 'CoreCLR test runner script.'
+ echo ''
+ echo 'Typical command line:'
+ echo ''
+ echo 'coreclr/tests/runtest.sh'
+ echo ' --testRootDir="temp/Windows.x64.Debug"'
+ echo ' --testNativeBinDir="coreclr/bin/obj/Linux.x64.Debug/tests"'
+ echo ' --coreClrBinDir="coreclr/bin/Product/Linux.x64.Debug"'
+ echo ' --mscorlibDir="windows/coreclr/bin/Product/Linux.x64.Debug"'
+ echo ' --coreFxBinDir="corefx/bin/Linux.AnyCPU.Debug"'
+ echo ' --coreFxNativeBinDir="corefx/bin/Linux.x64.Debug"'
+ echo ''
+ echo 'Required arguments:'
+ echo ' --testRootDir=<path> : Root directory of the test build (e.g. coreclr/bin/tests/Windows_NT.x64.Debug).'
+ echo ' --testNativeBinDir=<path> : Directory of the native CoreCLR test build (e.g. coreclr/bin/obj/Linux.x64.Debug/tests).'
+    echo '                                       (Also required: either --coreOverlayDir, or all of the switches that --coreOverlayDir overrides)'
+ echo ''
+ echo 'Optional arguments:'
+ echo ' --coreOverlayDir=<path> : Directory containing core binaries and test dependencies. If not specified, the'
+ echo ' default is testRootDir/Tests/coreoverlay. This switch overrides --coreClrBinDir,'
+ echo ' --mscorlibDir, --coreFxBinDir, and --coreFxNativeBinDir.'
+ echo ' --coreClrBinDir=<path> : Directory of the CoreCLR build (e.g. coreclr/bin/Product/Linux.x64.Debug).'
+ echo ' --mscorlibDir=<path> : Directory containing the built mscorlib.dll. If not specified, it is expected to be'
+ echo ' in the directory specified by --coreClrBinDir.'
+ echo ' --coreFxBinDir=<path> : Directory of the CoreFX build (e.g. corefx/bin/Linux.AnyCPU.Debug).'
+ echo ' --coreFxNativeBinDir=<path> : Directory of the CoreFX native build (e.g. corefx/bin/Linux.x64.Debug).'
+ echo ' --testDir=<path> : Run tests only in the specified directory. The path is relative to the directory'
+    echo '                                       specified by --testRootDir. This switch may be specified multiple times.'
+ echo ' --testDirFile=<path> : Run tests only in the directories specified by the file at <path>. Paths are listed'
+    echo '                                       one per line, relative to the directory specified by --testRootDir.'
+ echo ' --runFailingTestsOnly : Run only the tests that are disabled on this platform due to unexpected failures.'
+    echo '                                       Failing tests are listed in coreclr/tests/testsFailingOutsideWindows.txt, one per'
+ echo ' line, as paths to .sh files relative to the directory specified by --testRootDir.'
+ echo ' --sequential : Run tests sequentially (default is to run in parallel).'
+ echo ' -v, --verbose : Show output from each test.'
+ echo ' -h|--help : Show usage information.'
+ echo ''
}
function print_results {
@@ -22,120 +53,590 @@ function print_results {
echo "======================="
}
+# Initialize counters for bookkeeping.
+countTotalTests=0
+countPassedTests=0
+countFailedTests=0
+countSkippedTests=0
+
+# Variables for xUnit-style XML output. XML format: https://xunit.github.io/docs/format-xml-v2.html
+xunitOutputPath=
+xunitTestOutputPath=
+
+function xunit_output_begin {
+ xunitOutputPath=$testRootDir/coreclrtests.xml
+ xunitTestOutputPath=${xunitOutputPath}.test
+ if [ -e "$xunitOutputPath" ]; then
+ rm -f -r "$xunitOutputPath"
+ fi
+ if [ -e "$xunitTestOutputPath" ]; then
+ rm -f -r "$xunitTestOutputPath"
+ fi
+}
+
+function xunit_output_add_test {
+ # <assemblies>
+ # <assembly>
+ # <collection>
+ # <test .../> <!-- Write this element here -->
+
+ local scriptFilePath=$1
+ local outputFilePath=$2
+ local testResult=$3 # Pass, Fail, or Skip
+ local testScriptExitCode=$4
+
+ local testPath=${scriptFilePath:0:(-3)} # Remove trailing ".sh"
+ local testDir=$(dirname "$testPath")
+ local testName=$(basename "$testPath")
+
+ # Replace '/' with '.'
+ testPath=$(echo "$testPath" | tr / .)
+ testDir=$(echo "$testDir" | tr / .)
+
+ local line
+
+ line=" "
+ line="${line}<test"
+ line="${line} name=\"${testPath}\""
+ line="${line} type=\"${testDir}\""
+ line="${line} method=\"${testName}\""
+ line="${line} result=\"${testResult}\""
+
+ if [ "$testResult" == "Pass" ]; then
+ line="${line}/>"
+ echo "$line" >>"$xunitTestOutputPath"
+ return
+ fi
+
+ line="${line}>"
+ echo "$line" >>"$xunitTestOutputPath"
+
+ line=" "
+ if [ "$testResult" == "Skip" ]; then
+ line="${line}<reason><![CDATA[$(cat "$outputFilePath")]]></reason>"
+ echo "$line" >>"$xunitTestOutputPath"
+ else
+ line="${line}<failure exception-type=\"Exit code: ${testScriptExitCode}\">"
+ echo "$line" >>"$xunitTestOutputPath"
+
+ line=" "
+ line="${line}<message>"
+ echo "$line" >>"$xunitTestOutputPath"
+ line=" "
+ line="${line}<![CDATA["
+ echo "$line" >>"$xunitTestOutputPath"
+ cat "$outputFilePath" >>"$xunitTestOutputPath"
+ line=" "
+ line="${line}]]>"
+ echo "$line" >>"$xunitTestOutputPath"
+ line=" "
+ line="${line}</message>"
+ echo "$line" >>"$xunitTestOutputPath"
+
+ line=" "
+ line="${line}</failure>"
+ echo "$line" >>"$xunitTestOutputPath"
+ fi
+
+ line=" "
+ line="${line}</test>"
+ echo "$line" >>"$xunitTestOutputPath"
+}
+
+function xunit_output_end {
+ local errorSource=$1
+ local errorMessage=$2
+
+ local errorCount
+ if [ -z "$errorSource" ]; then
+ ((errorCount = 0))
+ else
+ ((errorCount = 1))
+ fi
+
+ echo '<?xml version="1.0" encoding="utf-8"?>' >>"$xunitOutputPath"
+ echo '<assemblies>' >>"$xunitOutputPath"
+
+ local line
+
+ # <assembly ...>
+ line=" "
+ line="${line}<assembly"
+ line="${line} name=\"CoreClrTestAssembly\""
+ line="${line} total=\"${countTotalTests}\""
+ line="${line} passed=\"${countPassedTests}\""
+ line="${line} failed=\"${countFailedTests}\""
+ line="${line} skipped=\"${countSkippedTests}\""
+ line="${line} errors=\"${errorCount}\""
+ line="${line}>"
+ echo "$line" >>"$xunitOutputPath"
+
+ # <collection ...>
+ line=" "
+ line="${line}<collection"
+ line="${line} name=\"CoreClrTestCollection\""
+ line="${line} total=\"${countTotalTests}\""
+ line="${line} passed=\"${countPassedTests}\""
+ line="${line} failed=\"${countFailedTests}\""
+ line="${line} skipped=\"${countSkippedTests}\""
+ line="${line}>"
+ echo "$line" >>"$xunitOutputPath"
+
+ # <test .../> <test .../> ...
+ if [ -f "$xunitTestOutputPath" ]; then
+ cat "$xunitTestOutputPath" >>"$xunitOutputPath"
+ rm -f "$xunitTestOutputPath"
+ fi
+
+ # </collection>
+ line=" "
+ line="${line}</collection>"
+ echo "$line" >>"$xunitOutputPath"
+
+ if [ -n "$errorSource" ]; then
+ # <errors>
+ line=" "
+ line="${line}<errors>"
+ echo "$line" >>"$xunitOutputPath"
+
+ # <error ...>
+ line=" "
+ line="${line}<error"
+ line="${line} type=\"TestHarnessError\""
+ line="${line} name=\"${errorSource}\""
+ line="${line}>"
+ echo "$line" >>"$xunitOutputPath"
+
+ # <failure .../>
+ line=" "
+ line="${line}<failure>${errorMessage}</failure>"
+ echo "$line" >>"$xunitOutputPath"
+
+ # </error>
+ line=" "
+ line="${line}</error>"
+ echo "$line" >>"$xunitOutputPath"
+
+ # </errors>
+ line=" "
+ line="${line}</errors>"
+ echo "$line" >>"$xunitOutputPath"
+ fi
+
+ # </assembly>
+ line=" "
+ line="${line}</assembly>"
+ echo "$line" >>"$xunitOutputPath"
+
+ # </assemblies>
+ echo '</assemblies>' >>"$xunitOutputPath"
+}
+
+function exit_with_error {
+ local errorSource=$1
+ local errorMessage=$2
+ local printUsage=$3
+
+ if [ -z "$printUsage" ]; then
+ ((printUsage = 0))
+ fi
+
+ echo "$errorMessage"
+ xunit_output_end "$errorSource" "$errorMessage"
+ if ((printUsage != 0)); then
+ print_usage
+ fi
+ exit 1
+}
+
# Handle Ctrl-C. We will stop execution and print the results that
# we gathered so far.
function handle_ctrl_c {
+ local errorSource='handle_ctrl_c'
+
echo ""
echo "*** Stopping... ***"
print_results
- exit 0
+ exit_with_error "$errorSource" "Test run aborted by Ctrl+C."
}
# Register the Ctrl-C handler
trap handle_ctrl_c INT
+function create_core_overlay {
+ local errorSource='create_core_overlay'
+ local printUsage=1
+
+ if [ -n "$coreOverlayDir" ]; then
+ export CORE_ROOT="$coreOverlayDir"
+ return
+ fi
+
+ # Check inputs to make sure we have enough information to create the core layout. $testRootDir/Tests/Core_Root should
+ # already exist and contain test dependencies that are not built.
+ local testDependenciesDir=$testRootDir/Tests/Core_Root
+ if [ ! -d "$testDependenciesDir" ]; then
+ exit_with_error "$errorSource" "Did not find the test dependencies directory: $testDependenciesDir"
+ fi
+ if [ -z "$coreClrBinDir" ]; then
+ exit_with_error "$errorSource" "One of --coreOverlayDir or --coreClrBinDir must be specified." "$printUsage"
+ fi
+ if [ ! -d "$coreClrBinDir" ]; then
+ exit_with_error "$errorSource" "Directory specified by --coreClrBinDir does not exist: $coreClrBinDir"
+ fi
+ if [ -z "$mscorlibDir" ]; then
+ mscorlibDir=$coreClrBinDir
+ fi
+ if [ ! -f "$mscorlibDir/mscorlib.dll" ]; then
+ exit_with_error "$errorSource" "mscorlib.dll was not found in: $mscorlibDir"
+ fi
+ if [ -z "$coreFxBinDir" ]; then
+ exit_with_error "$errorSource" "One of --coreOverlayDir or --coreFxBinDir must be specified." "$printUsage"
+ fi
+ if [ ! -d "$coreFxBinDir" ]; then
+ exit_with_error "$errorSource" "Directory specified by --coreFxBinDir does not exist: $coreFxBinDir"
+ fi
+ if [ -z "$coreFxNativeBinDir" ]; then
+        exit_with_error "$errorSource" "One of --coreOverlayDir or --coreFxNativeBinDir must be specified." "$printUsage"
+ fi
+ if [ ! -d "$coreFxNativeBinDir/Native" ]; then
+        exit_with_error "$errorSource" "Directory specified by --coreFxNativeBinDir does not exist: $coreFxNativeBinDir/Native"
+ fi
+
+ # Create the overlay
+ coreOverlayDir=$testRootDir/Tests/coreoverlay
+ export CORE_ROOT="$coreOverlayDir"
+ if [ -e "$coreOverlayDir" ]; then
+ rm -f -r "$coreOverlayDir"
+ fi
+ mkdir "$coreOverlayDir"
+ find "$coreFxBinDir" -iname '*.dll' \! -iwholename '*test*' \! -iwholename '*/ToolRuntime/*' -exec cp -f -u '{}' "$coreOverlayDir/" \;
+ cp -f "$coreFxNativeBinDir/Native/"*.so "$coreOverlayDir/" 2>/dev/null
+ cp -f "$coreClrBinDir/"* "$coreOverlayDir/" 2>/dev/null
+ cp -f "$mscorlibDir/mscorlib.dll" "$coreOverlayDir/"
+ cp -n "$testDependenciesDir"/* "$coreOverlayDir/" 2>/dev/null
+ if [ -f "$coreOverlayDir/mscorlib.ni.dll" ]; then
+ rm -f "$coreOverlayDir/mscorlib.ni.dll"
+ fi
+}
+
+function copy_test_native_bin_to_test_root {
+ local errorSource='copy_test_native_bin_to_test_root'
+
+ if [ -z "$testNativeBinDir" ]; then
+ exit_with_error "$errorSource" "--testNativeBinDir is required."
+ fi
+ testNativeBinDir=$testNativeBinDir/src
+ if [ ! -d "$testNativeBinDir" ]; then
+ exit_with_error "$errorSource" "Directory specified by --testNativeBinDir does not exist: $testNativeBinDir"
+ fi
+
+ # Copy native test components from the native test build into the respective test directory in the test root directory
+ find "$testNativeBinDir" -type f -iname '*.so' |
+ while IFS='' read -r filePath || [ -n "$filePath" ]; do
+ local dirPath=$(dirname "$filePath")
+ local destinationDirPath=${testRootDir}${dirPath:${#testNativeBinDir}}
+ if [ ! -d "$destinationDirPath" ]; then
+ exit_with_error "$errorSource" "Cannot copy native test bin '$filePath' to '$destinationDirPath/', as the destination directory does not exist."
+ fi
+ cp -f "$filePath" "$destinationDirPath/"
+ done
+}
+
+# Variables for unsupported and failing tests
+declare -a unsupportedTests
+declare -a failingTests
+((runFailingTestsOnly = 0))
+
+function load_unsupported_tests {
+    # Load the list of tests that are not supported on this platform. These tests are skipped.
+ # 'readarray' is not used here, as it includes the trailing linefeed in lines placed in the array.
+ while IFS='' read -r line || [ -n "$line" ]; do
+ unsupportedTests[${#unsupportedTests[@]}]=$line
+ done <"$(dirname "$0")/testsUnsupportedOutsideWindows.txt"
+}
+
+function load_failing_tests {
+    # Load the list of tests that fail on this platform. These tests are disabled (skipped), pending investigation.
+ # 'readarray' is not used here, as it includes the trailing linefeed in lines placed in the array.
+ while IFS='' read -r line || [ -n "$line" ]; do
+ failingTests[${#failingTests[@]}]=$line
+ done <"$(dirname "$0")/testsFailingOutsideWindows.txt"
+}
+
+function is_unsupported_test {
+ for unsupportedTest in "${unsupportedTests[@]}"; do
+ if [ "$1" == "$unsupportedTest" ]; then
+ return 0
+ fi
+ done
+ return 1
+}
+
+function is_failing_test {
+ for failingTest in "${failingTests[@]}"; do
+ if [ "$1" == "$failingTest" ]; then
+ return 0
+ fi
+ done
+ return 1
+}
+
+function skip_unsupported_test {
+ # This function runs in a background process. It should not echo anything, and should not use global variables. This
+ # function is analogous to run_test, and causes the test to be skipped with the message below.
+
+ local scriptFilePath=$1
+ local outputFilePath=$2
+
+ echo "Not supported on this platform." >"$outputFilePath"
+ return 2 # skip the test
+}
+
+function skip_failing_test {
+ # This function runs in a background process. It should not echo anything, and should not use global variables. This
+ # function is analogous to run_test, and causes the test to be skipped with the message below.
+
+ local scriptFilePath=$1
+ local outputFilePath=$2
+
+ echo "Temporarily disabled on this platform due to unexpected failures." >"$outputFilePath"
+ return 2 # skip the test
+}
+
+function run_test {
+ # This function runs in a background process. It should not echo anything, and should not use global variables.
+
+ local scriptFilePath=$1
+ local outputFilePath=$2
+
+ # Switch to directory where the script is
+ cd "$(dirname "$scriptFilePath")"
+
+ local scriptFileName=$(basename "$scriptFilePath")
+ local outputFileName=$(basename "$outputFilePath")
+
+ # Convert DOS line endings to Unix if needed
+ sed -i 's/\r$//' "$scriptFileName"
+
+ "./$scriptFileName" >"$outputFileName" 2>&1
+ return $?
+}
+
+# Variables for running tests in the background
+((maxProcesses = $(getconf _NPROCESSORS_ONLN) * 3 / 2)) # long tests delay process creation, use a few more processors
+((nextProcessIndex = 0))
+((processCount = 0))
+declare -a scriptFilePaths
+declare -a outputFilePaths
+declare -a processIds
+
+function finish_test {
+ wait ${processIds[$nextProcessIndex]}
+ local testScriptExitCode=$?
+ ((--processCount))
+
+ local scriptFilePath=${scriptFilePaths[$nextProcessIndex]}
+ local outputFilePath=${outputFilePaths[$nextProcessIndex]}
+ local scriptFileName=$(basename "$scriptFilePath")
+
+ local xunitTestResult
+ case $testScriptExitCode in
+ 0)
+ let countPassedTests++
+ xunitTestResult='Pass'
+ if ((verbose == 1 || runFailingTestsOnly == 1)); then
+ echo "PASSED - $scriptFilePath"
+ else
+ echo " - $scriptFilePath"
+ fi
+ ;;
+ 2)
+ let countSkippedTests++
+ xunitTestResult='Skip'
+ echo "SKIPPED - $scriptFilePath"
+ ;;
+ *)
+ let countFailedTests++
+ xunitTestResult='Fail'
+ echo "FAILED - $scriptFilePath"
+ ;;
+ esac
+ let countTotalTests++
+
+ if ((verbose == 1 || testScriptExitCode != 0)); then
+ while IFS='' read -r line || [ -n "$line" ]; do
+ echo " $line"
+ done <"$outputFilePath"
+ fi
+
+ xunit_output_add_test "$scriptFilePath" "$outputFilePath" "$xunitTestResult" "$testScriptExitCode"
+}
+
+function finish_remaining_tests {
+ # Finish the remaining tests in the order in which they were started
+ if ((nextProcessIndex >= processCount)); then
+ ((nextProcessIndex = 0))
+ fi
+ while ((processCount > 0)); do
+ finish_test
+ ((nextProcessIndex = (nextProcessIndex + 1) % maxProcesses))
+ done
+ ((nextProcessIndex = 0))
+}
+
+function start_test {
+ local scriptFilePath=$1
+
+ if ((runFailingTestsOnly == 1)) && ! is_failing_test "$scriptFilePath"; then
+ return
+ fi
+
+ if ((nextProcessIndex < processCount)); then
+ finish_test
+ fi
+
+ scriptFilePaths[$nextProcessIndex]=$scriptFilePath
+ local scriptFileName=$(basename "$scriptFilePath")
+ local outputFilePath=$(dirname "$scriptFilePath")/${scriptFileName}.out
+ outputFilePaths[$nextProcessIndex]=$outputFilePath
+
+ test "$verbose" == 1 && echo "Starting $scriptFilePath"
+ if is_unsupported_test "$scriptFilePath"; then
+ skip_unsupported_test "$scriptFilePath" "$outputFilePath" &
+ elif ((runFailingTestsOnly == 0)) && is_failing_test "$scriptFilePath"; then
+ skip_failing_test "$scriptFilePath" "$outputFilePath" &
+ else
+ run_test "$scriptFilePath" "$outputFilePath" &
+ fi
+ processIds[$nextProcessIndex]=$!
+
+ ((nextProcessIndex = (nextProcessIndex + 1) % maxProcesses))
+ ((++processCount))
+}
+
# Get a list of directories in which to scan for tests by reading the
# specified file line by line.
function set_test_directories {
- listFileName=$1
+ local errorSource='set_test_directories'
+
+ local listFileName=$1
if [ ! -f "$listFileName" ]
then
- echo "Test directories file not found at $listFileName"
- echo "Exiting..."
- exit 1
+ exit_with_error "$errorSource" "Test directories file not found at $listFileName"
fi
readarray testDirectories < "$listFileName"
}
function run_tests_in_directory {
- rootDir=$1
+ local testDir=$1
# Recursively search through directories for .sh files to run.
- for file in $(find "$rootDir" -name '*.sh' -printf '%P\n')
+ for scriptFilePath in $(find "$testDir" -type f -iname '*.sh' | sort)
do
- scriptFullPath="$rootDir/$file"
-
- # Switch to directory where the script is
- cd "$(dirname "$scriptFullPath")"
-
- # Convert DOS line endings to Unix if needed
- sed -i 's/\r$//' "$scriptFullPath"
-
- scriptName=$(basename "$file")
- test "$verbose" == 1 && echo "Starting $scriptName"
-
- # Run the test
- ./"$scriptName" |
- while testOutput= read -r line
- do
- # Print the test output if verbose mode is on
- test "$verbose" == 1 && echo " $line"
- done;
-
- testScriptExitCode=${PIPESTATUS[0]}
- case $testScriptExitCode in
- 0)
- let countPassedTests++
- echo "PASSED - $scriptFullPath"
- ;;
- 1)
- let countFailedTests++
- echo "FAILED - $scriptFullPath"
- ;;
- 2)
- let countSkippedTests++
- echo "SKIPPED - $scriptFullPath"
- ;;
- esac
-
- let countTotalTests++
-
- # Return to root directory
- cd "$rootDir"
+ start_test "${scriptFilePath:2}"
done
}
-# Initialize counters for bookkeeping.
-countTotalTests=0
-countPassedTests=0
-countFailedTests=0
-countSkippedTests=0
-
-currDir=`pwd`
+# Argument variables
+testRootDir=
+testNativeBinDir=
+coreOverlayDir=
+coreClrBinDir=
+mscorlibDir=
+coreFxBinDir=
+coreFxNativeBinDir=
# Handle arguments
verbose=0
for i in "$@"
do
case $i in
- -h|--help)
- print_usage
- exit 0;
- ;;
- -v|--verbose)
- verbose=1
- ;;
- --testDirFile=*)
- set_test_directories "${i#*=}"
- ;;
- *);;
+ -h|--help)
+ print_usage
+ exit 0
+ ;;
+ -v|--verbose)
+ verbose=1
+ ;;
+ --testRootDir=*)
+ testRootDir=${i#*=}
+ ;;
+ --testNativeBinDir=*)
+ testNativeBinDir=${i#*=}
+ ;;
+ --coreOverlayDir=*)
+ coreOverlayDir=${i#*=}
+ ;;
+ --coreClrBinDir=*)
+ coreClrBinDir=${i#*=}
+ ;;
+ --mscorlibDir=*)
+ mscorlibDir=${i#*=}
+ ;;
+ --coreFxBinDir=*)
+ coreFxBinDir=${i#*=}
+ ;;
+ --coreFxNativeBinDir=*)
+ coreFxNativeBinDir=${i#*=}
+ ;;
+ --testDir=*)
+ testDirectories[${#testDirectories[@]}]=${i#*=}
+ ;;
+ --testDirFile=*)
+ set_test_directories "${i#*=}"
+ ;;
+ --runFailingTestsOnly)
+ ((runFailingTestsOnly = 1))
+ ;;
+ --sequential)
+ ((maxProcesses = 1))
+ ;;
+ *)
+ echo "Unknown switch: $i"
+ print_usage
+ exit 0
+ ;;
esac
done
+if [ -z "$testRootDir" ]; then
+ echo "--testRootDir is required."
+ print_usage
+ exit 1
+fi
+if [ ! -d "$testRootDir" ]; then
+ echo "Directory specified by --testRootDir does not exist: $testRootDir"
+ exit 1
+fi
+cd "$testRootDir"
+
+xunit_output_begin
+create_core_overlay
+copy_test_native_bin_to_test_root
+load_unsupported_tests
+load_failing_tests
+
if [ -z "$testDirectories" ]
then
# No test directories were specified, so run everything in the current
# directory and its subdirectories.
- run_tests_in_directory "$currDir"
+ run_tests_in_directory "."
else
# Otherwise, run all the tests in each specified test directory.
for testDir in "${testDirectories[@]}"
do
- run_tests_in_directory $currDir/$testDir
+ if [ ! -d "$testDir" ]; then
+ echo "Test directory does not exist: $testDir"
+ else
+ run_tests_in_directory "./$testDir"
+ fi
done
fi
+finish_remaining_tests
print_results
+xunit_output_end
exit 0
diff --git a/tests/src/CLRTest.Execute.Bash.targets b/tests/src/CLRTest.Execute.Bash.targets
index 72d3619e69..7e91d5618e 100644
--- a/tests/src/CLRTest.Execute.Bash.targets
+++ b/tests/src/CLRTest.Execute.Bash.targets
@@ -101,7 +101,7 @@ fi
<_CLRTestRunFile Condition="'$(_CLRTestNeedsProjectToRun)' != 'True'">"$(AssemblyName).exe"</_CLRTestRunFile>
<!-- TODO: make this better? -->
- <_CLRTestRunFile Condition=" '$(CLRTestIsHosted)'=='true' And !$(_CLRTestNeedsProjectToRun) ">"$Core_Root/corerun" $(_CLRTestRunFile)</_CLRTestRunFile>
+ <_CLRTestRunFile Condition=" '$(CLRTestIsHosted)'=='true' And !$(_CLRTestNeedsProjectToRun) ">"$CORE_ROOT/corerun" $(_CLRTestRunFile)</_CLRTestRunFile>
<BashCLRTestLaunchCmds Condition=" '$(BashCLRTestLaunchCmds)'=='' "><![CDATA[
echo $(_CLRTestRunFile) $CLRTestExecutionArguments $Host_Args
@@ -129,22 +129,6 @@ CLRTestExitCode=$?
<Output TaskParameter="ParamList" PropertyName="_CLRTestParamList"/>
</GenerateParamList>
- <!-- If a test has precommands or postcommands but no bash-specific ones,
- we will skip running that test. In order to enable the test on Unix,
- the corresponding bash versions (_BashCLRTest[Pre|Post]Commands) of
- the commands should be specified. -->
- <PropertyGroup>
- <ShouldSkipTest>false</ShouldSkipTest>
- <ShouldSkipTest Condition="('$(_CLRTestPreCommands)' != '' AND '$(_BashCLRTestPreCommands)' == '')
- OR ('$(_CLRTestPostCommands)' != '' AND '$(_BashCLRTestPostCommands)' == '')"
- >true</ShouldSkipTest>
-
- <SkipTest>
-echo "Skipping this test due to presence of pre- or post-commands that are not bash-specific."
-exit 2 # Exit code indicating skip
- </SkipTest>
- </PropertyGroup>
-
<PropertyGroup>
<!--
This generates the script portion to parse all of the command line arguments.
@@ -183,14 +167,8 @@ done
$(BashCLRTestArgPrep)
]]></BashCLRTestArgPrep>
- <_CLRTestExecutionScriptText Condition="$(ShouldSkipTest)">
- <![CDATA[
-$(SkipTest)
- ]]>
- </_CLRTestExecutionScriptText>
-
<!-- NOTE! semicolons must be escaped with %3B boooo -->
- <_CLRTestExecutionScriptText Condition="!$(ShouldSkipTest)">
+ <_CLRTestExecutionScriptText>
<![CDATA[
# The __TestEnv variable may be used to specify something to run before the test.
$__TestEnv
diff --git a/tests/src/CLRTest.Execute.Batch.targets b/tests/src/CLRTest.Execute.Batch.targets
index d32feb2f8e..06e9524b95 100644
--- a/tests/src/CLRTest.Execute.Batch.targets
+++ b/tests/src/CLRTest.Execute.Batch.targets
@@ -103,7 +103,7 @@ IF NOT "%CLRTestExitCode%"=="%CLRTestExpectedExitCode%" (
<_CLRTestRunFile Condition="'$(_CLRTestNeedsProjectToRun)' != 'True'">"$(AssemblyName).exe"</_CLRTestRunFile>
<!-- TODO: make this better? -->
- <_CLRTestRunFile Condition=" '$(CLRTestIsHosted)'=='true' And !$(_CLRTestNeedsProjectToRun) ">"%Core_Root%\corerun.exe" $(_CLRTestRunFile)</_CLRTestRunFile>
+ <_CLRTestRunFile Condition=" '$(CLRTestIsHosted)'=='true' And !$(_CLRTestNeedsProjectToRun) ">"%CORE_ROOT%\corerun.exe" $(_CLRTestRunFile)</_CLRTestRunFile>
<BatchCLRTestLaunchCmds Condition=" '$(BatchCLRTestLaunchCmds)'=='' "><![CDATA[
ECHO $(_CLRTestRunFile) %CLRTestExecutionArguments% %Host_Args%
diff --git a/tests/src/managed/Compilation/Compilation.cs b/tests/src/managed/Compilation/Compilation.cs
index 0a0037af29..9b3ebcdd90 100644
--- a/tests/src/managed/Compilation/Compilation.cs
+++ b/tests/src/managed/Compilation/Compilation.cs
@@ -14,11 +14,11 @@ class Program
static int Main(string[] args)
{
Console.WriteLine("Starting the test");
- string codeFile = @"helloWorld.cs";
+ string codeFile = @"HelloWorld.cs";
var sourceTree = new List<SyntaxTree>(){SyntaxFactory.ParseSyntaxTree(File.ReadAllText(codeFile))};
- string mscorlibFile = Path.Combine(Environment.GetEnvironmentVariable("Core_root"), "mscorlib.dll");
+ string mscorlibFile = Path.Combine(Environment.GetEnvironmentVariable("CORE_ROOT"), "mscorlib.dll");
Console.WriteLine("Using reference to: {0}", mscorlibFile);
var reference = new List<MetadataReference>(){ MetadataReference.CreateFromFile(mscorlibFile)};
diff --git a/tests/testsFailingOutsideWindows.txt b/tests/testsFailingOutsideWindows.txt
new file mode 100644
index 0000000000..ba354dc807
--- /dev/null
+++ b/tests/testsFailingOutsideWindows.txt
@@ -0,0 +1,76 @@
+Interop/ICastable/Castable.sh
+Interop/ReversePInvoke/Marshalling/MarshalBoolArray.sh
+JIT/Directed/lifetime/lifetime2.sh
+JIT/Directed/newarr/newarr.sh
+JIT/Directed/PREFIX/unaligned/1/arglist.sh
+JIT/Directed/PREFIX/unaligned/2/arglist.sh
+JIT/Directed/PREFIX/unaligned/4/arglist.sh
+JIT/Directed/PREFIX/volatile/1/arglist.sh
+JIT/Directed/TypedReference/TypedReference.sh
+JIT/Methodical/ELEMENT_TYPE_IU/_il_dbgi_ref.sh
+JIT/Methodical/ELEMENT_TYPE_IU/_il_dbgu_ref.sh
+JIT/Methodical/ELEMENT_TYPE_IU/_il_reli_ref.sh
+JIT/Methodical/ELEMENT_TYPE_IU/_il_relu_ref.sh
+JIT/Methodical/refany/array1.sh
+JIT/Methodical/refany/array2.sh
+JIT/Methodical/refany/format.sh
+JIT/Methodical/refany/gcreport.sh
+JIT/Methodical/refany/_il_dbgarray1.sh
+JIT/Methodical/refany/_il_dbgarray2.sh
+JIT/Methodical/refany/_il_dbgarray3.sh
+JIT/Methodical/refany/_il_dbgnative.sh
+JIT/Methodical/refany/_il_dbgseq.sh
+JIT/Methodical/refany/_il_dbgu_native.sh
+JIT/Methodical/refany/_il_relarray1.sh
+JIT/Methodical/refany/_il_relarray2.sh
+JIT/Methodical/refany/_il_relarray3.sh
+JIT/Methodical/refany/_il_relnative.sh
+JIT/Methodical/refany/_il_relseq.sh
+JIT/Methodical/refany/_il_relu_native.sh
+JIT/Methodical/refany/lcs.sh
+JIT/Methodical/refany/native.sh
+JIT/Methodical/refany/virtcall.sh
+JIT/Methodical/tailcall_v4/tailcall_AV.sh
+JIT/Methodical/varargs/callconv/gc_ctor_il_d.sh
+JIT/Methodical/varargs/callconv/gc_ctor_il_r.sh
+JIT/Methodical/varargs/callconv/val_ctor_il_d.sh
+JIT/Methodical/varargs/callconv/val_ctor_il_r.sh
+JIT/Methodical/varargs/misc/Dev10_615402.sh
+JIT/Methodical/VT/etc/gc_nested.sh
+JIT/Methodical/VT/etc/nested.sh
+JIT/Methodical/xxobj/ldobj/_il_dbgldobj_I8.sh
+JIT/Methodical/xxobj/ldobj/_il_dbgldobj_I.sh
+JIT/Methodical/xxobj/ldobj/_il_dbgldobj_R4.sh
+JIT/Methodical/xxobj/ldobj/_il_dbgldobj_R8.sh
+JIT/Methodical/xxobj/ldobj/_il_dbgldobj_U2.sh
+JIT/Methodical/xxobj/ldobj/_il_dbgldobj_V.sh
+JIT/Methodical/xxobj/ldobj/_il_relldobj_I8.sh
+JIT/Methodical/xxobj/ldobj/_il_relldobj_I.sh
+JIT/Methodical/xxobj/ldobj/_il_relldobj_R4.sh
+JIT/Methodical/xxobj/ldobj/_il_relldobj_R8.sh
+JIT/Methodical/xxobj/ldobj/_il_relldobj_U2.sh
+JIT/Methodical/xxobj/ldobj/_il_relldobj_V.sh
+JIT/Methodical/xxobj/operand/_il_dbgrefanyval.sh
+JIT/Methodical/xxobj/operand/_il_relrefanyval.sh
+JIT/Methodical/xxobj/operand/refanyval.sh
+JIT/Regression/CLR-x86-EJIT/V1-M12-Beta2/b26323/b26323.sh
+JIT/Regression/CLR-x86-JIT/V1-M09.5-PDC/b16423/b16423.sh
+JIT/Regression/CLR-x86-JIT/V1-M09.5-PDC/b28901/b28901.sh
+JIT/Regression/CLR-x86-JIT/V1-M09.5-PDC/b29583/b29583.sh
+JIT/Regression/CLR-x86-JIT/V1-M09.5-PDC/b30838/b30838.sh
+JIT/Regression/CLR-x86-JIT/V1-M09.5-PDC/b30864/b30864.sh
+JIT/Regression/CLR-x86-JIT/V1-M11-Beta1/b35784/b35784.sh
+JIT/Regression/CLR-x86-JIT/V1-M11-Beta1/b36472/b36472.sh
+JIT/Regression/CLR-x86-JIT/V1-M11-Beta1/b37598/b37598.sh
+JIT/Regression/CLR-x86-JIT/V1-M11-Beta1/b41391/b41391.sh
+JIT/Regression/CLR-x86-JIT/V1-M11-Beta1/b41621/b41621.sh
+JIT/Regression/CLR-x86-JIT/V1-M11-Beta1/b46867/b46867.sh
+JIT/Regression/CLR-x86-JIT/V1-M12-Beta2/b31745/b31745.sh
+JIT/Regression/CLR-x86-JIT/V1-M12-Beta2/b31746/b31746.sh
+JIT/Regression/CLR-x86-JIT/V1-M12-Beta2/b37646/b37646.sh
+JIT/Regression/CLR-x86-JIT/V1-M12-Beta2/b41852/b41852.sh
+JIT/Regression/CLR-x86-JIT/V1-M12-Beta2/b51575/b51575.sh
+JIT/Regression/CLR-x86-JIT/V1-M13-RTM/b88793/b88793.sh
+JIT/Regression/CLR-x86-JIT/V1-M13-RTM/b91248/b91248.sh
+JIT/Regression/CLR-x86-JIT/V2.0-Beta2/b409748/b409748.sh
+Regressions/expl_double/expl_double_1.sh
diff --git a/tests/testsUnsupportedOutsideWindows.txt b/tests/testsUnsupportedOutsideWindows.txt
new file mode 100644
index 0000000000..dd0509ede1
--- /dev/null
+++ b/tests/testsUnsupportedOutsideWindows.txt
@@ -0,0 +1,11 @@
+Interop/NativeCallable/NativeCallableTest.sh
+JIT/Directed/coverage/oldtests/callipinvoke.sh
+JIT/Directed/coverage/oldtests/Desktop/callipinvoke_il_d.sh
+JIT/Directed/coverage/oldtests/Desktop/callipinvoke_il_r.sh
+JIT/Directed/IL/PInvokeTail/TailWinApi.sh
+JIT/Directed/pinvoke/jump.sh
+JIT/Directed/pinvoke/sysinfo_il.sh
+JIT/Regression/clr-x64-JIT/v2.1/b173569/b173569.sh
+managed/Compilation/Compilation.sh
+readytorun/mainv1.sh
+readytorun/mainv2.sh