author     Bruce Forstall <Bruce_Forstall@msn.com>  2018-08-22 13:55:53 -0700
committer  Bruce Forstall <Bruce_Forstall@msn.com>  2018-09-19 11:34:28 -0700
commit     707e84e98920546264db5af4252509a3a54f6203
tree       13cdbc5268758fbb063f1c481c028062c1be81f8
parent     541f710095b6c10f2e650ad68e3f5961aa467d02
Enable arm64 Linux testing in CI
Testing is enabled on a set of Qualcomm Centriq arm64 servers running Ubuntu 16.04. The set of jobs enabled almost matches the set run for arm32 Linux testing, including innerloop, JIT and GC Stress, corefx, and R2R. Temporarily, the innerloop jobs are commit jobs (invoked when a PR is merged) instead of "default trigger" jobs (invoked when a PR is submitted), until we get more experience with the robustness of the machines and jobs.

The machines are fast enough that they are not marked as "limited hardware" (like arm32 Linux machines). That means that many jobs are run daily, not weekly, as periodic jobs.

Notes about the changes:

1. The Linux arm64 machines are managed by Helix, which allocates them to Jenkins.
2. The arm64 OS used has been renamed from "small_page_size" to "Ubuntu16.04". If we add large page size machines, we'll need to add a differentiator.
3. The Jenkins "copy artifacts" plug-in runs ridiculously slowly on this hardware, for unknown reasons, so we copy artifacts directly using "wget".
4. Tests are built using "build-test.sh" on the (cross) build machine; we don't use Windows-built tests.
5. Added Jenkins archiving of build .log/.wrn/.err files.
6. Various tests were disabled in issues.targets, and with a new arm64/corefx_linux_test_exclusions.txt file, to get jobs to run clean. (Several issues have been opened to track these and other known failures.)
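To make notes 3 and 4 concrete, here is a rough sketch of what the generated arm64 Linux jobs end up running; the exact commands are parameterized in netci.groovy below, and the job name and build number in the example URL are purely illustrative:

    # On the (cross) build machine: build the tests with build-test.sh and zip the results.
    ./build-test.sh checked arm64 cross priority1
    zip -r tests.checked.zip ./bin/tests/Linux.arm64.Checked
    zip -r testnativebin.checked.zip ./bin/obj/Linux.arm64.Checked/tests

    # On the arm64 test machine: fetch the archived artifacts directly with wget
    # (instead of the slow Jenkins "copy artifacts" plug-in) and unpack them.
    ARTIFACT_URL=https://ci.dot.net/job/dotnet_coreclr/job/master/job/arm64_cross_checked_ubuntu16.04_innerloop_prtest/16/artifact
    wget --progress=dot:giga ${ARTIFACT_URL}/tests.checked.zip
    wget --progress=dot:giga ${ARTIFACT_URL}/testnativebin.checked.zip
    unzip -q -o ./tests.checked.zip          # unzips to ./bin/tests/Linux.arm64.Checked
    unzip -q -o ./testnativebin.checked.zip  # unzips to ./bin/obj/Linux.arm64.Checked/tests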
-rwxr-xr-x  build-test.sh                                 |   5
-rwxr-xr-x  netci.groovy                                  | 479
-rw-r--r--  tests/arm64/corefx_linux_test_exclusions.txt  |  13
-rw-r--r--  tests/issues.targets                          |  15
-rw-r--r--  tests/runtest.proj                            |   6
-rwxr-xr-x  tests/runtest.py                              | 172
-rw-r--r--  tests/tests.targets                           |   3
7 files changed, 386 insertions(+), 307 deletions(-)
diff --git a/build-test.sh b/build-test.sh
index 1652f989d8..8ae1e4cbe9 100755
--- a/build-test.sh
+++ b/build-test.sh
@@ -476,8 +476,9 @@ build_native_projects()
pushd "$intermediatesForBuild"
# Regenerate the CMake solution
- echo "Invoking \"$__ProjectRoot/src/pal/tools/gen-buildsys-clang.sh\" \"$__TestDir\" $__ClangMajorVersion $__ClangMinorVersion $platformArch $__BuildType $__CodeCoverage $generator $extraCmakeArguments $__cmakeargs"
- "$__ProjectRoot/src/pal/tools/gen-buildsys-clang.sh" "$__TestDir" $__ClangMajorVersion $__ClangMinorVersion $platformArch $__BuildType $__CodeCoverage $generator "$extraCmakeArguments" "$__cmakeargs"
+ # Force cross dir to point to project root cross dir, in case there is a cross build.
+ echo "Invoking CONFIG_DIR=\"$__ProjectRoot/cross\" \"$__ProjectRoot/src/pal/tools/gen-buildsys-clang.sh\" \"$__TestDir\" $__ClangMajorVersion $__ClangMinorVersion $platformArch $__BuildType $__CodeCoverage $generator $extraCmakeArguments $__cmakeargs"
+ CONFIG_DIR="$__ProjectRoot/cross" "$__ProjectRoot/src/pal/tools/gen-buildsys-clang.sh" "$__TestDir" $__ClangMajorVersion $__ClangMinorVersion $platformArch $__BuildType $__CodeCoverage $generator "$extraCmakeArguments" "$__cmakeargs"
popd
fi
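For context on why CONFIG_DIR is forced here: when cross building, gen-buildsys-clang.sh locates the CMake cross files under CONFIG_DIR, and by default it derives that directory from its first argument, which for the test build is the test directory (which has no cross/ subdirectory). A simplified, assumed sketch of that logic (not part of this change):

    # Assumed shape of the cross-build handling in gen-buildsys-clang.sh:
    if [ -n "$CROSSCOMPILE" ]; then
        # Default to "<first argument>/cross" unless the caller already set CONFIG_DIR,
        # as build-test.sh now does with "$__ProjectRoot/cross".
        CONFIG_DIR="${CONFIG_DIR:-$1/cross}"
        cmake_extra_defines="$cmake_extra_defines -DCMAKE_TOOLCHAIN_FILE=$CONFIG_DIR/toolchain.cmake"
    fi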
diff --git a/netci.groovy b/netci.groovy
index f9088dc819..5a854c4ed8 100755
--- a/netci.groovy
+++ b/netci.groovy
@@ -17,16 +17,16 @@ folder('illink')
Utilities.addStandardFolderView(this, 'illink', project)
def static getOSGroup(def os) {
- def osGroupMap = ['Ubuntu':'Linux',
- 'RHEL7.2': 'Linux',
- 'Ubuntu16.04': 'Linux',
- 'Ubuntu16.10': 'Linux',
- 'Debian8.4':'Linux',
- 'Fedora24':'Linux',
- 'OSX10.12':'OSX',
- 'Windows_NT':'Windows_NT',
- 'CentOS7.1': 'Linux',
- 'Tizen': 'Linux']
+ def osGroupMap = ['Ubuntu' : 'Linux',
+ 'Ubuntu16.04' : 'Linux',
+ 'Ubuntu16.10' : 'Linux',
+ 'RHEL7.2' : 'Linux',
+ 'Debian8.4' : 'Linux',
+ 'Fedora24' : 'Linux',
+ 'CentOS7.1' : 'Linux',
+ 'Tizen' : 'Linux',
+ 'OSX10.12' : 'OSX',
+ 'Windows_NT' : 'Windows_NT']
def osGroup = osGroupMap.get(os, null)
assert osGroup != null : "Could not find os group for ${os}"
return osGroupMap[os]
@@ -212,13 +212,15 @@ class Constants {
'x64': [
'Checked'
],
- 'arm64': [
- 'Debug'
- ],
'arm': [
'Checked'
]
],
+ 'Ubuntu16.04': [
+ 'arm64': [
+ 'Checked'
+ ]
+ ],
'CentOS7.1': [
'x64': [
'Debug',
@@ -474,8 +476,11 @@ class Constants {
'r2r_jitminopts',
'r2r_jitforcerelocs',
'r2r_gcstress15',
+ 'r2r_no_tiered_compilation',
'minopts',
'tieredcompilation',
+ 'no_tiered_compilation',
+ 'no_tiered_compilation_innerloop',
'forcerelocs',
'jitstress1',
'jitstress2',
@@ -712,15 +717,19 @@ def static setMachineAffinity(def job, def os, def architecture, def options = n
// |-> os == "Ubuntu" && (architecture == "arm") && options['is_flow_job'] == true
// Arm32 hardware (Build) -> Ubuntu 16.04 latest-or-auto
// |-> os == "Ubuntu" && (architecture == "arm") && options['is_build_job'] == true
- // Arm32 hardware (Test) -> ubuntu.1404.arm32.open
+ // Arm32 hardware (Test) -> Helix ubuntu.1404.arm32.open queue
// |-> os == "Ubuntu" && (architecture == "arm")
//
// Arm64 (Build) -> arm64-cross-latest
- // |-> os != "Windows_NT" && architecture == "arm64" && options['is_build_only'] == true
- // Arm64 Small Page Size (Test) -> arm64-small-page-size
- // |-> os != "Windows_NT" && architecture == "arm64" && options['large_pages'] == false
- // Arm64 Large Page Size (Test) -> arm64-huge-page-size
- // |-> os != "Windows_NT" && architecture == "arm64" && options['large_pages'] == true
+ // |-> os != "Windows_NT" && architecture == "arm64" && options['is_build_job'] == true
+ // Arm64 (Test) -> Helix Ubuntu.1604.Arm64.Open queue
+ // |-> os != "Windows_NT" && architecture == "arm64"
+ //
+ // Note: we are no longer using Jenkins tags "arm64-huge-page-size", "arm64-small-page-size".
+ // Support for Linux arm64 large page size has been removed for now, as it wasn't being used.
+ //
+ // Note: we are no longer using Jenkins tag 'latest-arm64' for arm/arm64 Windows build machines. Instead,
+ // we are using public VS2017 arm/arm64 tools in a VM from Helix.
// This has to be a arm arch
assert architecture in armArches
@@ -729,10 +738,6 @@ def static setMachineAffinity(def job, def os, def architecture, def options = n
def isBuild = options['use_arm64_build_machine'] == true
if (isBuild == true) {
- // Current set of machines with private Windows arm64 toolset:
- // Utilities.setMachineAffinity(job, os, 'latest-arm64')
- //
- // New set of machines with public Windows arm64 toolset, coming from Helix:
job.with {
label('Windows.10.Amd64.ClientRS4.DevEx.Open')
}
@@ -742,48 +747,34 @@ def static setMachineAffinity(def job, def os, def architecture, def options = n
} else {
assert os != 'Windows_NT'
- if (architecture == 'arm64') {
- assert os == 'Ubuntu'
- def isFlow = (options != null) && (options['is_flow_job'] == true)
- def isBuild = (options != null) && (options['is_build_job'] == true)
- if (isFlow || isBuild) {
- // Arm64 Ubuntu build machine. Build uses docker, so the actual host OS is not
- // very important. Therefore, use latest or auto. Flow jobs don't need to use
- // Arm64 hardware.
- Utilities.setMachineAffinity(job, 'Ubuntu16.04', 'latest-or-auto')
- } else {
- // Arm64 Linux test machines
- if ((options != null) && (options['large_pages'] == true)) {
- Utilities.setMachineAffinity(job, os, 'arm64-huge-page-size')
- } else {
- Utilities.setMachineAffinity(job, os, 'arm64-small-page-size')
- }
- }
- }
- else if (architecture == 'armem') {
+ if (architecture == 'armem') {
// arm emulator (Tizen). Build and test on same machine,
// using Docker.
assert os == 'Tizen'
Utilities.setMachineAffinity(job, 'Ubuntu', 'arm-cross-latest')
}
else {
- // arm Ubuntu on hardware.
- assert architecture == 'arm'
- assert os == 'Ubuntu'
+ // arm/arm64 Ubuntu on hardware.
+ assert architecture == 'arm' || architecture == 'arm64'
def isFlow = (options != null) && (options['is_flow_job'] == true)
def isBuild = (options != null) && (options['is_build_job'] == true)
if (isFlow || isBuild) {
- // arm Ubuntu build machine. Build uses docker, so the actual host OS is not
- // very important. Therefore, use latest or auto. Flow jobs don't need to use
- // arm hardware.
+ // arm/arm64 Ubuntu build machine. Build uses docker, so the actual host OS is not
+ // very important. Therefore, use latest or auto. Flow jobs don't need to use arm hardware.
Utilities.setMachineAffinity(job, 'Ubuntu16.04', 'latest-or-auto')
} else {
- // arm Ubuntu test machine
- // There is no tag (like, e.g., "arm-latest") for this, so don't call
- // Utilities.setMachineAffinity. Just add the machine affinity
- // manually. We specify the Helix queue name here.
- job.with {
- label('ubuntu.1404.arm32.open')
+ // arm/arm64 Ubuntu test machine. Specify the Helix queue name here.
+ if (architecture == 'arm64') {
+ assert os == 'Ubuntu16.04'
+ job.with {
+ label('Ubuntu.1604.Arm64.Open')
+ }
+ }
+ else {
+ assert os == 'Ubuntu'
+ job.with {
+ label('ubuntu.1404.arm32.open')
+ }
}
}
}
@@ -814,16 +805,7 @@ def static setJobMachineAffinity(def architecture, def os, def isBuildJob, def i
}
}
else {
- if (architecture == 'arm64') {
- if (isBuildJob) {
- affinityOptions = ['is_build_job': true]
- } else if (isFlowJob) {
- affinityOptions = ['is_flow_job': true]
- } else if (isTestJob) {
- affinityOptions = [ "large_pages" : false ]
- }
- }
- else if (architecture == 'arm') {
+ if ((architecture == 'arm64') || (architecture == 'arm')) {
if (isBuildJob) {
affinityOptions = ['is_build_job': true]
} else if (isFlowJob) {
@@ -1185,7 +1167,7 @@ def static isNeedDocker(def architecture, def os, def isBuild) {
}
}
else if (architecture == 'arm64') {
- if (os == 'Ubuntu') {
+ if (os == 'Ubuntu16.04') {
return true
}
}
@@ -1215,7 +1197,7 @@ def static getDockerImageName(def architecture, def os, def isBuild) {
}
}
else if (architecture == 'arm64') {
- if (os == 'Ubuntu') {
+ if (os == 'Ubuntu16.04') {
return "microsoft/dotnet-buildtools-prereqs:ubuntu-16.04-cross-arm64-a3ae44b-20180315221921"
}
}
@@ -1233,17 +1215,19 @@ def static getDockerImageName(def architecture, def os, def isBuild) {
// We have a limited amount of some hardware. For these, scale back the periodic testing we do,
// and only allowing using this hardware in some specific branches.
def static jobRequiresLimitedHardware(def architecture, def os) {
- if (((architecture == 'arm64') || (architecture == 'arm')) && (os == 'Windows_NT')) {
- // These test jobs require ARM64 hardware
- return true
- }
- else if ((architecture == 'arm') && (os == 'Ubuntu')) {
- // These test jobs require Linux/arm32 hardware
+ if (architecture == 'arm') {
+ // arm Windows and Linux hardware is limited.
return true
}
- else if ((architecture == 'arm64') && (os == 'Ubuntu')) {
- // These test jobs require Linux/arm64 hardware
- return true
+ else if (architecture == 'arm64') {
+ if (os == 'Windows_NT') {
+ // arm64 Windows hardware is limited.
+ return true
+ }
+ else {
+ // arm64 Linux hardware is fast enough to allow more frequent jobs
+ return false
+ }
}
else {
return false
@@ -1276,22 +1260,14 @@ def static getJobName(def configuration, def architecture, def os, def scenario,
baseName = architecture.toLowerCase() + '_' + configuration.toLowerCase() + '_' + os.toLowerCase()
}
break
- case 'arm64':
- if (os.toLowerCase() == "windows_nt") {
- // These are cross builds
- baseName = architecture.toLowerCase() + '_cross_' + configuration.toLowerCase() + '_' + os.toLowerCase()
- }
- else {
- // Defaults to a small page size set of machines.
- baseName = architecture.toLowerCase() + '_' + configuration.toLowerCase() + '_' + "small_page_size"
- }
- break
case 'armem':
// These are cross builds
assert os == 'Tizen'
baseName = 'armel_cross_' + configuration.toLowerCase() + '_' + os.toLowerCase()
break
case 'arm':
+ case 'arm64':
+ // These are cross builds
baseName = architecture.toLowerCase() + '_cross_' + configuration.toLowerCase() + '_' + os.toLowerCase()
break
case 'x86':
@@ -1316,11 +1292,6 @@ def static addNonPRTriggers(def job, def branch, def isPR, def architecture, def
return
}
- // No arm64 Ubuntu cron jobs for now: we don't have enough hardware.
- if ((architecture == 'arm64') && (os != 'Windows_NT')) {
- return
- }
-
// Ubuntu x86 CI jobs are failing. Disable non-PR triggered jobs to avoid these constant failures
// until this is fixed. Tracked by https://github.com/dotnet/coreclr/issues/19003.
if (architecture == 'x86' && os == 'Ubuntu') {
@@ -1331,6 +1302,13 @@ def static addNonPRTriggers(def job, def branch, def isPR, def architecture, def
switch (scenario) {
case 'innerloop':
case 'no_tiered_compilation_innerloop':
+ // TEMPORARY: make arm64 Linux innerloop jobs push jobs, not default triggered jobs, until we have experience
+ // with the machines running these jobs (and the jobs themselves), to understand how robust they are.
+ // We should never get here (in the "innerloop" cases) for non-PR jobs, except for this TEMPORARY exception.
+ assert (isInnerloopTestScenario(scenario) && (architecture == 'arm64') && (os == 'Ubuntu16.04') && (configuration == 'Checked'))
+ addGithubPushTriggerHelper(job)
+ break
+
case 'crossgen_comparison':
break
case 'normal':
@@ -1340,7 +1318,7 @@ def static addNonPRTriggers(def job, def branch, def isPR, def architecture, def
if (isFlowJob && architecture == 'x86' && os == 'Ubuntu') {
addPeriodicTriggerHelper(job, '@daily')
}
- else if (isFlowJob || os == 'Windows_NT' || !(os in Constants.crossList)) {
+ else if (isFlowJob || os == 'Windows_NT' || (architecture == 'x64' && !(os in Constants.crossList))) {
addGithubPushTriggerHelper(job)
}
break
@@ -1397,7 +1375,6 @@ def static addNonPRTriggers(def job, def branch, def isPR, def architecture, def
assert !(os in bidailyCrossList)
// r2r gets a push trigger for checked/release
if (configuration == 'Checked' || configuration == 'Release') {
- assert (os == 'Windows_NT') || (os in Constants.crossList)
if (architecture == 'x64' && os != 'OSX10.12') {
//Flow jobs should be Windows, Ubuntu, OSX0.12, or CentOS
if (isFlowJob || os == 'Windows_NT') {
@@ -1415,21 +1392,18 @@ def static addNonPRTriggers(def job, def branch, def isPR, def architecture, def
addGithubPushTriggerHelper(job)
}
}
- // arm64 r2r jobs should only run weekly.
- // arm64 r2r jobs are only run on Windows (Q: should they run on non-Windows?)
- else if (architecture == 'arm64') {
- if (os == 'Windows_NT') {
- if (isFlowJob) {
- addPeriodicTriggerHelper(job, '@weekly')
- }
- }
- }
// arm r2r jobs should only run weekly.
else if (architecture == 'arm') {
if (isFlowJob) {
addPeriodicTriggerHelper(job, '@weekly')
}
}
+ // arm64 r2r jobs should only run weekly.
+ else if (architecture == 'arm64') {
+ if (isFlowJob) {
+ addPeriodicTriggerHelper(job, '@weekly')
+ }
+ }
}
break
case 'r2r_jitstress1':
@@ -1455,13 +1429,10 @@ def static addNonPRTriggers(def job, def branch, def isPR, def architecture, def
break
}
- // GC Stress 15 r2r gets a push trigger for checked/release
if (configuration == 'Checked' || configuration == 'Release') {
- assert (os == 'Windows_NT') || (os in Constants.crossList)
if (architecture == 'x64') {
//Flow jobs should be Windows, Ubuntu, OSX10.12, or CentOS
if (isFlowJob || os == 'Windows_NT') {
- // Add a weekly periodic trigger
addPeriodicTriggerHelper(job, 'H H * * 3,6') // some time every Wednesday and Saturday
}
}
@@ -1471,15 +1442,12 @@ def static addNonPRTriggers(def job, def branch, def isPR, def architecture, def
addPeriodicTriggerHelper(job, 'H H * * 3,6') // some time every Wednesday and Saturday
}
}
- // arm64 r2r jobs are only run on Windows (Q: should they run on non-Windows?)
- else if (architecture == 'arm64') {
- if (os == 'Windows_NT') {
- if (isFlowJob) {
- addPeriodicTriggerHelper(job, '@weekly')
- }
+ else if (architecture == 'arm') {
+ if (isFlowJob) {
+ addPeriodicTriggerHelper(job, '@weekly')
}
}
- else if (architecture == 'arm') {
+ else if (architecture == 'arm64') {
if (isFlowJob) {
addPeriodicTriggerHelper(job, '@weekly')
}
@@ -1518,8 +1486,6 @@ def static addNonPRTriggers(def job, def branch, def isPR, def architecture, def
assert !(os in bidailyCrossList)
// ILASM/ILDASM roundtrip one gets a daily build, and only for release
if (architecture == 'x64' && configuration == 'Release') {
- // We don't expect to see a job generated except in these scenarios
- assert (os == 'Windows_NT') || (os in Constants.crossList)
if (isFlowJob || os == 'Windows_NT') {
addPeriodicTriggerHelper(job, '@daily')
}
@@ -1598,7 +1564,6 @@ def static addNonPRTriggers(def job, def branch, def isPR, def architecture, def
if ((architecture == 'arm64') && isCoreFxScenario(scenario) && !isFlowJob) {
break
}
- assert (os == 'Windows_NT') || (os in Constants.crossList)
if (jobRequiresLimitedHardware(architecture, os)) {
addPeriodicTriggerHelper(job, '@weekly')
}
@@ -1614,11 +1579,6 @@ def static addNonPRTriggers(def job, def branch, def isPR, def architecture, def
if (os in bidailyCrossList) {
break
}
- if ((architecture == 'arm64') && (os != 'Windows_NT')) {
- // TODO: should we have cron jobs for arm64 Linux GCStress?
- break
- }
- assert (os == 'Windows_NT') || (os in Constants.crossList)
addPeriodicTriggerHelper(job, '@weekly')
break
case 'gcstress0xc':
@@ -1628,21 +1588,16 @@ def static addNonPRTriggers(def job, def branch, def isPR, def architecture, def
case 'gcstress0xc_jitstress1':
case 'gcstress0xc_jitstress2':
case 'gcstress0xc_minopts_heapverify1':
- if (os == 'CentOS7.1') {
- break
- }
if (os == 'OSX10.12') {
// GCStress=C is currently not supported on OS X
break
}
- if (os in bidailyCrossList) {
+ if (os == 'CentOS7.1') {
break
}
- if ((architecture == 'arm64') && (os != 'Windows_NT')) {
- // TODO: should we have cron jobs for arm64 Linux GCStress?
+ if (os in bidailyCrossList) {
break
}
- assert (os == 'Windows_NT') || (os in Constants.crossList)
addPeriodicTriggerHelper(job, '@weekly')
break
@@ -1954,34 +1909,26 @@ def static addTriggers(def job, def branch, def isPR, def architecture, def os,
switch (os) {
case 'Ubuntu':
- // TODO: make arm and arm64 Ubuntu more alike
-
- if (architecture == 'arm') {
- // Triggers on the non-flow jobs aren't necessary
- if (!isFlowJob) {
- needsTrigger = false
- break
- }
+ case 'Ubuntu16.04':
- switch (scenario) {
- case 'innerloop':
- case 'no_tiered_compilation_innerloop':
- if (configuration == 'Checked') {
- isDefaultTrigger = true
- }
- break
- }
+ // Triggers on the non-flow jobs aren't necessary
+ if (!isFlowJob) {
+ needsTrigger = false
+ break
}
- else {
- assert architecture == 'arm64'
- switch (scenario) {
- case 'innerloop':
- if (configuration == 'Debug' && !isFlowJob) {
- isDefaultTrigger = true
+ switch (scenario) {
+ case 'innerloop':
+ case 'no_tiered_compilation_innerloop':
+ if (configuration == 'Checked') {
+ // TEMPORARY: make arm64 Linux innerloop jobs push jobs, not default triggered jobs, until we have experience
+ // with the machines running these jobs (and the jobs themselves), to understand how robust they are.
+ if (architecture == 'arm64') {
+ break
}
- break
- }
+ isDefaultTrigger = true
+ }
+ break
}
break
@@ -2469,7 +2416,7 @@ def static calculateBuildCommands(def newJob, def scenario, def branch, def isPR
case 'arm64':
case 'arm':
// Non-Windows ARM cross builds on hardware run on Ubuntu only
- assert (os == 'Ubuntu')
+ assert (os == 'Ubuntu') || (os == 'Ubuntu16.04')
// Add some useful information to the log file. Ignore return codes.
buildCommands += "uname -a || true"
@@ -2534,7 +2481,7 @@ def static calculateBuildCommands(def newJob, def scenario, def branch, def isPR
buildCommands += "${dockerCmd}zip -r ${workspaceRelativeArtifactsArchive} ${workspaceRelativeCoreLib} ${workspaceRelativeCoreRootDir} ${workspaceRelativeCrossGenComparisonScript} ${workspaceRelativeResultsDir}"
Utilities.addArchival(newJob, "${workspaceRelativeArtifactsArchive}")
}
- else {
+ else if (architecture == 'arm') {
// Then, using the same docker image, generate the CORE_ROOT layout using build-test.sh to
// download the appropriate CoreFX packages.
// Note that docker should not be necessary here, for the "generatelayoutonly" case, but we use it
@@ -2554,6 +2501,31 @@ def static calculateBuildCommands(def newJob, def scenario, def branch, def isPR
Utilities.addArchival(newJob, "coreroot.${lowerConfiguration}.zip,testnativebin.${lowerConfiguration}.zip", "")
}
+ else {
+ assert architecture == 'arm64'
+
+ // Then, using the same docker image, build the tests and generate the CORE_ROOT layout.
+ // Linux/arm64 does not use Windows-built tests.
+
+ def testBuildOpts = ""
+ if (priority == '1') {
+ testBuildOpts = "priority1"
+ }
+
+ buildCommands += "${dockerCmd}\${WORKSPACE}/build-test.sh ${lowerConfiguration} ${architecture} cross ${testBuildOpts}"
+
+ // ZIP up the built tests (including CORE_ROOT and native test components copied to the CORE_ROOT) for the test job (created in the flow job code)
+ buildCommands += "zip -r tests.${lowerConfiguration}.zip ./bin/tests/Linux.${architecture}.${configuration}"
+
+ // We still use the testnativebin files until they get placed properly in the tests directory (next to their respective tests).
+ // With https://github.com/dotnet/coreclr/pull/19918 this shouldn't be needed anymore.
+ buildCommands += "zip -r testnativebin.${lowerConfiguration}.zip ./bin/obj/Linux.${architecture}.${configuration}/tests"
+
+ Utilities.addArchival(newJob, "tests.${lowerConfiguration}.zip,testnativebin.${lowerConfiguration}.zip", "")
+ }
+
+ // Archive the build logs from both product and test builds.
+ Utilities.addArchival(newJob, "bin/Logs/*.log,bin/Logs/*.wrn,bin/Logs/*.err", "")
// We need to clean up the build machines; the docker build leaves newly built files with root permission, which
// the cleanup task in Jenkins can't remove.
@@ -2585,15 +2557,22 @@ def static calculateBuildCommands(def newJob, def scenario, def branch, def isPR
// Returns true if the job should be generated.
def static shouldGenerateJob(def scenario, def isPR, def architecture, def configuration, def os, def isBuildOnly)
{
- // The "innerloop" (Pri-0 testing) scenario is only available as PR triggered.
- if (scenario == 'innerloop' && !isPR) {
- return false
- }
+ // The various "innerloop" jobs are only available as PR triggered.
- // Run basic corefx tests only on PR-triggered jobs
- // Runs under Release and Checked
- if (scenario == 'corefx_innerloop' && !isPR) {
- return false
+ if (!isPR) {
+ if (isInnerloopTestScenario(scenario) && (architecture == 'arm64') && (os == 'Ubuntu16.04') && (configuration == 'Checked')) {
+ // TEMPORARY: make arm64 Linux innerloop jobs push jobs, not default triggered jobs, until we have experience
+ // with the machines running these jobs (and the jobs themselves), to understand how robust they are.
+ return true
+ }
+
+ if (isInnerloopTestScenario(scenario)) {
+ return false
+ }
+
+ if (scenario == 'corefx_innerloop') {
+ return false
+ }
}
// Tizen is only supported for armem architecture
@@ -2604,12 +2583,16 @@ def static shouldGenerateJob(def scenario, def isPR, def architecture, def confi
// Filter based on architecture.
switch (architecture) {
- case 'arm64':
case 'arm':
if ((os != 'Windows_NT') && (os != 'Ubuntu')) {
return false
}
break
+ case 'arm64':
+ if ((os != 'Windows_NT') && (os != 'Ubuntu16.04')) {
+ return false
+ }
+ break
case 'armem':
if (os != 'Tizen') {
return false
@@ -2666,7 +2649,10 @@ def static shouldGenerateJob(def scenario, def isPR, def architecture, def confi
return false
}
- def isEnabledOS = (os == 'Windows_NT') || (os == 'Ubuntu' && (isCoreFxScenario(scenario) || architecture == 'arm' || architecture == 'arm64'))
+ def isEnabledOS = (os == 'Windows_NT') ||
+ (os == 'Ubuntu' && (architecture == 'x64') && isCoreFxScenario(scenario)) ||
+ (os == 'Ubuntu' && architecture == 'arm') ||
+ (os == 'Ubuntu16.04' && architecture == 'arm64')
if (!isEnabledOS) {
return false
}
@@ -2685,6 +2671,7 @@ def static shouldGenerateJob(def scenario, def isPR, def architecture, def confi
break
case 'arm':
+ case 'arm64':
// We use build only jobs for Windows arm/arm64 cross-compilation corefx testing, so we need to generate builds for that.
// No "regular" Windows arm corefx jobs, e.g.
// For Ubuntu arm corefx testing, we use regular jobs (not "build only" since only Windows has "build only", and
@@ -2701,21 +2688,7 @@ def static shouldGenerateJob(def scenario, def isPR, def architecture, def confi
}
break
- case 'arm64':
- if (os == 'Windows_NT') {
- if (! (isBuildOnly && isCoreFxScenario(scenario)) ) {
- return false
- }
- }
- else {
- if (!isCoreFxScenario(scenario)) {
- return false
- }
- }
- break
-
default:
- // arm64: stress is handled through flow jobs.
// armem: no stress jobs for ARM emulator.
return false
}
@@ -3150,7 +3123,12 @@ def static CreateWindowsArmTestJob(def dslFactory, def project, def architecture
// Returns the newly created job.
def static CreateOtherTestJob(def dslFactory, def project, def branch, def architecture, def os, def configuration, def scenario, def isPR, def inputCoreCLRBuildName, def inputTestsBuildName)
{
- def isUbuntuArmJob = (os == "Ubuntu") && ((architecture == 'arm') || (architecture == 'arm64')) // ARM Ubuntu running on hardware (not emulator)
+ def lowerConfiguration = configuration.toLowerCase()
+
+ def isUbuntuArm64Job = ((os == "Ubuntu16.04") && (architecture == 'arm64'))
+ def isUbuntuArm32Job = ((os == "Ubuntu") && (architecture == 'arm'))
+ def isUbuntuArmJob = isUbuntuArm32Job || isUbuntuArm64Job
+
def doCoreFxTesting = isCoreFxScenario(scenario)
def workspaceRelativeFxRootLinux = "_/fx" // only used for CoreFX testing
@@ -3289,11 +3267,16 @@ def static CreateOtherTestJob(def dslFactory, def project, def branch, def archi
// Coreclr build we are trying to test
//
// ** NOTE ** This will, correctly, overwrite the CORE_ROOT from the Windows test archive
+ //
+ // HACK: the Ubuntu arm64 copyArtifacts Jenkins plug-in is ridiculously slow (45 minutes to
+ // 1.5 hours for this step). Instead, directly use wget, which is fast (1 minute).
- copyArtifacts(inputCoreCLRBuildName) {
- excludePatterns('**/testResults.xml', '**/*.ni.dll')
- buildSelector {
- buildNumber('${CORECLR_BUILD}')
+ if (!isUbuntuArm64Job) {
+ copyArtifacts(inputCoreCLRBuildName) {
+ excludePatterns('**/testResults.xml', '**/*.ni.dll')
+ buildSelector {
+ buildNumber('${CORECLR_BUILD}')
+ }
}
}
@@ -3302,6 +3285,64 @@ def static CreateOtherTestJob(def dslFactory, def project, def branch, def archi
shell("uname -a || true")
}
+ if (isUbuntuArm64Job) {
+ // Copy the required artifacts directly, using wget, e.g.:
+ //
+ // https://ci.dot.net/job/dotnet_coreclr/job/master/job/arm64_cross_checked_ubuntu16.04_innerloop_prtest/16/artifact/testnativebin.checked.zip
+ // https://ci.dot.net/job/dotnet_coreclr/job/master/job/arm64_cross_checked_ubuntu16.04_innerloop_prtest/16/artifact/tests.checked.zip
+ //
+ // parameterized as:
+ //
+ // https://ci.dot.net/job/${mungedProjectName}/job/${mungedBranchName}/job/${inputJobName}/${CORECLR_BUILD}/artifact/testnativebin.checked.zip
+ // https://ci.dot.net/job/${mungedProjectName}/job/${mungedBranchName}/job/${inputJobName}/${CORECLR_BUILD}/artifact/tests.checked.zip
+ //
+ // CoreFX example artifact URLs:
+ //
+ // https://ci.dot.net/job/dotnet_coreclr/job/dev_unix_test_workflow/job/jitstress/job/arm64_cross_checked_ubuntu16.04_corefx_baseline_prtest/1/artifact/_/fx/fxruntime.zip
+ // https://ci.dot.net/job/dotnet_coreclr/job/dev_unix_test_workflow/job/jitstress/job/arm64_cross_checked_ubuntu16.04_corefx_baseline_prtest/1/artifact/_/fx/fxtests.zip
+ //
+ // Note that the source might be in a "jitstress" folder.
+ //
+ // Use `--progress=dot:giga` to display some progress output, but limit it in the log file.
+ //
+ // Use `--directory-prefix=_/fx` to specify where to put the corefx files (to match what other platforms do). Use this instead of `-O`.
+
+ shell("echo \"Using wget instead of the Jenkins copy artifacts plug-in to copy artifacts from ${inputCoreCLRBuildName}\"")
+
+ def mungedProjectName = Utilities.getFolderName(project)
+ def mungedBranchName = Utilities.getFolderName(branch)
+
+ def doCrossGenComparison = isCrossGenComparisonScenario(scenario)
+ def inputCoreCLRBuildScenario = isInnerloopTestScenario(scenario) ? 'innerloop' : 'normal'
+ if (doCoreFxTesting || doCrossGenComparison) {
+ // These depend on unique builds for each scenario
+ inputCoreCLRBuildScenario = scenario
+ }
+ def sourceJobName = getJobName(configuration, architecture, os, inputCoreCLRBuildScenario, false)
+ def inputJobName = Utilities.getFullJobName(sourceJobName, isPR)
+
+ // Need to add the sub-folder if necessary.
+ def inputJobPath = "job/${inputJobName}"
+ def folderName = getJobFolder(inputCoreCLRBuildScenario)
+ if (folderName != '') {
+ inputJobPath = "job/${folderName}/job/${inputJobName}"
+ }
+
+ def inputUrlRoot = "https://ci.dot.net/job/${mungedProjectName}/job/${mungedBranchName}/${inputJobPath}/\${CORECLR_BUILD}/artifact"
+
+ if (doCoreFxTesting) {
+ shell("mkdir -p ${workspaceRelativeFxRootLinux}")
+ shell("wget --progress=dot:giga --directory-prefix=${workspaceRelativeFxRootLinux} ${inputUrlRoot}/${workspaceRelativeFxRootLinux}/fxtests.zip")
+ shell("wget --progress=dot:giga --directory-prefix=${workspaceRelativeFxRootLinux} ${inputUrlRoot}/${workspaceRelativeFxRootLinux}/fxruntime.zip")
+ shell("wget --progress=dot:giga --directory-prefix=${workspaceRelativeFxRootLinux} ${inputUrlRoot}/${workspaceRelativeFxRootLinux}/run-test.sh")
+ shell("chmod +x ${workspaceRelativeFxRootLinux}/run-test.sh")
+ }
+ else {
+ shell("wget --progress=dot:giga ${inputUrlRoot}/testnativebin.${lowerConfiguration}.zip")
+ shell("wget --progress=dot:giga ${inputUrlRoot}/tests.${lowerConfiguration}.zip")
+ }
+ }
+
if (architecture == 'x86') {
shell("mkdir ./bin/CoreFxNative")
@@ -3322,10 +3363,12 @@ def static CreateOtherTestJob(def dslFactory, def project, def branch, def archi
// CoreFX testing downloads the CoreFX tests, not the coreclr tests. Also, unzip the built CoreFX layout/runtime directories.
if (doCoreFxTesting) {
- shell("unzip -o ${workspaceRelativeFxRootLinux}/fxtests.zip || exit 0")
- shell("unzip -o ${workspaceRelativeFxRootLinux}/fxruntime.zip || exit 0")
+ shell("unzip -q -o ${workspaceRelativeFxRootLinux}/fxtests.zip || exit 0")
+ shell("unzip -q -o ${workspaceRelativeFxRootLinux}/fxruntime.zip || exit 0")
}
- else {
+ else if (architecture != 'arm64') {
+ // ARM64 copies the tests from the build machine; this is for unzip'ing tests copied from a Windows build.
+ //
// Unzip the tests first. Exit with 0
shell("unzip -q -o ./bin/tests/tests.zip -d ./bin/tests/${osGroup}.${architecture}.${configuration} || exit 0")
shell("rm -r ./bin/tests/${osGroup}.${architecture}.${configuration}/Tests/Core_Root || exit 0")
@@ -3338,9 +3381,17 @@ def static CreateOtherTestJob(def dslFactory, def project, def branch, def archi
// copied correctly.
if (!doCoreFxTesting) {
if (isUbuntuArmJob) {
- def lowerConfiguration = configuration.toLowerCase()
- shell("unzip -o ./coreroot.${lowerConfiguration}.zip || exit 0") // unzips to ./bin/tests/Linux.${architecture}.${configuration}/Tests/Core_Root
- shell("unzip -o ./testnativebin.${lowerConfiguration}.zip || exit 0") // unzips to ./bin/obj/Linux.${architecture}.${configuration}/tests
+ if (architecture == 'arm') {
+ shell("unzip -q -o ./coreroot.${lowerConfiguration}.zip || exit 0") // unzips to ./bin/tests/Linux.${architecture}.${configuration}/Tests/Core_Root
+ shell("unzip -q -o ./testnativebin.${lowerConfiguration}.zip || exit 0") // unzips to ./bin/obj/Linux.${architecture}.${configuration}/tests
+ }
+ else {
+ assert architecture == 'arm64'
+ shell("unzip -q -o ./tests.${lowerConfiguration}.zip || exit 0") // unzips to ./bin/tests/Linux.${architecture}.${configuration}
+
+ // We still use the testnativebin files until they get placed properly in the tests directory (next to their respective tests).
+ shell("unzip -q -o ./testnativebin.${lowerConfiguration}.zip || exit 0") // unzips to ./bin/obj/Linux.${architecture}.${configuration}/tests
+ }
}
else {
shell("./build-test.sh ${architecture} ${configuration} generatelayoutonly")
@@ -3385,6 +3436,8 @@ def static CreateOtherTestJob(def dslFactory, def project, def branch, def archi
else {
def runScript = "${dockerCmd}./tests/runtest.sh"
+ // TODO: the testNativeBinDir shouldn't be necessary if the native test binaries are placed properly with their corresponding managed test code.
+
shell("""\
${runScript} \\
--testRootDir=\"\${WORKSPACE}/bin/tests/${osGroup}.${architecture}.${configuration}\" \\
@@ -3584,24 +3637,31 @@ build(params + [CORECLR_BUILD: coreclrBuildJob.build.number,
// Returns true if the job should be generated.
def static shouldGenerateFlowJob(def scenario, def isPR, def architecture, def configuration, def os)
{
- // The "innerloop" (Pri-0 testing) scenario is only available as PR triggered.
- if (scenario == 'innerloop' && !isPR) {
- return false
- }
-
- if (scenario == 'corefx_innerloop') {
- return false
+ // The various "innerloop" jobs are only available as PR triggered.
+
+ if (!isPR) {
+ if (isInnerloopTestScenario(scenario)) {
+ return false
+ }
+
+ if (scenario == 'corefx_innerloop') {
+ return false
+ }
}
// Filter based on OS and architecture.
switch (architecture) {
- case 'arm64':
case 'arm':
if (os != "Ubuntu" && os != "Windows_NT") {
return false
}
break
+ case 'arm64':
+ if (os != "Ubuntu16.04" && os != "Windows_NT") {
+ return false
+ }
+ break
case 'x86':
if (os != "Ubuntu") {
return false
@@ -3643,19 +3703,13 @@ def static shouldGenerateFlowJob(def scenario, def isPR, def architecture, def c
}
else {
// Non-Windows
- if (architecture == 'arm64') {
- if (!(scenario in Constants.validLinuxArm64Scenarios)) {
- return false
- }
-
- if (isNormalOrInnerloop && (configuration == 'Debug')) {
- // The arm32/arm64 Debug configuration for innerloop/normal scenario is a special case: it does a build only, and no test run.
- // To do that, it doesn't require a flow job.
+ if (architecture == 'arm') {
+ if (!(scenario in Constants.validLinuxArmScenarios)) {
return false
}
}
- else if (architecture == 'arm') {
- if (!(scenario in Constants.validLinuxArmScenarios)) {
+ else if (architecture == 'arm64') {
+ if (!(scenario in Constants.validLinuxArm64Scenarios)) {
return false
}
}
@@ -3760,6 +3814,10 @@ def static shouldGenerateFlowJob(def scenario, def isPR, def architecture, def c
}
break
+ case 'corefx_innerloop':
+ // No flow job needed
+ return false
+
default:
println("Unknown scenario: ${scenario}")
assert false
@@ -3814,15 +3872,14 @@ Constants.allScenarios.each { scenario ->
def inputTestsBuildName = null
- if (!windowsArmJob && !doCoreFxTesting & !doCrossGenComparison) {
+ // Ubuntu Arm64 jobs do the test build on the build machine, and thus don't depend on a Windows build.
+ def isUbuntuArm64Job = ((os == "Ubuntu16.04") && (architecture == 'arm64'))
+
+ if (!windowsArmJob && !doCoreFxTesting & !doCrossGenComparison && !isUbuntuArm64Job) {
def testBuildScenario = isInnerloopTestScenario(scenario) ? 'innerloop' : 'normal'
def inputTestsBuildArch = architecture
- if (architecture == "arm64") {
- // Use the x64 test build for arm64 unix
- inputTestsBuildArch = "x64"
- }
- else if (architecture == "arm") {
+ if (architecture == "arm") {
// Use the x86 test build for arm unix
inputTestsBuildArch = "x86"
}
diff --git a/tests/arm64/corefx_linux_test_exclusions.txt b/tests/arm64/corefx_linux_test_exclusions.txt
new file mode 100644
index 0000000000..1331a07ba3
--- /dev/null
+++ b/tests/arm64/corefx_linux_test_exclusions.txt
@@ -0,0 +1,13 @@
+System.Buffers.Tests # timeout. baseline and jit stress.
+System.Drawing.Common.Tests # bad result data. baseline and jit stress.
+System.Collections.Immutable.Tests # JitStress=2
+System.Collections.Tests # JitStress=2
+System.Linq.Expressions.Tests # JitStress=1
+System.Linq.Tests # JitStress=2
+System.Net.NameResolution.Functional.Tests # baseline
+System.Net.NameResolution.Pal.Tests # baseline
+System.Numerics.Vectors.Tests # JitStress=2
+System.Runtime.Serialization.Formatters.Tests # baseline + many stress variants
+System.Security.Cryptography.Algorithms.Tests # bad result. baseline and jit stress.
+System.Text.RegularExpressions.Tests # https://github.com/dotnet/coreclr/issues/17754 -- timeout -- JitMinOpts + Tiered only
+System.Threading.Tests # bad result. baseline
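The exclusion list above uses a simple "assembly name, then an optional '#' comment" format. A hypothetical sketch of how a CI script could consume it (the actual wiring is done in netci.groovy, not in this file):

    # Hypothetical consumer: strip comments and blank lines to get one excluded
    # CoreFX test assembly per line, then skip those assemblies in the test run.
    EXCLUSIONS=$(sed 's/#.*//' tests/arm64/corefx_linux_test_exclusions.txt | awk 'NF {print $1}')
    for t in $EXCLUSIONS; do
        echo "Skipping CoreFX test assembly: $t"
    done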
diff --git a/tests/issues.targets b/tests/issues.targets
index a9db89618c..a4b7279f6c 100644
--- a/tests/issues.targets
+++ b/tests/issues.targets
@@ -222,6 +222,12 @@
<ExcludeList Include="$(XunitTestBinBase)/CoreMangLib/cti/system/string/StringFormat2/*">
<Issue>needs triage</Issue>
</ExcludeList>
+ <ExcludeList Include="$(XunitTestBinBase)/JIT/HardwareIntrinsics/Arm64/Simd/*">
+ <Issue>18895</Issue>
+ </ExcludeList>
+ <ExcludeList Include="$(XunitTestBinBase)/JIT/Methodical/MDArray/DataTypes/float_cs_ro/*">
+ <Issue>18989</Issue>
+ </ExcludeList>
<ExcludeList Include="$(XunitTestBinBase)/JIT/Regression/JitBlue/DevDiv_590772/DevDiv_590772/*">
<Issue>needs triage</Issue>
</ExcludeList>
@@ -357,9 +363,6 @@
<ExcludeList Include="$(XunitTestBinBase)/reflection/DefaultInterfaceMethods/InvokeConsumer/*">
<Issue>9565</Issue>
</ExcludeList>
- <ExcludeList Include="$(XunitTestBinBase)/JIT/HardwareIntrinsics/Arm64/Simd/*">
- <Issue>18895</Issue>
- </ExcludeList>
<ExcludeList Include="$(XunitTestBinBase)/reflection/DefaultInterfaceMethods/Emit/*">
<Issue>9565</Issue>
</ExcludeList>
@@ -468,6 +471,12 @@
<ExcludeList Include="$(XunitTestBinBase)/JIT/jit64/eh/FinallyExec/nonlocalexitinroot/*">
<Issue>Test times out</Issue>
</ExcludeList>
+ <ExcludeList Include="$(XunitTestBinBase)/JIT/Regression/VS-ia64-JIT/M00/b80373/b80373/*">
+ <Issue>20029</Issue>
+ </ExcludeList>
+ <ExcludeList Include="$(XunitTestBinBase)/GC/Scenarios/DoublinkList/dlstack/*">
+ <Issue>Release only crash</Issue>
+ </ExcludeList>
</ItemGroup>
<!-- Unix arm32 specific -->
diff --git a/tests/runtest.proj b/tests/runtest.proj
index 2db2918353..fc81b1528a 100644
--- a/tests/runtest.proj
+++ b/tests/runtest.proj
@@ -57,7 +57,9 @@
<!-- Target to check the test build, to see if it looks ok. We've had several cases where a change inadvertently and drastically changes
the set of tests that are built, and that change is unnoticed. The most common case is for a build of the Priority 1 tests
to only build the Priority 0 tests. This target is run after a test build to verify that the basic number of tests that were
- built is basically what was expected.
+ built is basically what was expected. When this was written, there were about 2500 Priority 0 tests and about 12270 Priority 1
+ tests (differing slightly based on platform). We currently check that the number of Priority 0 tests is greater than 2000 and
+ less than 3000, and the number of Priority 1 tests is greater than 11000.
-->
<Target Name="CheckTestBuild" DependsOnTargets="GetListOfTestCmds">
<Error Condition="!Exists('$(XunitTestBinBase)')"
@@ -71,7 +73,7 @@
<Error Condition="'$(CLRTestPriorityToBuild)' == '0' and '$(TestCount)' &lt;= 2000" Text="Unexpected test count. Expected &gt; 2000, found $(TestCount).'" />
<Error Condition="'$(CLRTestPriorityToBuild)' == '0' and '$(TestCount)' &gt;= 3000" Text="Unexpected test count. Expected &lt; 3000, found $(TestCount).'" />
- <Error Condition="'$(CLRTestPriorityToBuild)' == '1' and '$(TestCount)' &lt;= 11500" Text="Unexpected test count. Expected &gt; 1150, found $(TestCount).'" />
+ <Error Condition="'$(CLRTestPriorityToBuild)' == '1' and '$(TestCount)' &lt;= 11000" Text="Unexpected test count. Expected &gt; 11000, found $(TestCount).'" />
<Error Condition="'$(CLRTestPriorityToBuild)' != '0' and '$(CLRTestPriorityToBuild)' != '1'" Text="Unknown priority $(CLRTestPriorityToBuild)" />
</Target>
diff --git a/tests/runtest.py b/tests/runtest.py
index bd45d1118b..0845183690 100755
--- a/tests/runtest.py
+++ b/tests/runtest.py
@@ -427,7 +427,7 @@ def create_and_use_test_env(_os, env, func):
if len(list(complus_vars.keys())) > 0:
print("Found COMPlus variables in the current environment")
- print()
+ print("")
file_header = None
@@ -470,13 +470,13 @@ REM Temporary test env for test run.
test_env.write(line)
contents += line
- print()
+ print("")
print("TestEnv: %s" % test_env.name)
- print()
+ print("")
print("Contents:")
- print()
+ print("")
print(contents)
- print()
+ print("")
return func(test_env.name)
@@ -601,6 +601,8 @@ def call_msbuild(coreclr_repo_location,
"/p:__LogsDir=%s" % logs_dir]
print(" ".join(command))
+
+ sys.stdout.flush() # flush output before creating sub-process
proc = subprocess.Popen(command)
try:
@@ -743,6 +745,7 @@ def run_tests(host_os,
os.environ["__TestTimeout"] = str(120*60*1000) # 1,800,000 ms
# Set Core_Root
+ print("Setting CORE_ROOT=%s" % core_root)
os.environ["CORE_ROOT"] = core_root
# Set test env if exists
@@ -826,7 +829,7 @@ def setup_args(args):
print("Using default test location.")
print("TestLocation: %s" % default_test_location)
- print()
+ print("")
else:
# The tests for the default location have not been built.
@@ -888,7 +891,7 @@ def setup_args(args):
print("Using default test location.")
print("TestLocation: %s" % default_test_location)
- print()
+ print("")
if core_root is None:
default_core_root = os.path.join(test_location, "Tests", "Core_Root")
@@ -898,7 +901,7 @@ def setup_args(args):
print("Using default location for core_root.")
print("Core_Root: %s" % core_root)
- print()
+ print("")
elif args.generate_layout is False:
# CORE_ROOT has not been setup correctly.
@@ -918,7 +921,7 @@ def setup_args(args):
print("Using default location for test_native_bin_location.")
test_native_bin_location = os.path.join(os.path.join(coreclr_repo_location, "bin", "obj", "%s.%s.%s" % (host_os, arch, build_type), "tests"))
print("Native bin location: %s" % test_native_bin_location)
- print()
+ print("")
if not os.path.isdir(test_native_bin_location):
print("Error, test_native_bin_location: %s, does not exist." % test_native_bin_location)
@@ -982,43 +985,29 @@ def setup_coredis_tools(coreclr_repo_location, host_os, arch, core_root):
core_root(str) : core_root
"""
- test_location = os.path.join(coreclr_repo_location, "tests")
-
- def is_coredis_tools_supported(host_os, arch):
- """ Is coredis tools supported on this os/arch
-
- Args:
- host_os(str): os
- arch(str) : arch
-
- """
- unsupported_unix_arches = ["arm", "arm64"]
-
- if host_os.lower() == "osx":
- return False
-
- return True
+ if host_os.lower() == "osx":
+ print("GCStress C is not supported on your platform.")
+ sys.exit(1)
- if host_os != "Windows_NT" and arch in unsupported_unix_arches:
- return False
+ unsupported_arches = ["arm", "arm64"]
- return True
+ if arch in unsupported_arches:
+ # Nothing to do; CoreDisTools unneeded.
+ return
- if is_coredis_tools_supported(host_os, arch):
- command = None
- if host_os == "Windows_NT":
- command = [os.path.join(test_location, "setup-stress-dependencies.cmd"), "/arch", arch, "/outputdir", core_root]
- else:
- command = [os.path.join(test_location, "setup-stress-dependencies.sh"), "--outputDir=%s" % core_root]
+ command = None
+ test_location = os.path.join(coreclr_repo_location, "tests")
+ if host_os == "Windows_NT":
+ command = [os.path.join(test_location, "setup-stress-dependencies.cmd"), "/arch", arch, "/outputdir", core_root]
+ else:
+ command = [os.path.join(test_location, "setup-stress-dependencies.sh"), "--outputDir=%s" % core_root]
- proc = subprocess.Popen(command)
- proc.communicate()
+ sys.stdout.flush() # flush output before creating sub-process
+ proc = subprocess.Popen(command)
+ proc.communicate()
- if proc.returncode != 0:
- print("setup_stress_dependencies.sh failed.")
- sys.exit(1)
- else:
- print("GCStress C is not supported on your platform.")
+ if proc.returncode != 0:
+ print("Failed to set up stress dependencies.")
sys.exit(1)
def precompile_core_root(test_location,
@@ -1094,22 +1083,19 @@ def precompile_core_root(test_location,
return_code = proc.returncode
- passed = False
if return_code == -2146230517:
print("%s is not a managed assembly." % file)
- return passed
+ return False
if return_code != 0:
- print("Unable to precompile %s" % file)
- return passed
+ print("Unable to precompile %s (%d)" % (file, return_code))
+ return False
print("Successfully precompiled %s" % file)
- passed = True
-
- return passed
+ return True
print("Precompiling all assemblies in %s" % core_root)
- print()
+ print("")
env = os.environ.copy()
@@ -1135,7 +1121,7 @@ def precompile_core_root(test_location,
for dll in dlls:
call_crossgen(dll, env)
- print()
+ print("")
def setup_core_root(host_os,
arch,
@@ -1218,6 +1204,7 @@ def setup_core_root(host_os,
print("Restoring packages...")
print(" ".join(command))
+ sys.stdout.flush() # flush output before creating sub-process
if not g_verbose:
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
@@ -1230,7 +1217,7 @@ def setup_core_root(host_os,
sys.exit(1)
if proc.returncode == 1:
- "Error test dependency resultion failed."
+ print("Error: package restore failed.")
return False
os.environ["__BuildLogRootName"] = ""
@@ -1283,6 +1270,7 @@ def setup_core_root(host_os,
print("Creating Core_Root...")
print(" ".join(command))
+ sys.stdout.flush() # flush output before creating sub-process
if not g_verbose:
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
@@ -1295,7 +1283,7 @@ def setup_core_root(host_os,
sys.exit(1)
if proc.returncode == 1:
- "Error test dependency resultion failed."
+ print("Error: creating Core_Root failed.")
return False
os.environ["__BuildLogRootName"] = ""
@@ -1327,12 +1315,12 @@ def setup_core_root(host_os,
shutil.copytree(item, new_dir)
# Copy the product dir to the core_root directory
- print()
+ print("")
print("Copying Product Bin to Core_Root:")
print("cp -r %s%s* %s" % (product_location, os.path.sep, core_root))
copy_tree(product_location, core_root)
print("---------------------------------------------------------------------")
- print()
+ print("")
if is_corefx:
corefx_utility_setup = os.path.join(coreclr_repo_location,
@@ -1347,11 +1335,13 @@ def setup_core_root(host_os,
"msbuild",
os.path.join(coreclr_repo_location, "tests", "runtest.proj"),
"/p:GenerateRuntimeLayout=true"]
+
+ sys.stdout.flush() # flush output before creating sub-process
proc = subprocess.Popen(msbuild_command)
proc.communicate()
if not proc.returncode == 0:
- "Error test dependency resultion failed."
+ print("Error: generating test host failed.")
return False
os.environ["__BuildLogRootName"] = ""
@@ -1361,11 +1351,12 @@ def setup_core_root(host_os,
"/t:Restore",
corefx_utility_setup]
+ sys.stdout.flush() # flush output before creating sub-process
proc = subprocess.Popen(msbuild_command)
proc.communicate()
if proc.returncode == 1:
- "Error test dependency resultion failed."
+ print("Error: msbuild failed.")
return False
corefx_logpath = os.path.join(coreclr_repo_location,
@@ -1383,11 +1374,12 @@ def setup_core_root(host_os,
"/p:OutputPath=%s" % corefx_logpath,
corefx_utility_setup]
+ sys.stdout.flush() # flush output before creating sub-process
proc = subprocess.Popen(msbuild_command)
proc.communicate()
if proc.returncode == 1:
- "Error test dependency resultion failed."
+ print("Error: msbuild failed.")
return False
print("Core_Root setup.")
@@ -1476,6 +1468,7 @@ def build_test_wrappers(host_os,
print("Creating test wrappers...")
print(" ".join(command))
+ sys.stdout.flush() # flush output before creating sub-process
if not g_verbose:
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -1515,7 +1508,7 @@ def build_test_wrappers(host_os,
sys.exit(1)
if proc.returncode == 1:
- "Error test dependency resultion failed."
+ print("Error: creating test wrappers failed.")
return False
def find_test_from_name(host_os, test_location, test_name):
@@ -1740,14 +1733,6 @@ def print_summary(tests):
else:
skipped_tests.append(test)
- print()
- print("Total tests run: %d" % len(tests))
- print()
- print("Total passing tests: %d" % len(passed_tests))
- print("Total failed tests: %d" % len(failed_tests))
- print("Total skipped tests: %d" % len(skipped_tests))
- print()
-
failed_tests.sort(key=lambda item: item["time"], reverse=True)
passed_tests.sort(key=lambda item: item["time"], reverse=True)
skipped_tests.sort(key=lambda item: item["time"], reverse=True)
@@ -1788,41 +1773,51 @@ def print_summary(tests):
break
if len(failed_tests) > 0:
- print("Failed tests:")
- print()
+ print("%d failed tests:" % len(failed_tests))
+ print("")
print_tests_helper(failed_tests, None)
-
- if len(passed_tests) > 50:
- print()
- print("50 slowest passing tests:")
- print()
- print_tests_helper(passed_tests, 50)
+ # The following code is currently disabled, as it produces too much verbosity in a normal
+ # test run. It could be put under a switch, or else just enabled as needed when investigating
+ # test slowness.
+ #
+ # if len(passed_tests) > 50:
+ # print("")
+ # print("50 slowest passing tests:")
+ # print("")
+ # print_tests_helper(passed_tests, 50)
if len(failed_tests) > 0:
- print()
+ print("")
print("#################################################################")
print("Output of failing tests:")
- print()
+ print("")
for item in failed_tests:
print("[%s]: " % item["test_path"])
- print()
+ print("")
test_output = item["test_output"]
- # XUnit results are captured as escaped, escaped characters.
+ # XUnit results are captured as escaped characters.
test_output = test_output.replace("\\r", "\r")
test_output = test_output.replace("\\n", "\n")
print(test_output)
- print()
+ print("")
- print()
+ print("")
print("#################################################################")
print("End of output of failing tests")
print("#################################################################")
- print()
+ print("")
+
+ print("")
+ print("Total tests run : %d" % len(tests))
+ print("Total passing tests: %d" % len(passed_tests))
+ print("Total failed tests : %d" % len(failed_tests))
+ print("Total skipped tests: %d" % len(skipped_tests))
+ print("")
def create_repro(host_os, arch, build_type, env, core_root, coreclr_repo_location, tests):
""" Go through the failing tests and create repros for them
@@ -1849,13 +1844,11 @@ def create_repro(host_os, arch, build_type, env, core_root, coreclr_repo_locatio
repro_location = os.path.join(bin_location, "repro", "%s.%s.%s" % (host_os, arch, build_type))
if os.path.isdir(repro_location):
shutil.rmtree(repro_location)
-
- print("mkdir %s" % repro_location)
- os.makedirs(repro_location)
- print()
- print("Creating repo files, they can be found at: %s" % repro_location)
+ print("")
+ print("Creating repro files at: %s" % repro_location)
+ os.makedirs(repro_location)
assert os.path.isdir(repro_location)
# Now that the repro_location exists under <coreclr_location>/bin/repro
@@ -1865,7 +1858,6 @@ def create_repro(host_os, arch, build_type, env, core_root, coreclr_repo_locatio
debug_env.write_repro()
print("Repro files written.")
- print("They can be found at %s" % repro_location)
def do_setup(host_os,
arch,
@@ -1893,7 +1885,7 @@ def do_setup(host_os,
core_root)
if not success:
- print("Error GenerateLayout has failed.")
+ print("Error: GenerateLayout failed.")
sys.exit(1)
if unprocessed_args.generate_layout_only:
@@ -1932,7 +1924,7 @@ def do_setup(host_os,
else:
build_test_wrappers(host_os, arch, build_type, coreclr_repo_location, test_location)
- run_tests(host_os,
+ return run_tests(host_os,
arch,
build_type,
core_root,
@@ -1992,6 +1984,8 @@ def main(args):
print_summary(tests)
create_repro(host_os, arch, build_type, env, core_root, coreclr_repo_location, tests)
+ return ret_code
+
################################################################################
# __main__
################################################################################
diff --git a/tests/tests.targets b/tests/tests.targets
index 6c3afcd7f1..f6b62a79c8 100644
--- a/tests/tests.targets
+++ b/tests/tests.targets
@@ -48,6 +48,9 @@
<XunitArgs>$(XunitArgs) @(IncludeTraitsItems->'-trait %(Identity)', ' ')</XunitArgs>
<XunitArgs>$(XunitArgs) @(ExcludeTraitsItems->'-notrait %(Identity)', ' ')</XunitArgs>
+ <!-- Color output doesn't work well when capturing the output in the CI system -->
+ <XunitArgs>$(XunitArgs) -nocolor</XunitArgs>
+
<CorerunExecutable Condition="'$(RunningOnUnix)' == 'true'">$(CORE_ROOT)\corerun</CorerunExecutable>
<CorerunExecutable Condition="'$(RunningOnUnix)' != 'true'">$(CORE_ROOT)\corerun.exe</CorerunExecutable>
</PropertyGroup>