-rw-r--r--  perf.groovy                       | 152
-rw-r--r--  tests/scripts/run-xunit-perf.cmd  |  52
2 files changed, 130 insertions, 74 deletions
diff --git a/perf.groovy b/perf.groovy
index 1eb115d8bd..b490219d1b 100644
--- a/perf.groovy
+++ b/perf.groovy
@@ -28,78 +28,110 @@ def static getOSGroup(def os) {
 [true, false].each { isPR ->
     ['Windows_NT'].each { os ->
         ['x64', 'x86'].each { arch ->
-            def architecture = arch
+            [true, false].each { isSmoketest ->
+                def architecture = arch
+                def jobName = isSmoketest ? "perf_perflab_${os}_${arch}_smoketest" : "perf_perflab_${os}_${arch}"
 
-            def newJob = job(Utilities.getFullJobName(project, "perf_perflab_${os}_${arch}", isPR)) {
-                // Set the label.
-                label('windows_clr_perf')
-                wrappers {
-                    credentialsBinding {
-                        string('BV_UPLOAD_SAS_TOKEN', 'CoreCLR Perf BenchView Sas')
-                    }
+                if (arch == 'x86jit32')
+                {
+                    architecture = 'x86'
+                    testEnv = '-testEnv %WORKSPACE%\\tests\\x86\\compatjit_x86_testenv.cmd'
+                }
+                else if (arch == 'x86')
+                {
+                    testEnv = '-testEnv %WORKSPACE%\\tests\\x86\\ryujit_x86_testenv.cmd'
                 }
 
-                if (isPR)
-                {
-                    parameters
+                def newJob = job(Utilities.getFullJobName(project, jobName, isPR)) {
+                    // Set the label.
+                    label('windows_clr_perf')
+                    wrappers {
+                        credentialsBinding {
+                            string('BV_UPLOAD_SAS_TOKEN', 'CoreCLR Perf BenchView Sas')
+                        }
+                    }
+
+                    if (isPR)
                     {
-                        stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that you will be used to build the full title of a run in Benchview. The final name will be of the form <branch> private BenchviewCommitName')
+                        parameters
+                        {
+                            stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that you will be used to build the full title of a run in Benchview. The final name will be of the form <branch> private BenchviewCommitName')
+                        }
                     }
-                }
-                def configuration = 'Release'
-                def runType = isPR ? 'private' : 'rolling'
-                def benchViewName = isPR ? 'coreclr private %BenchviewCommitName%' : 'coreclr rolling %GIT_BRANCH_WITHOUT_ORIGIN% %GIT_COMMIT%'
-
-                steps {
-                    // Batch
+                    if (isSmoketest)
+                    {
+                        parameters
+                        {
+                            stringParam('XUNIT_PERFORMANCE_MAX_ITERATION', '2', 'Sets the number of iterations to one. We want to do this so that we can run as fast as possible as this is just for smoke testing')
+                            stringParam('XUNIT_PERFORMANCE_MAX_ITERATION_INNER_SPECIFIED', '2', 'Sets the number of iterations to one. We want to do this so that we can run as fast as possible as this is just for smoke testing')
+                        }
+                    }
+                    def configuration = 'Release'
+                    def runType = isPR ? 'private' : 'rolling'
+                    def benchViewName = isPR ? 'coreclr private %BenchviewCommitName%' : 'coreclr rolling %GIT_BRANCH_WITHOUT_ORIGIN% %GIT_COMMIT%'
+                    def uploadString = isSmoketest ? '' : '-uploadToBenchview'
+
+                    steps {
+                        // Batch
 
-                    batchFile("if exist \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\" rmdir /s /q \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\"")
-                    batchFile("C:\\Tools\\nuget.exe install Microsoft.BenchView.JSONFormat -Source http://benchviewtestfeed.azurewebsites.net/nuget -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion")
-                    //Do this here to remove the origin but at the front of the branch name as this is a problem for BenchView
-                    //we have to do it all as one statement because cmd is called each time and we lose the set environment variable
-                    batchFile("if [%GIT_BRANCH:~0,7%] == [origin/] (set GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH:origin/=%) else (set GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH%)\n" +
-                    "set BENCHVIEWNAME=${benchViewName}\n" +
-                    "set BENCHVIEWNAME=%BENCHVIEWNAME:\"=%\n" +
-                    "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\submission-metadata.py\" --name \"%BENCHVIEWNAME%\" --user \"dotnet-bot@microsoft.com\"\n" +
-                    "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\build.py\" git --branch %GIT_BRANCH_WITHOUT_ORIGIN% --type ${runType}")
-                    batchFile("py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\machinedata.py\"")
-                    batchFile("set __TestIntermediateDir=int&&build.cmd ${configuration} ${architecture}")
+                        batchFile("powershell wget https://dist.nuget.org/win-x86-commandline/latest/nuget.exe -OutFile \"%WORKSPACE%\\nuget.exe\"")
+                        batchFile("if exist \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\" rmdir /s /q \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\"")
+                        batchFile("\"%WORKSPACE%\\nuget.exe\" install Microsoft.BenchView.JSONFormat -Source http://benchviewtestfeed.azurewebsites.net/nuget -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion")
+                        //Do this here to remove the origin but at the front of the branch name as this is a problem for BenchView
+                        //we have to do it all as one statement because cmd is called each time and we lose the set environment variable
+                        batchFile("if [%GIT_BRANCH:~0,7%] == [origin/] (set GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH:origin/=%) else (set GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH%)\n" +
+                        "set BENCHVIEWNAME=${benchViewName}\n" +
+                        "set BENCHVIEWNAME=%BENCHVIEWNAME:\"=%\n" +
+                        "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\submission-metadata.py\" --name \"%BENCHVIEWNAME%\" --user \"dotnet-bot@microsoft.com\"\n" +
+                        "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\build.py\" git --branch %GIT_BRANCH_WITHOUT_ORIGIN% --type ${runType}")
+                        batchFile("py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\machinedata.py\"")
+                        batchFile("set __TestIntermediateDir=int&&build.cmd ${configuration} ${architecture}")
 
-                    batchFile("tests\\runtest.cmd ${configuration} ${architecture} GenerateLayoutOnly")
+                        if (arch == 'x86jit32')
+                        {
+                            // Download package and copy compatjit into Core_Root
+                            batchFile("C:\\Tools\\nuget.exe install runtime.win7-${architecture}.Microsoft.NETCore.Jit -Source https://dotnet.myget.org/F/dotnet-core -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion\n" +
+                            "xcopy \"%WORKSPACE%\\runtime.win7-x86.Microsoft.NETCore.Jit\\runtimes\\win7-x86\\native\\compatjit.dll\" \"%WORKSPACE%\\bin\\Product\\${os}.${architecture}.${configuration}\" /Y")
+                        }
 
-                    batchFile("tests\\scripts\\run-xunit-perf.cmd -arch ${arch} -configuration ${configuration} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\perflab\\Perflab -library -uploadToBenchview \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" -runtype ${runType}")
-                    batchFile("tests\\scripts\\run-xunit-perf.cmd -arch ${arch} -configuration ${configuration} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\Jit\\Performance\\CodeQuality -uploadToBenchview \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" -runtype ${runType}")
-                }
-            }
+                        batchFile("tests\\runtest.cmd ${configuration} ${architecture} GenerateLayoutOnly")
+
+                        batchFile("tests\\scripts\\run-xunit-perf.cmd -arch ${arch} -configuration ${configuration} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\perflab\\Perflab -library -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType}")
+                        batchFile("tests\\scripts\\run-xunit-perf.cmd -arch ${arch} -configuration ${configuration} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\Jit\\Performance\\CodeQuality -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType}")
+                    }
+                }
 
-            // Save machinedata.json to /artifact/bin/ Jenkins dir
-            def archiveSettings = new ArchivalSettings()
-            archiveSettings.addFiles('Perf-*.xml')
-            archiveSettings.addFiles('Perf-*.etl')
-            Utilities.addArchival(newJob, archiveSettings)
+                if (isSmoketest)
+                {
+                    Utilities.setMachineAffinity(newJob, "Windows_NT", '20170427-elevated')
+                }
+                // Save machinedata.json to /artifact/bin/ Jenkins dir
+                def archiveSettings = new ArchivalSettings()
+                archiveSettings.addFiles('Perf-*.xml')
+                archiveSettings.addFiles('Perf-*.etl')
+                Utilities.addArchival(newJob, archiveSettings)
 
-            Utilities.standardJobSetup(newJob, project, isPR, "*/${branch}")
-
-            newJob.with {
-                wrappers {
-                    timeout {
-                        absolute(240)
+                Utilities.standardJobSetup(newJob, project, isPR, "*/${branch}")
+
+                newJob.with {
+                    wrappers {
+                        timeout {
+                            absolute(240)
+                        }
                     }
                 }
-            }
-
-            if (isPR) {
-                TriggerBuilder builder = TriggerBuilder.triggerOnPullRequest()
-                builder.setGithubContext("${os} ${arch} CoreCLR Perf Tests")
-                builder.triggerOnlyOnComment()
-                builder.setCustomTriggerPhrase("(?i).*test\\W+${os}\\W+${arch}\\W+perf.*")
-                builder.triggerForBranch(branch)
-                builder.emitTrigger(newJob)
-            }
-            else {
-                // Set a push trigger
-                TriggerBuilder builder = TriggerBuilder.triggerOnCommit()
-                builder.emitTrigger(newJob)
+
+                if (isPR) {
+                    TriggerBuilder builder = TriggerBuilder.triggerOnPullRequest()
+                    builder.setGithubContext("${os} ${arch} CoreCLR Perf Tests")
+                    builder.triggerForBranch(branch)
+                    builder.emitTrigger(newJob)
+                }
+                else {
+                    // Set a push trigger
+                    TriggerBuilder builder = TriggerBuilder.triggerOnCommit()
+                    builder.emitTrigger(newJob)
+                }
             }
         }
     }
diff --git a/tests/scripts/run-xunit-perf.cmd b/tests/scripts/run-xunit-perf.cmd
index 80f8544a67..11279aeeda 100644
--- a/tests/scripts/run-xunit-perf.cmd
+++ b/tests/scripts/run-xunit-perf.cmd
@@ -5,6 +5,7 @@
 @echo off
 @if defined _echo echo on
 
+setlocal ENABLEDELAYEDEXPANSION
 setlocal
   set ERRORLEVEL=
   set BENCHVIEW_RUN_TYPE=local
@@ -15,14 +16,17 @@ setlocal
   set TEST_CONFIG=Release
   set IS_SCENARIO_TEST=
   set USAGE_DISPLAYED=
+  set SHOULD_UPLOAD_TO_BENCHVIEW=
+  set BENCHVIEW_PATH=
 
   call :parse_command_line_arguments %*
   if defined USAGE_DISPLAYED exit /b %ERRORLEVEL%
 
-  call :set_test_architecture || exit /b 1
-  call :verify_core_overlay   || exit /b 1
-  call :set_perf_run_log      || exit /b 1
-  call :setup_sandbox         || exit /b 1
+  call :set_test_architecture  || exit /b 1
+  call :verify_benchview_tools || exit /b 1
+  call :verify_core_overlay    || exit /b 1
+  call :set_perf_run_log       || exit /b 1
+  call :setup_sandbox          || exit /b 1
 
   call :run_cmd "%CORECLR_REPO%\Tools\dotnetcli\dotnet.exe" restore "%CORECLR_REPO%\tests\src\Common\PerfHarness\project.json" || exit /b 1
   call :run_cmd "%CORECLR_REPO%\Tools\dotnetcli\dotnet.exe" publish "%CORECLR_REPO%\tests\src\Common\PerfHarness\project.json" -c Release -o "%CORECLR_REPO%\sandbox" || exit /b 1
@@ -92,7 +96,7 @@ setlocal
   )
 
   rem optionally generate results for benchview
-  if not [%BENCHVIEW_PATH%] == [] (
+  if exist "%BENCHVIEW_PATH%" (
     call :generate_results_for_benchview || exit /b 1
   ) else (
     type "%XMLOUT%" | findstr /i /c:"test name"
@@ -119,6 +123,11 @@ rem ****************************************************************************
     shift
     goto :parse_command_line_arguments
   )
+  IF /I [%~1] == [-uploadtobenchview] (
+    set SHOULD_UPLOAD_TO_BENCHVIEW=1
+    shift
+    goto :parse_command_line_arguments
+  )
   IF /I [%~1] == [-runtype] (
     set BENCHVIEW_RUN_TYPE=%~2
     shift
@@ -130,7 +139,7 @@ rem ****************************************************************************
     shift
     goto :parse_command_line_arguments
   )
-  IF /I [%~1] == [-uploadtobenchview] (
+  IF /I [%~1] == [-generatebenchviewdata] (
     set BENCHVIEW_PATH=%~2
     shift
     shift
@@ -175,7 +184,19 @@ rem Sets the test architecture.
 rem ****************************************************************************
   set TEST_ARCH=%TEST_ARCHITECTURE%
   exit /b 0
-
+
+:verify_benchview_tools
+rem ****************************************************************************
+rem Verifies that the path to the benchview tools is correct.
+rem ****************************************************************************
+  if defined BENCHVIEW_PATH (
+    if not exist "%BENCHVIEW_PATH%" (
+      call :print_error BenchView path: "%BENCHVIEW_PATH%" was specified, but it does not exist.
+      exit /b 1
+    )
+  )
+  exit /b 0
+
 :verify_core_overlay
 rem ****************************************************************************
 rem Verify that the Core_Root folder exist.
@@ -253,10 +274,12 @@ setlocal
     exit /b 1
   )
 
-  call :run_cmd py.exe "%BENCHVIEW_PATH%\upload.py" submission.json --container coreclr
-  IF %ERRORLEVEL% NEQ 0 (
-    call :print_error Uploading to BenchView failed.
-    exit /b 1
+  if defined SHOULD_UPLOAD_TO_BENCHVIEW (
+    call :run_cmd py.exe "%BENCHVIEW_PATH%\upload.py" submission.json --container coreclr
+    IF !ERRORLEVEL! NEQ 0 (
+      call :print_error Uploading to BenchView failed.
+      exit /b 1
+    )
   )
 
   exit /b %ERRORLEVEL%
@@ -265,14 +288,15 @@ rem ****************************************************************************
 rem Script's usage.
 rem ****************************************************************************
   set USAGE_DISPLAYED=1
-  echo run-xunit-perf.cmd -testBinLoc ^<path_to_tests^> [-library] [-arch] ^<x86^|x64^> [-configuration] ^<Release^|Debug^> [-uploadToBenchview] ^<path_to_benchview_tools^> [-runtype] ^<rolling^|private^> [-scenarioTest]
+  echo run-xunit-perf.cmd -testBinLoc ^<path_to_tests^> [-library] [-arch] ^<x86^|x64^> [-configuration] ^<Release^|Debug^> [-generateBenchviewData] ^<path_to_benchview_tools^> [-runtype] ^<rolling^|private^> [-scenarioTest]
   echo/
   echo For the path to the tests you can pass a parent directory and the script will grovel for
   echo   all tests in subdirectories and run them.
   echo The library flag denotes whether the tests are build as libraries (.dll) or an executable (.exe)
   echo Architecture defaults to x64 and configuration defaults to release.
-  echo -uploadtoBenchview is used to specify a path to the Benchview tooling and when this flag is
-  echo   set we will upload the results of the tests to the coreclr container in benchviewupload.
+  echo -generateBenchviewData is used to specify a path to the Benchview tooling and when this flag is
+  echo   set we will generate the results for upload to benchview.
+  echo -uploadToBenchview If this flag is set the generated benchview test data will be uploaded.
   echo Runtype sets the runtype that we upload to Benchview, rolling for regular runs, and private for
   echo   PRs.
   echo -scenarioTest should be included if you are running a scenario benchmark.
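
The renamed switches split BenchView handling into two steps: -generateBenchviewData points the script at the BenchView tooling and generates submission.json, while the new -uploadToBenchview flag decides whether that submission is pushed to the coreclr container. As a rough sketch only (the test-binary location and the BenchView checkout path below are placeholders, not values taken from this commit), a private run against the updated script could look like:

  rem Generate BenchView data locally without uploading (placeholder paths)
  tests\scripts\run-xunit-perf.cmd -arch x64 -configuration Release ^
      -testBinLoc bin\tests\Windows_NT.x64.Release\Jit\Performance\CodeQuality ^
      -generateBenchviewData "C:\src\Microsoft.BenchView.JSONFormat\tools" -runtype private
  rem Append -uploadToBenchview only when the generated submission.json should actually be uploaded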