Diffstat (limited to 'tests/scripts/run-xunit-perf.cmd')
-rw-r--r--  tests/scripts/run-xunit-perf.cmd  207
1 file changed, 139 insertions(+), 68 deletions(-)
diff --git a/tests/scripts/run-xunit-perf.cmd b/tests/scripts/run-xunit-perf.cmd
index 7895b3f16e..e223a3bda9 100644
--- a/tests/scripts/run-xunit-perf.cmd
+++ b/tests/scripts/run-xunit-perf.cmd
@@ -5,7 +5,7 @@
@echo off
@if defined _echo echo on
-setlocal
+setlocal ENABLEDELAYEDEXPANSION
set ERRORLEVEL=
set BENCHVIEW_RUN_TYPE=local
set CORECLR_REPO=%CD%
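Enabling delayed expansion is what lets later hunks read !ERRORLEVEL! inside parenthesized blocks. A minimal sketch of the difference, using a hypothetical tool name:

    @echo off
    setlocal ENABLEDELAYEDEXPANSION
    (
        call some_tool.exe
        rem %ERRORLEVEL% is expanded once, when the whole block is parsed, so it
        rem would be stale here; !ERRORLEVEL! is read at execution time instead.
        if !ERRORLEVEL! NEQ 0 echo some_tool.exe failed with code !ERRORLEVEL!
    )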
@@ -15,17 +15,27 @@ setlocal
set TEST_CONFIG=Release
set IS_SCENARIO_TEST=
set USAGE_DISPLAYED=
+ set SHOULD_UPLOAD_TO_BENCHVIEW=
+ set BENCHVIEW_PATH=
+ set COLLECTION_FLAGS=stopwatch
+ set ETW_COLLECTION=Off
+ set STABILITY_PREFIX=
+ set BENCHVIEW_GROUP=CoreCLR
+ set HAS_WARMUP_RUN=--drop-first-value
+ set BETTER=desc
call :parse_command_line_arguments %*
if defined USAGE_DISPLAYED exit /b %ERRORLEVEL%
- call :set_test_architecture || exit /b 1
- call :verify_core_overlay || exit /b 1
- call :set_perf_run_log || exit /b 1
- call :setup_sandbox || exit /b 1
+ call :set_test_architecture || exit /b 1
+ call :set_collection_config || exit /b 1
+ call :verify_benchview_tools || exit /b 1
+ call :verify_core_overlay || exit /b 1
+ call :set_perf_run_log || exit /b 1
+ call :setup_sandbox || exit /b 1
- call :run_cmd "%CORECLR_REPO%\Tools\dotnetcli\dotnet.exe" restore "%CORECLR_REPO%\tests\src\Common\PerfHarness\project.json" || exit /b 1
- call :run_cmd "%CORECLR_REPO%\Tools\dotnetcli\dotnet.exe" publish "%CORECLR_REPO%\tests\src\Common\PerfHarness\project.json" -c Release -o "%CORECLR_REPO%\sandbox" || exit /b 1
+ call :run_cmd "%CORECLR_REPO%\Tools\dotnetcli\dotnet.exe" restore "%CORECLR_REPO%\tests\src\Common\PerfHarness\PerfHarness.csproj" || exit /b 1
+ call :run_cmd "%CORECLR_REPO%\Tools\dotnetcli\dotnet.exe" publish "%CORECLR_REPO%\tests\src\Common\PerfHarness\PerfHarness.csproj" -c Release -o "%CORECLR_REPO%\sandbox" || exit /b 1
rem TODO: Remove the version of the package to copy; e.g., if multiple versions exist, then error out?
call :run_cmd xcopy /sy "%CORECLR_REPO%\packages\Microsoft.Diagnostics.Tracing.TraceEvent\1.0.3-alpha-experimental\lib\native"\* . >> %RUNLOG% || exit /b 1
@@ -34,7 +44,6 @@ setlocal
rem find and stage the tests
set /A "LV_FAILURES=0"
for /R %CORECLR_PERF% %%T in (*.%TEST_FILE_EXT%) do (
- rem Skip known failures
call :run_benchmark %%T || (
set /A "LV_FAILURES+=1"
)
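The loop keeps running after individual benchmark failures and only tallies them. A condensed sketch of the pattern, assuming delayed expansion and a hypothetical .exe extension:

    set /A "LV_FAILURES=0"
    for /R %CORECLR_PERF% %%T in (*.exe) do (
        rem The || branch runs only when :run_benchmark exits nonzero.
        call :run_benchmark %%T || set /A "LV_FAILURES+=1"
    )
    rem Delayed expansion keeps the count accurate even when read inside a block.
    if !LV_FAILURES! NEQ 0 echo !LV_FAILURES! benchmarks failed.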
@@ -60,17 +69,13 @@ rem ****************************************************************************
setlocal
set BENCHNAME=%~n1
set BENCHDIR=%~p1
- set PERFOUT=perf-%BENCHNAME%
- set XMLOUT=%PERFOUT%.xml
rem copy benchmark and any input files
- call :run_cmd xcopy /s %~1 . >> %RUNLOG% || exit /b 1
+ call :run_cmd xcopy /sy %~1 . >> %RUNLOG% || exit /b 1
if exist "%BENCHDIR%*.txt" (
- call :run_cmd xcopy /s %BENCHDIR%*.txt . >> %RUNLOG% || exit /b 1
+ call :run_cmd xcopy /sy %BENCHDIR%*.txt . >> %RUNLOG% || exit /b 1
)
- set CORE_ROOT=%CORECLR_REPO%\sandbox
-
rem setup additional environment variables
if DEFINED TEST_ENV (
if EXIST "%TEST_ENV%" (
@@ -78,13 +83,26 @@ setlocal
)
)
- set BENCHNAME_LOG_FILE_NAME=%BENCHNAME%.log
+ echo/
+ echo/ ----------
+ echo/ Running %BENCHNAME%
+ echo/ ----------
+
+ rem CORE_ROOT environment variable is used by some benchmarks such as Roslyn / CscBench.
+ set CORE_ROOT=%CORECLR_REPO%\sandbox
+
+ set LV_RUNID=Perf-%ETW_COLLECTION%
+ set BENCHNAME_LOG_FILE_NAME=%LV_RUNID%-%BENCHNAME%.log
+ set LV_CMD=
if defined IS_SCENARIO_TEST (
- call :run_cmd corerun.exe "%CORECLR_REPO%\sandbox\%BENCHNAME%.%TEST_FILE_EXT%" --perf:runid Perf 1>"%BENCHNAME_LOG_FILE_NAME%" 2>&1
+ set "LV_CMD=corerun.exe "%CORECLR_REPO%\sandbox\%BENCHNAME%.%TEST_FILE_EXT%" --perf:runid "%LV_RUNID%""
) else (
- call :run_cmd corerun.exe PerfHarness.dll "%CORECLR_REPO%\sandbox\%BENCHNAME%.%TEST_FILE_EXT%" --perf:runid Perf 1>"%BENCHNAME_LOG_FILE_NAME%" 2>&1
+ set "LV_CMD=%STABILITY_PREFIX% corerun.exe PerfHarness.dll "%CORECLR_REPO%\sandbox\%BENCHNAME%.%TEST_FILE_EXT%" --perf:runid "%LV_RUNID%" --perf:collect %COLLECTION_FLAGS%"
)
+ call :print_to_console $ !LV_CMD!
+ call :run_cmd !LV_CMD! 1>"%BENCHNAME_LOG_FILE_NAME%" 2>&1
+
IF %ERRORLEVEL% NEQ 0 (
call :print_error corerun.exe exited with code %ERRORLEVEL%.
if exist "%BENCHNAME_LOG_FILE_NAME%" type "%BENCHNAME_LOG_FILE_NAME%"
@@ -92,15 +110,16 @@ setlocal
)
rem optionally generate results for benchview
- if not [%BENCHVIEW_PATH%] == [] (
+ if exist "%BENCHVIEW_PATH%" (
call :generate_results_for_benchview || exit /b 1
- ) else (
- type "%XMLOUT%" | findstr /i /c:"test name"
)
rem Save off the results to the root directory for recovery later in Jenkins
- call :run_cmd xcopy "Perf-%BENCHNAME%*.xml" "%CORECLR_REPO%\" || exit /b 1
- call :run_cmd xcopy "Perf-%BENCHNAME%*.etl" "%CORECLR_REPO%\" || exit /b 1
+ for %%e in (xml etl log) do (
+ IF EXIST ".\%LV_RUNID%-%BENCHNAME%.%%e" (
+ call :run_cmd xcopy /vy ".\%LV_RUNID%-%BENCHNAME%.%%e" .. || exit /b 1
+ )
+ )
exit /b 0
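Composing the command line into LV_CMD before running it lets the script echo exactly what it is about to execute. A sketch of the shape, where BENCH and LOGFILE are placeholder names:

    set "LV_CMD=corerun.exe PerfHarness.dll "%BENCH%" --perf:runid "%LV_RUNID%""
    call :print_to_console $ !LV_CMD!
    call :run_cmd !LV_CMD! 1>"%LOGFILE%" 2>&1

The trailing for %%e in (xml etl log) loop copies only files that actually exist, so a stopwatch-only run with no .etl output does not fail the xcopy.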
@@ -114,23 +133,51 @@ rem ****************************************************************************
shift
goto :parse_command_line_arguments
)
+ IF /I [%~1] == [-stabilityPrefix] (
+ set STABILITY_PREFIX=%~2
+ shift
+ shift
+ goto :parse_command_line_arguments
+ )
IF /I [%~1] == [-scenarioTest] (
set IS_SCENARIO_TEST=1
shift
goto :parse_command_line_arguments
)
+ IF /I [%~1] == [-uploadtobenchview] (
+ set SHOULD_UPLOAD_TO_BENCHVIEW=1
+ shift
+ goto :parse_command_line_arguments
+ )
+ IF /I [%~1] == [-nowarmup] (
+ set HAS_WARMUP_RUN=
+ shift
+ goto :parse_command_line_arguments
+ )
+ IF /I [%~1] == [-better] (
+ set BETTER=%~2
+ shift
+ shift
+ goto :parse_command_line_arguments
+ )
IF /I [%~1] == [-runtype] (
set BENCHVIEW_RUN_TYPE=%~2
shift
shift
goto :parse_command_line_arguments
)
+ IF /I [%~1] == [-collectionflags] (
+ set COLLECTION_FLAGS=%~2
+ shift
+ shift
+ goto :parse_command_line_arguments
+ )
IF /I [%~1] == [-library] (
set TEST_FILE_EXT=dll
shift
goto :parse_command_line_arguments
)
- IF /I [%~1] == [-uploadtobenchview] (
+ IF /I [%~1] == [-generatebenchviewdata] (
set BENCHVIEW_PATH=%~2
shift
shift
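Every parser branch follows the same shape: match the current token case-insensitively, consume it (plus its value, if it takes one) with shift, and loop back. A sketch with a hypothetical flag:

    :parse_command_line_arguments
        rem The [%~1] brackets keep the comparison well-formed when no arguments remain.
        IF /I [%~1] == [-myFlag] (
            set MY_VALUE=%~2
            shift
            shift
            goto :parse_command_line_arguments
        )
        exit /b 0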
@@ -154,7 +201,12 @@ rem ****************************************************************************
shift
goto :parse_command_line_arguments
)
-
+ IF /I [%~1] == [-group] (
+ set BENCHVIEW_GROUP=%~2
+ shift
+ shift
+ goto :parse_command_line_arguments
+ )
if /I [%~1] == [-?] (
call :USAGE
exit /b 0
@@ -173,10 +225,18 @@ rem ****************************************************************************
rem ****************************************************************************
rem Sets the test architecture.
rem ****************************************************************************
- IF /I [%TEST_ARCHITECTURE%] == [x86jit32] (
- set TEST_ARCH=x86
- ) ELSE (
- set TEST_ARCH=%TEST_ARCHITECTURE%
+ set TEST_ARCH=%TEST_ARCHITECTURE%
+ exit /b 0
+
+:verify_benchview_tools
+rem ****************************************************************************
+rem Verifies that the path to the benchview tools is correct.
+rem ****************************************************************************
+ if defined BENCHVIEW_PATH (
+ if not exist "%BENCHVIEW_PATH%" (
+ call :print_error BenchView path: "%BENCHVIEW_PATH%" was specified, but it does not exist.
+ exit /b 1
+ )
)
exit /b 0
@@ -191,6 +251,18 @@ rem ****************************************************************************
)
exit /b 0
+:set_collection_config
+rem ****************************************************************************
rem Sets the config based on the providers used for collection
+rem ****************************************************************************
+ if /I [%COLLECTION_FLAGS%] == [stopwatch] (
+ set ETW_COLLECTION=Off
+ ) else (
+ set ETW_COLLECTION=On
+ )
+ exit /b 0
+
+
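As a usage sketch (path hypothetical), a run that turns ETW collection on might look like:

    run-xunit-perf.cmd -testBinLoc bin\tests\Windows_NT.x64.Release -collectionFlags default+BranchMispredictions+CacheMisses

Anything other than plain stopwatch flips ETW_COLLECTION to On, which also changes the run id prefix and the Profile config reported to BenchView.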
:set_perf_run_log
rem ****************************************************************************
rem Sets the script's output log file.
@@ -207,7 +279,7 @@ rem ****************************************************************************
rem Creates the sandbox folder used by the script to copy binaries locally,
rem and execute benchmarks.
rem ****************************************************************************
- if exist sandbox rd /s /q sandbox
+ if exist sandbox rmdir /s /q sandbox
if exist sandbox call :print_error Failed to remove the sandbox folder& exit /b 1
if not exist sandbox mkdir sandbox
if not exist sandbox call :print_error Failed to create the sandbox folder& exit /b 1
@@ -224,15 +296,19 @@ rem ****************************************************************************
set LV_MEASUREMENT_ARGS=
set LV_MEASUREMENT_ARGS=%LV_MEASUREMENT_ARGS% %BENCHVIEW_MEASUREMENT_PARSER%
- set LV_MEASUREMENT_ARGS=%LV_MEASUREMENT_ARGS% "Perf-%BENCHNAME%.xml"
- set LV_MEASUREMENT_ARGS=%LV_MEASUREMENT_ARGS% --better desc
- set LV_MEASUREMENT_ARGS=%LV_MEASUREMENT_ARGS% --drop-first-value
+ set LV_MEASUREMENT_ARGS=%LV_MEASUREMENT_ARGS% --better %BETTER%
+ set LV_MEASUREMENT_ARGS=%LV_MEASUREMENT_ARGS% %HAS_WARMUP_RUN%
set LV_MEASUREMENT_ARGS=%LV_MEASUREMENT_ARGS% --append
- call :run_cmd py.exe "%BENCHVIEW_PATH%\measurement.py" %LV_MEASUREMENT_ARGS%
- IF %ERRORLEVEL% NEQ 0 (
- call :print_error Failed to generate BenchView measurement data.
- exit /b 1
+
+ for /f %%f in ('dir /b Perf-*%BENCHNAME%.xml 2^>nul') do (
+ call :run_cmd py.exe "%BENCHVIEW_PATH%\measurement.py" %LV_MEASUREMENT_ARGS% %%f
+
+ IF !ERRORLEVEL! NEQ 0 (
+ call :print_error Failed to generate BenchView measurement data.
+ exit /b 1
+ )
)
+
endlocal& exit /b %ERRORLEVEL%
:upload_to_benchview
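Two cmd details carry the measurement loop above: the caret in 2^>nul escapes the redirection so it applies inside the dir command rather than to the for statement, and !ERRORLEVEL! is required because the loop body is a parenthesized block. A stripped-down sketch, file pattern hypothetical:

    for /f %%f in ('dir /b Perf-*.xml 2^>nul') do (
        call :run_cmd py.exe "%BENCHVIEW_PATH%\measurement.py" %%f --append
        IF !ERRORLEVEL! NEQ 0 exit /b 1
    )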
@@ -244,23 +320,28 @@ setlocal
set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --build ..\build.json
set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --machine-data ..\machinedata.json
set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --metadata ..\submission-metadata.json
- set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --group "CoreCLR"
+ set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --group "%BENCHVIEW_GROUP%"
set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --type "%BENCHVIEW_RUN_TYPE%"
set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config-name "%TEST_CONFIG%"
set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config Configuration "%TEST_CONFIG%"
set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config OS "Windows_NT"
+ set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config Profile "%ETW_COLLECTION%"
set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --arch "%TEST_ARCHITECTURE%"
set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --machinepool "PerfSnake"
+
call :run_cmd py.exe "%BENCHVIEW_PATH%\submission.py" measurement.json %LV_SUBMISSION_ARGS%
+
IF %ERRORLEVEL% NEQ 0 (
call :print_error Creating BenchView submission data failed.
exit /b 1
)
- call :run_cmd py.exe "%BENCHVIEW_PATH%\upload.py" submission.json --container coreclr
- IF %ERRORLEVEL% NEQ 0 (
- call :print_error Uploading to BenchView failed.
- exit /b 1
+ if defined SHOULD_UPLOAD_TO_BENCHVIEW (
+ call :run_cmd py.exe "%BENCHVIEW_PATH%\upload.py" submission.json --container coreclr
+ IF !ERRORLEVEL! NEQ 0 (
+ call :print_error Uploading to BenchView failed.
+ exit /b 1
+ )
)
exit /b %ERRORLEVEL%
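Generation and upload are now decoupled: submission.json is always built when the BenchView tooling is available, but upload.py runs only when -uploadToBenchview was passed, so a CI job can inspect the submission before publishing. A condensed sketch of the gate:

    call :run_cmd py.exe "%BENCHVIEW_PATH%\submission.py" measurement.json %LV_SUBMISSION_ARGS%
    if defined SHOULD_UPLOAD_TO_BENCHVIEW (
        call :run_cmd py.exe "%BENCHVIEW_PATH%\upload.py" submission.json --container coreclr
    )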
@@ -269,17 +350,25 @@ rem ****************************************************************************
rem Script's usage.
rem ****************************************************************************
set USAGE_DISPLAYED=1
- echo run-xunit-perf.cmd -testBinLoc ^<path_to_tests^> [-library] [-arch] ^<x86^|x64^> [-configuration] ^<Release^|Debug^> [-uploadToBenchview] ^<path_to_benchview_tools^> [-runtype] ^<rolling^|private^> [-scenarioTest]
+ echo run-xunit-perf.cmd -testBinLoc ^<path_to_tests^> [-library] [-arch] ^<x86^|x64^> [-configuration] ^<Release^|Debug^> [-generateBenchviewData] ^<path_to_benchview_tools^> [-nowarmup] [-better] ^<asc ^| desc^> [-group] ^<group^> [-runtype] ^<rolling^|private^> [-scenarioTest] [-collectionFlags] ^<default^+CacheMisses^+InstructionRetired^+BranchMispredictions^+gcapi^>
echo/
echo For the path to the tests you can pass a parent directory and the script will grovel for
echo all tests in subdirectories and run them.
echo The library flag denotes whether the tests are built as libraries (.dll) or executables (.exe)
echo Architecture defaults to x64 and configuration defaults to release.
- echo -uploadtoBenchview is used to specify a path to the Benchview tooling and when this flag is
- echo set we will upload the results of the tests to the coreclr container in benchviewupload.
+ echo -generateBenchviewData is used to specify a path to the Benchview tooling. When this flag is
+ echo set, we will generate the results for upload to Benchview.
+ echo -uploadToBenchview If this flag is set, the generated Benchview test data will be uploaded.
+ echo -nowarmup specifies not to discard the results of the first run.
+ echo -better specifies whether ascending or descending numbers are better for the benchmark.
+ echo -group specifies the Benchview group to which this data should be uploaded (default CoreCLR).
echo Runtype sets the runtype that we upload to Benchview, rolling for regular runs, and private for
echo PRs.
echo -scenarioTest should be included if you are running a scenario benchmark.
+ echo -collectionFlags This is used to specify what collection flags get passed to the performance
+ echo harness that runs the tests. If this is not specified, we only use stopwatch.
+ echo Other flags are "default", which is whatever the test being run specifies, "CacheMisses",
+ echo "BranchMispredictions", and "InstructionsRetired".
exit /b %ERRORLEVEL%
:print_error
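Putting the options together, a private upload-enabled run might be invoked like this (all paths hypothetical):

    run-xunit-perf.cmd -testBinLoc bin\tests\Windows_NT.x64.Release -library -arch x64 -configuration Release -generateBenchviewData C:\tools\BenchView -uploadToBenchview -runtype private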
@@ -287,19 +376,17 @@ rem ****************************************************************************
rem Function wrapper that unifies how errors are output by the script.
rem Functions output to the standard error.
rem ****************************************************************************
- echo [%DATE%][%TIME:~0,-3%][ERROR] %* 1>&2
+ call :print_to_console [ERROR] %* 1>&2
exit /b %ERRORLEVEL%
:print_to_console
rem ****************************************************************************
-rem Sends text to the console screen, no matter what (even when the script's
-rem output is redirected). This can be useful to provide information on where
-rem the script is executing.
+rem Sends text to the console screen. This can be useful to provide
+rem information on where the script is executing.
rem ****************************************************************************
- if defined _debug (
- echo [%DATE%][%TIME:~0,-3%] %* >CON
- )
- echo [%DATE%][%TIME:~0,-3%] %*
+ echo/
+ echo/%USERNAME%@%COMPUTERNAME% "%CD%"
+ echo/[%DATE%][%TIME:~0,-3%] %*
exit /b %ERRORLEVEL%
:run_cmd
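The echo/ form is deliberate: a bare echo prints the "ECHO is off." status line instead of a blank line, and echo/%VAR% stays safe when the variable expands to nothing. A tiny sketch:

    echo/
    echo/%USERNAME%@%COMPUTERNAME% "%CD%"
    echo/[%DATE%][%TIME:~0,-3%] message text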
@@ -315,19 +402,3 @@ rem ****************************************************************************
call :print_to_console $ %*
call %*
exit /b %ERRORLEVEL%
-
-:skip_failures
-rem ****************************************************************************
-rem Skip known failures
-rem ****************************************************************************
- IF /I [%TEST_ARCHITECTURE%] == [x86jit32] (
- IF /I "%~1" == "CscBench" (
- rem https://github.com/dotnet/coreclr/issues/11088
- exit /b 1
- )
- IF /I "%~1" == "SciMark2" (
- rem https://github.com/dotnet/coreclr/issues/11089
- exit /b 1
- )
- )
- exit /b 0