-rw-r--r-- | perf.groovy                                           | 17
-rw-r--r-- | tests/scripts/run-xunit-perf.cmd                      | 37
-rw-r--r-- | tests/src/Common/PerfHarness/project.json             |  2
-rw-r--r-- | tests/src/Common/external/project.json                |  8
-rw-r--r-- | tests/src/JIT/config/benchmark+roslyn/project.json    |  8
-rw-r--r-- | tests/src/JIT/config/benchmark+serialize/project.json |  8
-rw-r--r-- | tests/src/JIT/config/benchmark/project.json           |  8
7 files changed, 67 insertions, 21 deletions
diff --git a/perf.groovy b/perf.groovy
index 2d09c31c0a..8dfcabc43d 100644
--- a/perf.groovy
+++ b/perf.groovy
@@ -62,8 +62,16 @@ def static getOSGroup(def os) {
                 {
                     parameters
                     {
-                        stringParam('XUNIT_PERFORMANCE_MAX_ITERATION', '2', 'Sets the number of iterations to one. We want to do this so that we can run as fast as possible as this is just for smoke testing')
-                        stringParam('XUNIT_PERFORMANCE_MAX_ITERATION_INNER_SPECIFIED', '2', 'Sets the number of iterations to one. We want to do this so that we can run as fast as possible as this is just for smoke testing')
+                        stringParam('XUNIT_PERFORMANCE_MAX_ITERATION', '2', 'Sets the number of iterations to two. We want to do this so that we can run as fast as possible, as this is just for smoke testing')
+                        stringParam('XUNIT_PERFORMANCE_MAX_ITERATION_INNER_SPECIFIED', '2', 'Sets the number of iterations to two. We want to do this so that we can run as fast as possible, as this is just for smoke testing')
+                    }
+                }
+                else
+                {
+                    parameters
+                    {
+                        stringParam('XUNIT_PERFORMANCE_MAX_ITERATION', '21', 'Sets the number of iterations to twenty-one. We are doing this to limit the amount of data that we upload, as 20 iterations is enough to get a good sample')
+                        stringParam('XUNIT_PERFORMANCE_MAX_ITERATION_INNER_SPECIFIED', '21', 'Sets the number of iterations to twenty-one. We are doing this to limit the amount of data that we upload, as 20 iterations is enough to get a good sample')
                     }
                 }
                 def configuration = 'Release'
@@ -96,8 +104,13 @@ def static getOSGroup(def os) {
 
                 batchFile("tests\\runtest.cmd ${configuration} ${architecture} GenerateLayoutOnly")
 
+                // Run with just stopwatch
                 batchFile("tests\\scripts\\run-xunit-perf.cmd -arch ${arch} -configuration ${configuration} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\perflab\\Perflab -library -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType}")
                 batchFile("tests\\scripts\\run-xunit-perf.cmd -arch ${arch} -configuration ${configuration} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\Jit\\Performance\\CodeQuality -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType}")
+
+                // Run with the full set of counters enabled
+                batchFile("tests\\scripts\\run-xunit-perf.cmd -arch ${arch} -configuration ${configuration} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\perflab\\Perflab -library -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi")
+                batchFile("tests\\scripts\\run-xunit-perf.cmd -arch ${arch} -configuration ${configuration} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\Jit\\Performance\\CodeQuality -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi")
 
             }
         }
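A note on the perf.groovy change above: Jenkins string parameters surface as environment variables on the agent, which is how these iteration caps reach xunit-performance. A minimal sketch of an equivalent local smoke run, assuming a Windows_NT x64 Release layout (the -testBinLoc path here is illustrative, not mandated by the change):

    rem Sketch: mirror the CI smoke-test parameters locally, then invoke the
    rem runner; xunit-performance reads the iteration cap from the environment.
    set XUNIT_PERFORMANCE_MAX_ITERATION=2
    set XUNIT_PERFORMANCE_MAX_ITERATION_INNER_SPECIFIED=2
    call tests\scripts\run-xunit-perf.cmd -arch x64 -configuration Release ^
        -testBinLoc bin\tests\Windows_NT.x64.Release\Jit\Performance\CodeQuality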
diff --git a/tests/scripts/run-xunit-perf.cmd b/tests/scripts/run-xunit-perf.cmd
index 11279aeeda..e279fa3f24 100644
--- a/tests/scripts/run-xunit-perf.cmd
+++ b/tests/scripts/run-xunit-perf.cmd
@@ -18,11 +18,15 @@ setlocal
   set USAGE_DISPLAYED=
   set SHOULD_UPLOAD_TO_BENCHVIEW=
   set BENCHVIEW_PATH=
+  set COLLECTION_FLAGS=stopwatch
+  set ETW_COLLECTION=Off
+  set STABILITY_PREFIX=
 
   call :parse_command_line_arguments %*
   if defined USAGE_DISPLAYED exit /b %ERRORLEVEL%
 
   call :set_test_architecture || exit /b 1
+  call :set_collection_config || exit /b 1
   call :verify_benchview_tools || exit /b 1
   call :verify_core_overlay || exit /b 1
   call :set_perf_run_log || exit /b 1
@@ -86,7 +90,7 @@ setlocal
   if defined IS_SCENARIO_TEST (
     call :run_cmd corerun.exe "%CORECLR_REPO%\sandbox\%BENCHNAME%.%TEST_FILE_EXT%" --perf:runid Perf 1>"%BENCHNAME_LOG_FILE_NAME%" 2>&1
   ) else (
-    call :run_cmd corerun.exe PerfHarness.dll "%CORECLR_REPO%\sandbox\%BENCHNAME%.%TEST_FILE_EXT%" --perf:runid Perf 1>"%BENCHNAME_LOG_FILE_NAME%" 2>&1
+    call :run_cmd %STABILITY_PREFIX% corerun.exe PerfHarness.dll "%CORECLR_REPO%\sandbox\%BENCHNAME%.%TEST_FILE_EXT%" --perf:runid Perf --perf:collect %COLLECTION_FLAGS% 1>"%BENCHNAME_LOG_FILE_NAME%" 2>&1
   )
 
   IF %ERRORLEVEL% NEQ 0 (
@@ -118,6 +122,12 @@ rem ****************************************************************************
     shift
     goto :parse_command_line_arguments
   )
+  IF /I [%~1] == [-stabilityPrefix] (
+    set STABILITY_PREFIX=%~2
+    shift
+    shift
+    goto :parse_command_line_arguments
+  )
   IF /I [%~1] == [-scenarioTest] (
     set IS_SCENARIO_TEST=1
     shift
@@ -134,6 +144,12 @@ rem ****************************************************************************
     shift
     goto :parse_command_line_arguments
   )
+  IF /I [%~1] == [-collectionFlags] (
+    set COLLECTION_FLAGS=%~2
+    shift
+    shift
+    goto :parse_command_line_arguments
+  )
   IF /I [%~1] == [-library] (
     set TEST_FILE_EXT=dll
     shift
@@ -208,6 +224,18 @@ rem ****************************************************************************
   )
   exit /b 0
 
+:set_collection_config
+rem ****************************************************************************
+rem Sets the config based on the providers used for collection
+rem ****************************************************************************
+  if /I [%COLLECTION_FLAGS%] == [stopwatch] (
+    set ETW_COLLECTION=Off
+  ) else (
+    set ETW_COLLECTION=On
+  )
+  exit /b 0
+
+
:set_perf_run_log
rem ****************************************************************************
rem Sets the script's output log file.
rem ****************************************************************************
@@ -266,6 +294,7 @@ setlocal
   set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config-name "%TEST_CONFIG%"
   set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config Configuration "%TEST_CONFIG%"
   set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config OS "Windows_NT"
+  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config Profile "%ETW_COLLECTION%"
   set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --arch "%TEST_ARCHITECTURE%"
   set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --machinepool "PerfSnake"
   call :run_cmd py.exe "%BENCHVIEW_PATH%\submission.py" measurement.json %LV_SUBMISSION_ARGS%
@@ -288,7 +317,7 @@ rem ****************************************************************************
 rem Script's usage.
 rem ****************************************************************************
   set USAGE_DISPLAYED=1
-  echo run-xunit-perf.cmd -testBinLoc ^<path_to_tests^> [-library] [-arch] ^<x86^|x64^> [-configuration] ^<Release^|Debug^> [-generateBenchviewData] ^<path_to_benchview_tools^> [-runtype] ^<rolling^|private^> [-scenarioTest]
+  echo run-xunit-perf.cmd -testBinLoc ^<path_to_tests^> [-library] [-arch] ^<x86^|x64^> [-configuration] ^<Release^|Debug^> [-generateBenchviewData] ^<path_to_benchview_tools^> [-runtype] ^<rolling^|private^> [-scenarioTest] [-collectionFlags] ^<default^+CacheMisses^+InstructionRetired^+BranchMispredictions^+gcapi^>
   echo/
   echo For the path to the tests you can pass a parent directory and the script will grovel for
   echo all tests in subdirectories and run them.
@@ -300,6 +329,10 @@ rem ****************************************************************************
   echo Runtype sets the runtype that we upload to Benchview, rolling for regular runs, and private for
   echo PRs.
   echo -scenarioTest should be included if you are running a scenario benchmark.
+  echo -collectionFlags This is used to specify what collection flags get passed to the performance
+  echo harness that is doing the test running. If this is not specified, we only use stopwatch.
+  echo Other flags are "default", which is whatever the test being run specified, "CacheMisses",
+  echo "BranchMispredictions", and "InstructionRetired".
   exit /b %ERRORLEVEL%
 
:print_error
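Putting the new switches together: a hypothetical invocation of the updated script. The -stabilityPrefix value below is only an example of a command that can wrap corerun.exe (here pinning affinity and raising priority); the script itself does not mandate any particular prefix.

    rem Hypothetical full-counter run; %STABILITY_PREFIX% is prepended to the
    rem corerun.exe call inside the script.
    tests\scripts\run-xunit-perf.cmd -arch x64 -configuration Release ^
        -testBinLoc bin\tests\Windows_NT.x64.Release\performance\perflab\Perflab -library ^
        -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi ^
        -stabilityPrefix "start /b /wait /high /affinity 0x2"

Because these flags are not plain stopwatch, :set_collection_config sets ETW_COLLECTION=On, and the BenchView submission records Profile "On" for the run.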
diff --git a/tests/src/Common/PerfHarness/project.json b/tests/src/Common/PerfHarness/project.json
index a1ecb2867e..bed9544b65 100644
--- a/tests/src/Common/PerfHarness/project.json
+++ b/tests/src/Common/PerfHarness/project.json
@@ -12,7 +12,7 @@
         "type": "platform",
         "version": "1.1.0"
       },
-      "xunit.performance.api": "1.0.0-beta-build0003"
+      "xunit.performance.api": "1.0.0-beta-build0004"
     }
   }
 }
diff --git a/tests/src/Common/external/project.json b/tests/src/Common/external/project.json
index 12c986a98b..b5e378db94 100644
--- a/tests/src/Common/external/project.json
+++ b/tests/src/Common/external/project.json
@@ -1,10 +1,10 @@
 {
   "dependencies": {
     "Microsoft.CodeAnalysis.Compilers": "1.1.1",
-    "xunit.performance.api": "1.0.0-beta-build0003",
-    "xunit.performance.core": "1.0.0-beta-build0003",
-    "xunit.performance.execution": "1.0.0-beta-build0003",
-    "xunit.performance.metrics": "1.0.0-beta-build0003",
+    "xunit.performance.api": "1.0.0-beta-build0004",
+    "xunit.performance.core": "1.0.0-beta-build0004",
+    "xunit.performance.execution": "1.0.0-beta-build0004",
+    "xunit.performance.metrics": "1.0.0-beta-build0004",
     "Microsoft.Diagnostics.Tracing.TraceEvent": "1.0.3-alpha-experimental",
     "Newtonsoft.Json": "9.0.1",
     "xunit": "2.2.0-beta2-build3300",
diff --git a/tests/src/JIT/config/benchmark+roslyn/project.json b/tests/src/JIT/config/benchmark+roslyn/project.json
index 6838ed1de8..d6e0d61c0e 100644
--- a/tests/src/JIT/config/benchmark+roslyn/project.json
+++ b/tests/src/JIT/config/benchmark+roslyn/project.json
@@ -1,10 +1,10 @@
 {
   "dependencies": {
     "Microsoft.CodeAnalysis.Compilers": "1.1.1",
-    "xunit.performance.api": "1.0.0-beta-build0003",
-    "xunit.performance.core": "1.0.0-beta-build0003",
-    "xunit.performance.execution": "1.0.0-beta-build0003",
-    "xunit.performance.metrics": "1.0.0-beta-build0003",
+    "xunit.performance.api": "1.0.0-beta-build0004",
+    "xunit.performance.core": "1.0.0-beta-build0004",
+    "xunit.performance.execution": "1.0.0-beta-build0004",
+    "xunit.performance.metrics": "1.0.0-beta-build0004",
     "Microsoft.Diagnostics.Tracing.TraceEvent": "1.0.3-alpha-experimental",
     "Microsoft.NETCore.Platforms": "2.0.0-preview2-25305-01",
     "System.Console": "4.4.0-beta-24913-02",
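The xunit.performance.* packages are bumped in lockstep across these config files. A hypothetical consistency check (not part of this change) that flags any file still pinned to the old build:

    rem Hypothetical helper: findstr exits with 0 when a match is found,
    rem so a zero exit code here means a stale build0003 pin remains.
    findstr /S /M /C:"1.0.0-beta-build0003" tests\src\*project.json
    if %ERRORLEVEL% EQU 0 echo WARNING: stale xunit.performance pin found.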
"xunit.performance.execution": "1.0.0-beta-build0004", + "xunit.performance.metrics": "1.0.0-beta-build0004", "Microsoft.Diagnostics.Tracing.TraceEvent": "1.0.3-alpha-experimental", "Microsoft.NETCore.Platforms": "2.0.0-preview2-25305-01", "Newtonsoft.Json": "7.0.1", diff --git a/tests/src/JIT/config/benchmark/project.json b/tests/src/JIT/config/benchmark/project.json index 9c41a91713..5f1fde0130 100644 --- a/tests/src/JIT/config/benchmark/project.json +++ b/tests/src/JIT/config/benchmark/project.json @@ -1,9 +1,9 @@ { "dependencies": { - "xunit.performance.api": "1.0.0-beta-build0003", - "xunit.performance.core": "1.0.0-beta-build0003", - "xunit.performance.execution": "1.0.0-beta-build0003", - "xunit.performance.metrics": "1.0.0-beta-build0003", + "xunit.performance.api": "1.0.0-beta-build0004", + "xunit.performance.core": "1.0.0-beta-build0004", + "xunit.performance.execution": "1.0.0-beta-build0004", + "xunit.performance.metrics": "1.0.0-beta-build0004", "Microsoft.Diagnostics.Tracing.TraceEvent": "1.0.3-alpha-experimental", "Microsoft.NETCore.Platforms": "2.0.0-preview2-25305-01", "System.Collections.NonGeneric": "4.4.0-beta-24913-02", |