-rw-r--r--  perf.groovy                       |  42
-rw-r--r--  tests/scripts/run-xunit-perf.cmd  | 545
-rwxr-xr-x  tests/scripts/run-xunit-perf.py   |  32
-rwxr-xr-x  tests/scripts/run-xunit-perf.sh   | 439
4 files changed, 33 insertions(+), 1025 deletions(-)
diff --git a/perf.groovy b/perf.groovy
index 0efcaba19f..5497316398 100644
--- a/perf.groovy
+++ b/perf.groovy
@@ -79,16 +79,16 @@ def static getOSGroup(def os) {
     batchFile("tests\\runtest.cmd ${configuration} ${architecture} GenerateLayoutOnly")
 
-    def runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${configuration} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -outputdir \"%WORKSPACE%\\bin\\sandbox_logs\" -stabilityPrefix \"START \"CORECLR_PERF_RUN\" /B /WAIT /HIGH /AFFINITY 0x2\""
+    def runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${configuration} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -outputdir \"%WORKSPACE%\\bin\\sandbox_logs\" -stabilityPrefix \"START \\\"CORECLR_PERF_RUN\\\" /B /WAIT /HIGH /AFFINITY 0x2\""
 
     // Run with just stopwatch: Profile=Off
-    batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\perflab\\Perflab -library")
-    batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\Jit\\Performance\\CodeQuality")
+    batchFile("py tests\\scripts\\run-xunit-perf.py ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\perflab\\Perflab -library")
+    batchFile("py tests\\scripts\\run-xunit-perf.py ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\Jit\\Performance\\CodeQuality")
 
     // Run with the full set of counters enabled: Profile=On
     if (opt_level != 'min_opt') {
-        batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\perflab\\Perflab -library -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi")
-        batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\Jit\\Performance\\CodeQuality -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi")
+        batchFile("py tests\\scripts\\run-xunit-perf.py ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\perflab\\Perflab -library -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi")
+        batchFile("py tests\\scripts\\run-xunit-perf.py ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\Jit\\Performance\\CodeQuality -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi")
     }
   }
 }
@@ -266,11 +266,12 @@ def static getFullPerfJobName(def project, def os, def isPR) {
     def newBuildJob = job(fullBuildJobName) {
         steps {
             shell("./build.sh verbose ${architecture} ${configuration}")
+            shell("./build-test.sh generatelayoutonly ${architecture} ${configuration}")
         }
     }
     Utilities.setMachineAffinity(newBuildJob, os, 'latest-or-auto')
     Utilities.standardJobSetup(newBuildJob, project, isPR, "*/${branch}")
-    Utilities.addArchival(newBuildJob, "bin/Product/**,bin/obj/*/tests/**/*.dylib,bin/obj/*/tests/**/*.so", "bin/Product/**/.nuget/**")
+    Utilities.addArchival(newBuildJob, "bin/Product/**,bin/obj/*/tests/**/*.dylib,bin/obj/*/tests/**/*.so,bin/tests/**", "bin/Product/**/.nuget/**")
 }
@@ -302,9 +303,12 @@ def static getFullPerfJobName(def project, def os, def isPR) {
     def osGroup = getOSGroup(os)
     def runType = isPR ? 'private' : 'rolling'
     def benchViewName = isPR ? 'coreclr private \$BenchviewCommitName' : 'coreclr rolling \$GIT_BRANCH_WITHOUT_ORIGIN \$GIT_COMMIT'
+    def uploadString = '-uploadToBenchview'
+
+    def runXUnitCommonArgs = "-arch ${architecture} -os Ubuntu16.04 -configuration ${configuration} -stabilityPrefix \"taskset 0x00000002 nice --adjustment=-10\" -generateBenchviewData \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools\" ${uploadString} -runtype ${runType} -outputdir \"\${WORKSPACE}/bin/sandbox_logs\""
 
     steps {
-        shell("./tests/scripts/perf-prep.sh")
+        shell("./tests/scripts/perf-prep.sh --nocorefx")
         shell("./init-tools.sh")
         copyArtifacts(fullBuildJobName) {
             includePatterns("bin/**")
@@ -315,24 +319,12 @@ def static getFullPerfJobName(def project, def os, def isPR) {
         shell("GIT_BRANCH_WITHOUT_ORIGIN=\$(echo \$GIT_BRANCH | sed \"s/[^/]*\\/\\(.*\\)/\\1 /\")\n" +
             "python3.5 \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools/submission-metadata.py\" --name \" ${benchViewName} \" --user-email \"dotnet-bot@microsoft.com\"\n" +
             "python3.5 \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools/build.py\" git --branch \$GIT_BRANCH_WITHOUT_ORIGIN --type ${runType}")
-        shell("""./tests/scripts/run-xunit-perf.sh \\
-        --testRootDir=\"\${WORKSPACE}/bin/tests/Windows_NT.${architecture}.${configuration}\" \\
-        --testNativeBinDir=\"\${WORKSPACE}/bin/obj/${osGroup}.${architecture}.${configuration}/tests\" \\
-        --coreClrBinDir=\"\${WORKSPACE}/bin/Product/${osGroup}.${architecture}.${configuration}\" \\
-        --mscorlibDir=\"\${WORKSPACE}/bin/Product/${osGroup}.${architecture}.${configuration}\" \\
-        --coreFxBinDir=\"\${WORKSPACE}/corefx\" \\
-        --runType=\"${runType}\" \\
-        --benchViewOS=\"${os}\" \\
-        --generatebenchviewdata=\"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools\" \\
-        --stabilityPrefix=\"taskset 0x00000002 nice --adjustment=-10\" \\
-        --uploadToBenchview""")
-        shell("mkdir -p bin/toArchive/sandbox/Logs/")
-        shell("rsync -a bin/sandbox/Logs/Perf-*.* bin/toArchive/sandbox/Logs/")
+        shell("""python3 ./tests/scripts/run-xunit-perf.py -testBinLoc bin/tests/Windows_NT.${architecture}.${configuration}/JIT/Performance/CodeQuality ${runXUnitCommonArgs}""")
     }
 }
 
 def archiveSettings = new ArchivalSettings()
-archiveSettings.addFiles('bin/toArchive/**')
+archiveSettings.addFiles('bin/sandbox_logs/**')
 archiveSettings.addFiles('machinedata.json')
 
 Utilities.addArchival(newJob, archiveSettings)
@@ -590,14 +582,14 @@ parallel(
     batchFile("tests\\runtest.cmd ${configuration} ${architecture} GenerateLayoutOnly")
 
-    def runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${configuration} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -outputdir \"%WORKSPACE%\\bin\\sandbox_logs\" -stabilityPrefix \"START \"CORECLR_PERF_RUN\" /B /WAIT /HIGH\" -scenarioTest"
+    def runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${configuration} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -outputdir \"%WORKSPACE%\\bin\\sandbox_logs\" -stabilityPrefix \"START \\\"CORECLR_PERF_RUN\\\" /B /WAIT /HIGH\" -scenarioTest"
 
     // Profile=Off
-    batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\Scenario\\JitBench -group CoreCLR-Scenarios")
+    batchFile("py tests\\scripts\\run-xunit-perf.py ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\Scenario\\JitBench -group CoreCLR-Scenarios")
 
     // Profile=On
     if (opt_level != 'min_opt') {
-        batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\Scenario\\JitBench -group CoreCLR-Scenarios -collectionFlags BranchMispredictions+CacheMisses+InstructionRetired")
+        batchFile("py tests\\scripts\\run-xunit-perf.py ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\Scenario\\JitBench -group CoreCLR-Scenarios -collectionFlags BranchMispredictions+CacheMisses+InstructionRetired")
     }
   }
 }
@@ -790,7 +782,7 @@ parallel(
     def runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${configuration} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -outputdir \"%WORKSPACE%\\bin\\sandbox_logs\" -scenarioTest"
 
     // Scenario: ILLink
-    batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\linkbench\\linkbench -group ILLink -nowarmup")
+    batchFile("py tests\\scripts\\run-xunit-perf.py ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\linkbench\\linkbench -group ILLink -nowarmup")
 }
diff --git a/tests/scripts/run-xunit-perf.cmd b/tests/scripts/run-xunit-perf.cmd
deleted file mode 100644
index d41aabeb43..0000000000
--- a/tests/scripts/run-xunit-perf.cmd
+++ /dev/null
@@ -1,545 +0,0 @@
-@rem Licensed to the .NET Foundation under one or more agreements.
-@rem The .NET Foundation licenses this file to you under the MIT license.
-@rem See the LICENSE file in the project root for more information.
-
-@if not defined _echo echo off
-
-setlocal ENABLEDELAYEDEXPANSION
-  set ERRORLEVEL=
-  set DOTNET_MULTILEVEL_LOOKUP=0
-  set UseSharedCompilation=false
-
-  set BENCHVIEW_RUN_TYPE=local
-  set CORECLR_REPO=%CD%
-  set LV_SANDBOX_DIR=%CORECLR_REPO%\bin\sandbox
-  set LV_SANDBOX_OUTPUT_DIR=%LV_SANDBOX_DIR%\Logs
-  set TEST_FILE_EXT=exe
-  set TEST_ARCH=x64
-  set TEST_ARCHITECTURE=x64
-  set TEST_CONFIG=Release
-  set IS_SCENARIO_TEST=
-  set USAGE_DISPLAYED=
-  set SHOULD_UPLOAD_TO_BENCHVIEW=
-  set BENCHVIEW_PATH=
-  set COLLECTION_FLAGS=stopwatch
-  set ETW_COLLECTION=Off
-  set STABILITY_PREFIX=
-  set BENCHVIEW_GROUP=CoreCLR
-  set HAS_WARMUP_RUN=--drop-first-value
-  set BETTER=desc
-  set OPT_LEVEL=full_opt
-  set VALID_OPTLEVELS=min_opt full_opt tiered
-
-  call :parse_command_line_arguments %*
-  if defined USAGE_DISPLAYED exit /b %ERRORLEVEL%
-
-  call :is_valid_optlevel      || exit /b 1
-  call :set_test_architecture  || exit /b 1
-  call :set_collection_config  || exit /b 1
-  call :verify_benchview_tools || exit /b 1
-  call :verify_core_overlay    || exit /b 1
-  call :setup_sandbox          || exit /b 1
-  call :set_perf_run_log       || exit /b 1
-  call :build_perfharness      || exit /b 1
-
-  call :run_cmd xcopy /sy "%CORECLR_REPO%\bin\tests\Windows_NT.%TEST_ARCH%.%TEST_CONFIG%\Tests\Core_Root"\* . >> %RUNLOG% || exit /b 1
-
-  rem find and stage the tests
-  set /A "LV_FAILURES=0"
-  for /R %CORECLR_PERF% %%T in (*.%TEST_FILE_EXT%) do (
-    call :run_benchmark %%T || (
-      set /A "LV_FAILURES+=1"
-    )
-  )
-
-  if not defined JIT_NAME (
-    set JIT_NAME=ryujit
-  )
-  if not defined PGO_OPTIMIZED (
-    set PGO_OPTIMIZED=pgo
-  )
-
-  rem optionally upload results to benchview
-  if not [%BENCHVIEW_PATH%] == [] (
-    call :upload_to_benchview || exit /b 1
-  )
-
-  rem Numbers are limited to 32-bits of precision (Int32.MAX == 2^32 - 1).
-  if %LV_FAILURES% NEQ 0 (
-    call :print_error %LV_FAILURES% benchmarks has failed.
-    exit /b %LV_FAILURES%
-  )
-
-  exit /b %ERRORLEVEL%
-
-:run_benchmark
-rem ****************************************************************************
-rem   Executes the xUnit Performance benchmarks
-rem ****************************************************************************
-setlocal
-  set BENCHNAME=%~n1
-  set BENCHDIR=%~p1
-
-  rem copy benchmark and any input files
-  call :run_cmd xcopy /sy %~1 . >> %RUNLOG% || exit /b 1
-  if exist "%BENCHDIR%*.txt" (
-    call :run_cmd xcopy /sy %BENCHDIR%*.txt . >> %RUNLOG% || exit /b 1
-  )
-
-  rem setup additional environment variables
-  if DEFINED TEST_ENV (
-    if EXIST "%TEST_ENV%" (
-      call "%TEST_ENV%"
-    )
-  )
-
-  call :setup_optimization_level
-
-  rem CORE_ROOT environment variable is used by some benchmarks such as Roslyn / CscBench.
-  set CORE_ROOT=%LV_SANDBOX_DIR%
-  set LV_RUNID=Perf-%ETW_COLLECTION%
-
-  if defined IS_SCENARIO_TEST (
-    set "LV_BENCHMARK_OUTPUT_DIR=%LV_SANDBOX_OUTPUT_DIR%\Scenarios"
-  ) else (
-    set "LV_BENCHMARK_OUTPUT_DIR=%LV_SANDBOX_OUTPUT_DIR%\Microbenchmarks"
-  )
-  set "LV_BENCHMARK_OUTPUT_DIR=%LV_BENCHMARK_OUTPUT_DIR%\%ETW_COLLECTION%\%BENCHNAME%"
-
-  set BENCHNAME_LOG_FILE_NAME=%LV_BENCHMARK_OUTPUT_DIR%\%LV_RUNID%-%BENCHNAME%.log
-
-  if not defined LV_BENCHMARK_OUTPUT_DIR (
-    call :print_error LV_BENCHMARK_OUTPUT_DIR was not defined.
-    exit /b 1
-  )
-  if not exist "%LV_BENCHMARK_OUTPUT_DIR%" mkdir "%LV_BENCHMARK_OUTPUT_DIR%"
-  if not exist "%LV_BENCHMARK_OUTPUT_DIR%" (
-    call :print_error Failed to create the "%LV_BENCHMARK_OUTPUT_DIR%" directory.
-    exit /b 1
-  )
-
-  echo/
-  echo/  ----------
-  echo/    Running %LV_RUNID% %BENCHNAME%
-  echo/  ----------
-
-  set "LV_COMMON_ARGS="%LV_SANDBOX_DIR%\%BENCHNAME%.%TEST_FILE_EXT%" --perf:outputdir "%LV_BENCHMARK_OUTPUT_DIR%" --perf:runid "%LV_RUNID%""
-  if defined IS_SCENARIO_TEST (
-    set "LV_COMMON_ARGS=%LV_COMMON_ARGS% --target-architecture "%TEST_ARCHITECTURE%""
-  ) else (
-    set "LV_COMMON_ARGS=PerfHarness.dll %LV_COMMON_ARGS%"
-  )
-
-  set "LV_CMD=%STABILITY_PREFIX% corerun.exe %LV_COMMON_ARGS% --perf:collect %COLLECTION_FLAGS%"
-  call :print_to_console $ !LV_CMD!
-  call :run_cmd !LV_CMD! 1>"%BENCHNAME_LOG_FILE_NAME%" 2>&1
-
-  IF %ERRORLEVEL% NEQ 0 (
-    call :print_error corerun.exe exited with %ERRORLEVEL% code.
-    if exist "%BENCHNAME_LOG_FILE_NAME%" type "%BENCHNAME_LOG_FILE_NAME%"
-    exit /b 1
-  )
-
-  rem optionally generate results for benchview
-  if exist "%BENCHVIEW_PATH%" (
-    call :generate_results_for_benchview || exit /b 1
-  )
-
-  exit /b 0
-
-:parse_command_line_arguments
-rem ****************************************************************************
-rem   Parses the script's command line arguments.
-rem ****************************************************************************
-  IF /I [%~1] == [-testBinLoc] (
-    set CORECLR_PERF=%CORECLR_REPO%\%~2
-    shift
-    shift
-    goto :parse_command_line_arguments
-  )
-  IF /I [%~1] == [-stabilityPrefix] (
-    set STABILITY_PREFIX=%~2
-    shift
-    shift
-    goto :parse_command_line_arguments
-  )
-  IF /I [%~1] == [-scenarioTest] (
-    set IS_SCENARIO_TEST=1
-    shift
-    goto :parse_command_line_arguments
-  )
-  IF /I [%~1] == [-uploadtobenchview] (
-    set SHOULD_UPLOAD_TO_BENCHVIEW=1
-    shift
-    goto :parse_command_line_arguments
-  )
-  IF /I [%~1] == [-nowarmup] (
-    set HAS_WARMUP_RUN=
-    shift
-    goto :parse_command_line_arguments
-  )
-  IF /I [%~1] == [-better] (
-    set BETTER=%~2
-    shift
-    shift
-    goto :parse_command_line_arguments
-  )
-  IF /I [%~1] == [-runtype] (
-    set BENCHVIEW_RUN_TYPE=%~2
-    shift
-    shift
-    goto :parse_command_line_arguments
-  )
-  IF /I [%~1] == [-collectionflags] (
-    set COLLECTION_FLAGS=%~2
-    shift
-    shift
-    goto :parse_command_line_arguments
-  )
-  IF /I [%~1] == [-library] (
-    set TEST_FILE_EXT=dll
-    shift
-    goto :parse_command_line_arguments
-  )
-  IF /I [%~1] == [-generatebenchviewdata] (
-    set BENCHVIEW_PATH=%~2
-    shift
-    shift
-    goto :parse_command_line_arguments
-  )
-  IF /I [%~1] == [-arch] (
-    set TEST_ARCHITECTURE=%~2
-    shift
-    shift
-    goto :parse_command_line_arguments
-  )
-  IF /I [%~1] == [-testEnv] (
-    set TEST_ENV=%~2
-    shift
-    shift
-    goto :parse_command_line_arguments
-  )
-  IF /I [%~1] == [-optLevel] (
-    set OPT_LEVEL=%~2
-    shift
-    shift
-    goto :parse_command_line_arguments
-  )
-  IF /I [%~1] == [-jitName] (
-    set JIT_NAME=%~2
-    shift
-    shift
-    goto :parse_command_line_arguments
-  )
-  IF /I [%~1] == [-nopgo] (
-    set PGO_OPTIMIZED=nopgo
-    shift
-    goto :parse_command_line_arguments
-  )
-  IF /I [%~1] == [-configuration] (
-    set TEST_CONFIG=%~2
-    shift
-    shift
-    goto :parse_command_line_arguments
-  )
-  IF /I [%~1] == [-group] (
-    set BENCHVIEW_GROUP=%~2
-    shift
-    shift
-    goto :parse_command_line_arguments
-  )
-  IF /I [%~1] == [-outputdir] (
-    set LV_SANDBOX_OUTPUT_DIR=%~2
-    shift
-    shift
-    goto :parse_command_line_arguments
-  )
-
-  if /I [%~1] == [-?] (
-    call :USAGE
-    exit /b 0
-  )
-  if /I [%~1] == [-help] (
-    call :USAGE
-    exit /b 0
-  )
-
-  if not defined CORECLR_PERF call :USAGE
-  if not exist "%CORECLR_PERF%" (
-    call :print_error Specified testBinLoc: "%CORECLR_PERF%" does not exist.
-    call :USAGE
-  )
-
-  exit /b %ERRORLEVEL%
-
-:set_test_architecture
-rem ****************************************************************************
-rem   Sets the test architecture.
-rem ****************************************************************************
-  set TEST_ARCH=%TEST_ARCHITECTURE%
-  exit /b 0
-
-:verify_benchview_tools
-rem ****************************************************************************
-rem   Verifies that the path to the benchview tools is correct.
-rem ****************************************************************************
-  if defined BENCHVIEW_PATH (
-    if not exist "%BENCHVIEW_PATH%" (
-      call :print_error BenchView path: "%BENCHVIEW_PATH%" was specified, but it does not exist.
-      exit /b 1
-    )
-  )
-  exit /b 0
-
-:verify_core_overlay
-rem ****************************************************************************
-rem   Verify that the Core_Root folder exist.
-rem ****************************************************************************
-  set CORECLR_OVERLAY=%CORECLR_REPO%\bin\tests\Windows_NT.%TEST_ARCH%.%TEST_CONFIG%\Tests\Core_Root
-  if NOT EXIST "%CORECLR_OVERLAY%" (
-    call :print_error Can't find test overlay directory '%CORECLR_OVERLAY%'. Please build and run Release CoreCLR tests.
-    exit /B 1
-  )
-  exit /b 0
-
-:set_collection_config
-rem ****************************************************************************
-rem   Set's the config based on the providers used for collection
-rem ****************************************************************************
-  if /I [%COLLECTION_FLAGS%] == [stopwatch] (
-    set ETW_COLLECTION=Off
-  ) else (
-    set ETW_COLLECTION=On
-  )
-  exit /b 0
-
-:set_perf_run_log
-rem ****************************************************************************
-rem   Sets the script's output log file.
-rem ****************************************************************************
-  if NOT EXIST "%LV_SANDBOX_OUTPUT_DIR%" mkdir "%LV_SANDBOX_OUTPUT_DIR%"
-  if NOT EXIST "%LV_SANDBOX_OUTPUT_DIR%" (
-    call :print_error Cannot create the Logs folder "%LV_SANDBOX_OUTPUT_DIR%".
-    exit /b 1
-  )
-  set "RUNLOG=%LV_SANDBOX_OUTPUT_DIR%\perfrun.log"
-  exit /b 0
-
-:setup_sandbox
-rem ****************************************************************************
-rem   Creates the sandbox folder used by the script to copy binaries locally,
-rem   and execute benchmarks.
-rem ****************************************************************************
-  if not defined LV_SANDBOX_DIR (
-    call :print_error LV_SANDBOX_DIR was not defined.
-    exit /b 1
-  )
-
-  if exist "%LV_SANDBOX_DIR%" rmdir /s /q "%LV_SANDBOX_DIR%"
-  if exist "%LV_SANDBOX_DIR%" (
-    call :print_error Failed to remove the "%LV_SANDBOX_DIR%" folder
-    exit /b 1
-  )
-
-  if not exist "%LV_SANDBOX_DIR%" mkdir "%LV_SANDBOX_DIR%"
-  if not exist "%LV_SANDBOX_DIR%" (
-    call :print_error Failed to create the "%LV_SANDBOX_DIR%" folder.
-    exit /b 1
-  )
-
-  cd "%LV_SANDBOX_DIR%"
-  exit /b %ERRORLEVEL%
-
-:build_perfharness
-rem ****************************************************************************
-rem   Restores and publish the PerfHarness.
-rem ****************************************************************************
-  call :run_cmd "%CORECLR_REPO%\Tools\dotnetcli\dotnet.exe" --info || (
-    call :print_error Failed to get information about the CLI tool.
-    exit /b 1
-  )
-  call :run_cmd "%CORECLR_REPO%\Tools\dotnetcli\dotnet.exe" restore "%CORECLR_REPO%\tests\src\Common\PerfHarness\PerfHarness.csproj" || (
-    call :print_error Failed to restore PerfHarness.csproj
-    exit /b 1
-  )
-  call :run_cmd "%CORECLR_REPO%\Tools\dotnetcli\dotnet.exe" publish "%CORECLR_REPO%\tests\src\Common\PerfHarness\PerfHarness.csproj" -c Release -o "%LV_SANDBOX_DIR%" || (
-    call :print_error Failed to publish PerfHarness.csproj
-    exit /b 1
-  )
-  exit /b 0
-
-:generate_results_for_benchview
-rem ****************************************************************************
-rem   Generates results for BenchView, by appending new data to the existing
-rem   measurement.json file.
-rem ****************************************************************************
-  if not defined LV_RUNID (
-    call :print_error LV_RUNID was not defined before calling generate_results_for_benchview.
-    exit /b 1
-  )
-  set BENCHVIEW_MEASUREMENT_PARSER=xunit
-  if defined IS_SCENARIO_TEST set BENCHVIEW_MEASUREMENT_PARSER=xunitscenario
-
-  set LV_MEASUREMENT_ARGS=
-  set LV_MEASUREMENT_ARGS=%LV_MEASUREMENT_ARGS% %BENCHVIEW_MEASUREMENT_PARSER%
-  set LV_MEASUREMENT_ARGS=%LV_MEASUREMENT_ARGS% --better %BETTER%
-  set LV_MEASUREMENT_ARGS=%LV_MEASUREMENT_ARGS% %HAS_WARMUP_RUN%
-  set LV_MEASUREMENT_ARGS=%LV_MEASUREMENT_ARGS% --append
-
-  rem Currently xUnit Performance Api saves the scenario output
-  rem   files on the current working directory.
-  set "LV_PATTERN=%LV_BENCHMARK_OUTPUT_DIR%\%LV_RUNID%-*.xml"
-  for %%f in (%LV_PATTERN%) do (
-    if exist "%%~f" (
-      call :run_cmd py.exe "%BENCHVIEW_PATH%\measurement.py" %LV_MEASUREMENT_ARGS% "%%~f" || (
-        call :print_error
-        type "%%~f"
-        exit /b 1
-      )
-    )
-  )
-
-endlocal& exit /b %ERRORLEVEL%
-
-:upload_to_benchview
-rem ****************************************************************************
-rem   Generates BenchView's submission data and upload it
-rem ****************************************************************************
-setlocal
-  if not exist measurement.json (
-    call :print_error measurement.json does not exist. There is no data to be uploaded.
-    exit /b 1
-  )
-
-  set LV_SUBMISSION_ARGS=
-  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --build "%CORECLR_REPO%\build.json"
-  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --machine-data "%CORECLR_REPO%\machinedata.json"
-  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --metadata "%CORECLR_REPO%\submission-metadata.json"
-  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --group "%BENCHVIEW_GROUP%"
-  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --type "%BENCHVIEW_RUN_TYPE%"
-  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config-name "%TEST_CONFIG%"
-  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config Configuration "%TEST_CONFIG%"
-  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config OS "Windows_NT"
-  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config Profile "%ETW_COLLECTION%"
-  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config OptLevel "%OPT_LEVEL%"
-  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config JitName "%JIT_NAME%"
-  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config PGO "%PGO_OPTIMIZED%"
-  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --architecture "%TEST_ARCHITECTURE%"
-  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --machinepool "PerfSnake"
-
-  call :run_cmd py.exe "%BENCHVIEW_PATH%\submission.py" measurement.json %LV_SUBMISSION_ARGS%
-
-  IF %ERRORLEVEL% NEQ 0 (
-    call :print_error Creating BenchView submission data failed.
-    exit /b 1
-  )
-
-  if defined SHOULD_UPLOAD_TO_BENCHVIEW (
-    call :run_cmd py.exe "%BENCHVIEW_PATH%\upload.py" submission.json --container coreclr
-    IF !ERRORLEVEL! NEQ 0 (
-      call :print_error Uploading to BenchView failed.
-      exit /b 1
-    )
-  )
-  exit /b %ERRORLEVEL%
-
-:USAGE
-rem ****************************************************************************
-rem   Script's usage.
-rem ****************************************************************************
-  set USAGE_DISPLAYED=1
-  echo run-xunit-perf.cmd -testBinLoc ^<path_to_tests^> [-library] [-arch] ^<x86^|x64^> [-configuration] ^<Release^|Debug^> [-generateBenchviewData] ^<path_to_benchview_tools^> [-warmup] [-better] ^<asc ^| desc^> [-group] ^<group^> [-runtype] ^<rolling^|private^> [-scenarioTest] [-collectionFlags] ^<default^+CacheMisses^+InstructionRetired^+BranchMispredictions^+gcapi^> [-outputdir] ^<outputdir^> [-optLevel] ^<%VALID_OPTLEVELS: =^|%^>
-  echo/
-  echo For the path to the tests you can pass a parent directory and the script will grovel for
-  echo   all tests in subdirectories and run them.
-  echo The library flag denotes whether the tests are build as libraries (.dll) or an executable (.exe)
-  echo Architecture defaults to x64 and configuration defaults to release.
-  echo -generateBenchviewData is used to specify a path to the Benchview tooling and when this flag is
-  echo   set we will generate the results for upload to benchview.
-  echo -uploadToBenchview If this flag is set the generated benchview test data will be uploaded.
-  echo -nowarmup specifies not to discard the results of the first run
-  echo -better whether it is better to have ascending or descending numbers for the benchmark
-  echo -group specifies the Benchview group to which this data should be uploaded (default CoreCLR)
-  echo Runtype sets the runtype that we upload to Benchview, rolling for regular runs, and private for
-  echo   PRs.
-  echo -scenarioTest should be included if you are running a scenario benchmark.
-  echo -outputdir Specifies the directory where the generated performance output will be saved.
-  echo -collectionFlags This is used to specify what collectoin flags get passed to the performance
-  echo -optLevel Specifies the optimization level to be used by the jit.
-  echo   harness that is doing the test running. If this is not specified we only use stopwatch.
-  echo   Other flags are "default", which is the whatever the test being run specified, "CacheMisses",
-  echo   "BranchMispredictions", and "InstructionsRetired".
-  exit /b %ERRORLEVEL%
-
-:print_error
-rem ****************************************************************************
-rem   Function wrapper that unifies how errors are output by the script.
-rem   Functions output to the standard error.
-rem ****************************************************************************
-  call :print_to_console [ERROR] %* 1>&2
-  exit /b %ERRORLEVEL%
-
-:print_to_console
-rem ****************************************************************************
-rem   Sends text to the console screen. This can be useful to provide
-rem   information on where the script is executing.
-rem ****************************************************************************
-  echo/
-  echo/%USERNAME%@%COMPUTERNAME% "%CD%"
-  echo/[%DATE%][%TIME:~0,-3%] %*
-  exit /b %ERRORLEVEL%
-
-:is_valid_optlevel
-rem ****************************************************************************
-rem   Validates the optlevel flag set by the user.
-rem ****************************************************************************
-setlocal
-  if not defined OPT_LEVEL (
-    call :print_error OPT_LEVEL is undefined.
-    exit /b 1
-  )
-
-  set "LV_IS_VALID_OPTLEVEL="
-  for %%i in (%VALID_OPTLEVELS%) do (
-    if /i "%%~i" == "%OPT_LEVEL%" (
-      set "LV_IS_VALID_OPTLEVEL=1"
-    )
-  )
-
-  if not defined LV_IS_VALID_OPTLEVEL (
-    call :print_error Unknown OPT_LEVEL=%OPT_LEVEL%
-    exit /b 1
-  )
-endlocal& exit /b 0
-
-:setup_optimization_level
-rem ****************************************************************************
-rem   Setup the appropriate environment variables needed for the selected
-rem   optlevel.
-rem ****************************************************************************
-  set "COMPlus_JITMinOpts="
-  set "COMPLUS_EXPERIMENTAL_TieredCompilation="
-
-  if /I "%OPT_LEVEL%" == "min_opt" (
-    set COMPlus_JITMinOpts=1
-    exit /b 0
-  )
-  if /I "%OPT_LEVEL%" == "tiered" (
-    set COMPLUS_EXPERIMENTAL_TieredCompilation=1
-    exit /b 0
-  )
-exit /b 0
-
-:run_cmd
-rem ****************************************************************************
-rem   Function wrapper used to send the command line being executed to the
-rem   console screen, before the command is executed.
-rem ****************************************************************************
-  if "%~1" == "" (
-    call :print_error No command was specified.
-    exit /b 1
-  )
-
-  call :print_to_console $ %*
-  call %*
-  exit /b %ERRORLEVEL%
diff --git a/tests/scripts/run-xunit-perf.py b/tests/scripts/run-xunit-perf.py
index c23b8bed57..33e7cc7f74 100755
--- a/tests/scripts/run-xunit-perf.py
+++ b/tests/scripts/run-xunit-perf.py
@@ -148,8 +148,9 @@ def run_command(runArgs, environment, errorMessage):
     try:
         subprocess.check_output(runArgs, stderr=subprocess.PIPE, env=environment)
     except subprocess.CalledProcessError as e:
+        log(errorMessage)
         log(e.output.decode('utf-8'))
-        raise RuntimeException(errorMessage);
+        raise
 
 ##########################################################################
 # Execution Functions
@@ -214,7 +215,6 @@ def run_benchmark(benchname, benchdir, env, sandboxDir, benchmarkOutputDir, test
     log(" ".join(runArgs))
 
     error = 0
-    expectedOutputFile = os.path.join(benchmarkOutputDir, lvRunId + '-' + benchname + '.xml')
     with open(benchnameLogFileName, 'wb') as out:
         proc = subprocess.Popen(' '.join(runArgs), shell=True, stdout=out, stderr=out, env=myEnv)
         proc.communicate()
@@ -226,9 +226,6 @@ def run_benchmark(benchname, benchdir, env, sandboxDir, benchmarkOutputDir, test
         if os.path.isfile(benchnameLogFileName):
             with open(benchnameLogFileName, 'r') as f:
                 print(f.read())
-        return error
-    elif not os.path.isfile(expectedOutputFile):
-        log("CoreRun.exe failed to generate results in %s." % expectedOutputFile)
         return 1
     return 0
 
@@ -245,18 +242,18 @@ def generate_results_for_benchview(python, lvRunId, benchname, isScenarioTest, b
         benchviewPath (str): path to benchview tools
     """
     benchviewMeasurementParser = 'xunitscenario' if isScenarioTest else 'xunit'
-    warmupRun = '--drop-first-value' if hasWarmupRun else ''
     lvMeasurementArgs = [benchviewMeasurementParser,
                          '--better',
-                         better,
-                         warmupRun,
-                         '--append']
+                         better]
+    if hasWarmupRun:
+        lvMeasurementArgs = lvMeasurementArgs + ['--drop-first-value']
 
-    filename = os.path.join(benchmarkOutputDir, lvRunId + '-' + benchname + '.xml')
+    lvMeasurementArgs = lvMeasurementArgs + ['--append']
 
-    runArgs = [python, os.path.join(benchviewPath, 'measurement.py')] + lvMeasurementArgs + [filename]
-
-    run_command(runArgs, os.environ, 'Call to %s failed' % runArgs[1])
+    files = glob.iglob(os.path.join(benchmarkOutputDir, "*.xml"))
+    for filename in files:
+        runArgs = [python, os.path.join(benchviewPath, 'measurement.py')] + lvMeasurementArgs + [filename]
+        run_command(runArgs, os.environ, 'Call to %s failed' % runArgs[1])
 
 def upload_to_benchview(python, coreclrRepo, benchviewPath, uploadToBenchview, benchviewGroup, runType, configuration, operatingSystem, etwCollection, optLevel, jitName, pgoOptimized, architecture):
     """ Upload results to benchview
@@ -438,6 +435,7 @@ def main(args):
     myEnv = dict(os.environ)
     myEnv['DOTNET_MULTILEVEL_LOOKUP'] = '0'
     myEnv['UseSharedCompilation'] = 'false'
+    myEnv['CORECLR_REPO'] = coreclrRepo
 
     # Setup directories
     log('Setting up directories')
@@ -475,12 +473,13 @@ def main(args):
     else:
         # If slice was not specified, run everything in the coreclrPerf directory. Set benchmarks to an empty string
-        benchmarks = [{ 'directory' : '', 'extraFlags': ''}]
+        benchmarks = [{ 'directory' : '', 'extraFlags': '-library' if isLibrary else ''}]
 
     testFileExt = 'dll' if isLibrary else 'exe'
 
     # Run benchmarks
     failures = 0
+    totalBenchmarks = 0
     lvRunId = 'Perf-%s' % etwCollection
 
     for benchmark in benchmarks:
@@ -492,6 +491,7 @@ def main(args):
         for root, dirs, files in os.walk(testPath):
             for f in files:
                 if f.endswith(testFileExt):
+                    totalBenchmarks += 1
                     benchname, ext = os.path.splitext(f)
 
                     benchmarkOutputDir = os.path.join(sandboxOutputDir, 'Scenarios') if isScenarioTest else os.path.join(sandboxOutputDir, 'Microbenchmarks')
@@ -505,8 +505,8 @@ def main(args):
     # Setup variables for uploading to benchview
     pgoOptimized = 'pgo' if isPgoOptimized else 'nopgo'
 
-    # Upload to benchview
-    if benchviewPath is not None:
+    # Upload to benchview only if we did not fail all benchmarks
+    if benchviewPath is not None and failures != totalBenchmarks:
         upload_to_benchview(python, coreclrRepo, benchviewPath, uploadToBenchview, benchviewGroup, runType, configuration, operatingSystem, etwCollection, optLevel, jitName, pgoOptimized, arch)
 
     if failures != 0:
diff --git a/tests/scripts/run-xunit-perf.sh b/tests/scripts/run-xunit-perf.sh
deleted file mode 100755
index 51185dcfa8..0000000000
--- a/tests/scripts/run-xunit-perf.sh
+++ /dev/null
@@ -1,439 +0,0 @@
-#!/usr/bin/env bash
-
-dp0=$( cd -P "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
-
-function run_command {
-    echo ""
-    echo $USER@`hostname` "$PWD"
-    echo `date +"[%m/%d/%Y %H:%M:%S]"`" $ $@"
-    "$@"
-    return $?
-}
-
-function print_usage {
-    echo ''
-    echo 'CoreCLR perf test script on Linux.'
-    echo ''
-    echo 'Typical command line:'
-    echo ''
-    echo 'coreclr/tests/scripts/run-xunit-perf.sh'
-    echo '    --testRootDir="temp/Windows_NT.x64.Debug"'
-    echo '    --testNativeBinDir="coreclr/bin/obj/Linux.x64.Debug/tests"'
-    echo '    --coreClrBinDir="coreclr/bin/Product/Linux.x64.Debug"'
-    echo '    --mscorlibDir="windows/coreclr/bin/Product/Linux.x64.Debug"'
-    echo '    --coreFxBinDir="corefx/bin/Linux.AnyCPU.Debug"'
-    echo ''
-    echo 'Required arguments:'
-    echo '  --testRootDir=<path>      : Root directory of the test build (e.g. coreclr/bin/tests/Windows_NT.x64.Debug).'
-    echo '  --testNativeBinDir=<path> : Directory of the native CoreCLR test build (e.g. coreclr/bin/obj/Linux.x64.Debug/tests).'
-    echo '  (Also required: Either --coreOverlayDir, or all of the switches --coreOverlayDir overrides)'
-    echo ''
-    echo 'Optional arguments:'
-    echo '  --coreOverlayDir=<path>   : Directory containing core binaries and test dependencies. If not specified, the'
-    echo '                              default is testRootDir/Tests/coreoverlay. This switch overrides --coreClrBinDir,'
-    echo '                              --mscorlibDir, and --coreFxBinDir.'
-    echo '  --coreClrBinDir=<path>    : Directory of the CoreCLR build (e.g. coreclr/bin/Product/Linux.x64.Debug).'
-    echo '  --mscorlibDir=<path>      : Directory containing the built mscorlib.dll. If not specified, it is expected to be'
-    echo '                              in the directory specified by --coreClrBinDir.'
-    echo '  --coreFxBinDir="<path>"   : The path to the unpacked runtime folder that is produced as part of a CoreFX build'
-    echo '  --generatebenchviewdata   : BenchView tools directory.'
-    echo '  --uploadToBenchview       : Specify this flag in order to have the results of the run uploaded to Benchview.'
-    echo '                              This requires that the generatebenchviewdata, os and runtype flags to be set, and'
-    echo '                              also have the BV_UPLOAD_SAS_TOKEN set to a SAS token for the Benchview upload container'
-    echo '  --benchViewOS=<os>        : Specify the os that will be used to insert data into Benchview.'
-    echo '  --runType=<local|private|rolling> : Specify the runType for Benchview. [Default: local]'
-    echo '  --outputdir               : Specifies the directory where the generated performance output will be saved.'
-    echo '  --optLevel=<min_opt|full_opt|tiered>'
-    echo '                            : Specifies the optimization level to be used by the jit.'
-}
-
-# libExtension determines extension for dynamic library files
-OSName=$(uname -s)
-libExtension=
-case $OSName in
-    Darwin)
-        libExtension="dylib"
-        ;;
-
-    Linux)
-        libExtension="so"
-        ;;
-
-    NetBSD)
-        libExtension="so"
-        ;;
-
-    *)
-        echo "Unsupported OS $OSName detected, configuring as if for Linux"
-        libExtension="so"
-        ;;
-esac
-
-function exit_with_error {
-    local errorSource=$1
-    local errorMessage=$2
-    local printUsage=$3
-
-    if [ -z "$printUsage" ]; then
-        ((printUsage = 0))
-    fi
-
-    echo "$errorMessage"
-    if ((printUsage != 0)); then
-        print_usage
-    fi
-
-    echo "Exiting script with error code: $EXIT_CODE_EXCEPTION"
-    exit $EXIT_CODE_EXCEPTION
-}
-
-# Handle Ctrl-C. We will stop execution and print the results that
-# we gathered so far.
-function handle_ctrl_c {
-    local errorSource='handle_ctrl_c'
-
-    echo ""
-    echo "*** Stopping... ***"
-    print_results
-    exit_with_error "$errorSource" "Test run aborted by Ctrl+C."
-}
-
-function is_valid_optlevel {
-    if [ -z "$optLevel" ]; then
-        echo "[ERROR] --optLevel is required."
-        return 1
-    fi
-
-    declare -A valid_optlevels=(
-        [min_opt]=1 [full_opt]=1 [tiered]=1
-    )
-    [[ -n "${valid_optlevels[$optLevel]}" ]] || {
-        echo "[ERROR] Specified an unknown optLevel=$optLevel";
-        return 1;
-    }
-    return 0
-}
-
-function setup_optimization_level {
-    unset COMPlus_JITMinOpts
-    unset COMPLUS_EXPERIMENTAL_TieredCompilation
-
-    if [ "$optLevel" == "min_opt" ]; then
-        export COMPlus_JITMinOpts=1
-        return 0
-    fi
-    if [ "$optLevel" == "tiered" ]; then
-        export COMPLUS_EXPERIMENTAL_TieredCompilation=1
-        return 0
-    fi
-    return 0
-}
-
-# Register the Ctrl-C handler
-trap handle_ctrl_c INT
-
-function create_core_overlay {
-    local errorSource='create_core_overlay'
-    local printUsage=1
-
-    if [ -n "$coreOverlayDir" ]; then
-        export CORE_ROOT="$coreOverlayDir"
-        return 0
-    fi
-
-    # Check inputs to make sure we have enough information to create the core
-    # layout. $testRootDir/Tests/Core_Root should already exist and contain test
-    # dependencies that are not built.
-    local testDependenciesDir=$testRootDir/Tests/Core_Root
-    if [ ! -d "$testDependenciesDir" ]; then
-        exit_with_error "$errorSource" "Did not find the test dependencies directory: $testDependenciesDir"
-    fi
-    if [ -z "$coreClrBinDir" ]; then
-        exit_with_error "$errorSource" "One of --coreOverlayDir or --coreClrBinDir must be specified." "$printUsage"
-    fi
-    if [ ! -d "$coreClrBinDir" ]; then
-        exit_with_error "$errorSource" "Directory specified by --coreClrBinDir does not exist: $coreClrBinDir"
-    fi
-    if [ -z "$coreFxBinDir" ]; then
-        exit_with_error "$errorSource" "One of --coreOverlayDir or --coreFxBinDir must be specified." "$printUsage"
-    fi
-
-    # Create the overlay
-    coreOverlayDir=$testRootDir/Tests/coreoverlay
-    export CORE_ROOT="$coreOverlayDir"
-    if [ -e "$coreOverlayDir" ]; then
-        rm -rf "$coreOverlayDir" || exit 1
-    fi
-
-    mkdir "$coreOverlayDir"
-
-    cp -f -v "$coreFxBinDir/"* "$coreOverlayDir/" || exit 2
-    cp -f -p -v "$coreClrBinDir/"* "$coreOverlayDir/" # || exit 3
-    if [ -d "$mscorlibDir/bin" ]; then
-        cp -f -v "$mscorlibDir/bin/"* "$coreOverlayDir/" || exit 4
-    fi
-    cp -f -v "$testDependenciesDir/"xunit* "$coreOverlayDir/" || exit 5
-    cp -n -v "$testDependenciesDir/"* "$coreOverlayDir/" # || exit 6
-    if [ -f "$coreOverlayDir/mscorlib.ni.dll" ]; then
-        # Test dependencies come from a Windows build, and mscorlib.ni.dll would be the one from Windows
-        rm -f "$coreOverlayDir/mscorlib.ni.dll" || exit 7
-    fi
-    if [ -f "$coreOverlayDir/System.Private.CoreLib.ni.dll" ]; then
-        # Test dependencies come from a Windows build, and System.Private.CoreLib.ni.dll would be the one from Windows
-        rm -f "$coreOverlayDir/System.Private.CoreLib.ni.dll" || exit 8
-    fi
-
-    copy_test_native_bin_to_test_root || exit 9
-
-    return 0
-}
-
-function precompile_overlay_assemblies {
-
-    if [ "$doCrossgen" == "1" ]; then
-
-        local overlayDir=$CORE_ROOT
-
-        filesToPrecompile=$(ls -trh $overlayDir/*.dll)
-        for fileToPrecompile in ${filesToPrecompile}
-        do
-            local filename=${fileToPrecompile}
-            echo "Precompiling $filename"
-            $overlayDir/crossgen /Platform_Assemblies_Paths $overlayDir $filename 2>/dev/null
-            local exitCode=$?
-            if [ $exitCode == -2146230517 ]; then
-                echo "$filename is not a managed assembly."
-            elif [ $exitCode != 0 ]; then
-                echo "Unable to precompile $filename."
-            else
-                echo "Successfully precompiled $filename"
-            fi
-        done
-    else
-        echo "Skipping crossgen of FX assemblies."
-    fi
-}
-
-function copy_test_native_bin_to_test_root {
-    local errorSource='copy_test_native_bin_to_test_root'
-
-    if [ -z "$testNativeBinDir" ]; then
-        exit_with_error "$errorSource" "--testNativeBinDir is required."
-    fi
-    testNativeBinDir=$testNativeBinDir/src
-    if [ ! -d "$testNativeBinDir" ]; then
-        exit_with_error "$errorSource" "Directory specified by --testNativeBinDir does not exist: $testNativeBinDir"
-    fi
-
-    # Copy native test components from the native test build into the respective test directory in the test root directory
-    find "$testNativeBinDir" -type f -iname '*.$libExtension' |
-        while IFS='' read -r filePath || [ -n "$filePath" ]; do
-            local dirPath=$(dirname "$filePath")
-            local destinationDirPath=${testRootDir}${dirPath:${#testNativeBinDir}}
-            if [ ! -d "$destinationDirPath" ]; then
-                exit_with_error "$errorSource" "Cannot copy native test bin '$filePath' to '$destinationDirPath/', as the destination directory does not exist."
-            fi
-            cp -f "$filePath" "$destinationDirPath/"
-        done
-}
-
-export DOTNET_MULTILEVEL_LOOKUP=0
-
-# Exit code constants
-readonly EXIT_CODE_SUCCESS=0       # Script ran normally.
-readonly EXIT_CODE_EXCEPTION=1     # Script exited because something exceptional happened (e.g. bad arguments, Ctrl-C interrupt).
-readonly EXIT_CODE_TEST_FAILURE=2  # Script completed successfully, but one or more tests failed.
-
-# Argument variables
-testRootDir=
-testNativeBinDir=
-coreOverlayDir=
-coreClrBinDir=
-mscorlibDir=
-coreFxBinDir=
-uploadToBenchview=
-benchViewOS=`lsb_release -i -s``lsb_release -r -s`
-runType=local
-optLevel=full_opt
-BENCHVIEW_TOOLS_PATH=
-benchViewGroup=CoreCLR
-perfCollection=
-collectionflags=stopwatch
-hasWarmupRun=--drop-first-value
-stabilityPrefix=
-benchmarksOutputDir=$dp0/../../bin/sandbox/Logs
-pgoOptimized=pgo
-
-for i in "$@"
-do
-    case $i in
-        -h|--help)
-            print_usage
-            exit $EXIT_CODE_SUCCESS
-            ;;
-        --testRootDir=*)
-            testRootDir=${i#*=}
-            ;;
-        --testNativeBinDir=*)
-            testNativeBinDir=${i#*=}
-            ;;
-        --coreOverlayDir=*)
-            coreOverlayDir=${i#*=}
-            ;;
-        --coreClrBinDir=*)
-            coreClrBinDir=${i#*=}
-            ;;
-        --mscorlibDir=*)
-            mscorlibDir=${i#*=}
-            ;;
-        --coreFxBinDir=*)
-            coreFxBinDir=${i#*=}
-            ;;
-        --benchViewOS=*)
-            benchViewOS=${i#*=}
-            ;;
-        --runType=*)
-            runType=${i#*=}
-            ;;
-        --optLevel=*)
-            optLevel=${i#*=}
-            ;;
-        --nopgo)
-            pgoOptimized=nopgo
-            ;;
-        --collectionflags=*)
-            collectionflags=${i#*=}
-            ;;
-        --generatebenchviewdata=*)
-            BENCHVIEW_TOOLS_PATH=${i#*=}
-            ;;
-        --stabilityPrefix=*)
-            stabilityPrefix=${i#*=}
-            ;;
-        --outputdir=*)
-            benchmarksOutputDir=${i#*=}
-            ;;
-        --uploadToBenchview)
-            uploadToBenchview=TRUE
-            ;;
-        *)
-            echo "Unknown switch: $i"
-            print_usage
-            exit $EXIT_CODE_EXCEPTION
-            ;;
-    esac
-done
-
-if [ -z "$testRootDir" ]; then
-    echo "--testRootDir is required."
-    print_usage
-    exit $EXIT_CODE_EXCEPTION
-fi
-if [ ! -d "$testRootDir" ]; then
-    echo "Directory specified by --testRootDir does not exist: $testRootDir"
-    exit $EXIT_CODE_EXCEPTION
-fi
-if [ ! -z "$BENCHVIEW_TOOLS_PATH" ] && { [ ! -d "$BENCHVIEW_TOOLS_PATH" ]; }; then
-    echo BenchView path: "$BENCHVIEW_TOOLS_PATH" was specified, but it does not exist.
-    exit $EXIT_CODE_EXCEPTION
-fi
-is_valid_optlevel || exit $EXIT_CODE_EXCEPTION
-if [ "$collectionflags" == "stopwatch" ]; then
-    perfCollection=Off
-else
-    perfCollection=On
-fi
-
-# Install xunit performance packages
-CORECLR_REPO=$testNativeBinDir/../../../..
-DOTNETCLI_PATH=$CORECLR_REPO/Tools/dotnetcli
-
-export NUGET_PACKAGES=$CORECLR_REPO/packages
-
-# Creat coreoverlay dir which contains all dependent binaries
-create_core_overlay || { echo "Creating core overlay failed."; exit 1; }
-precompile_overlay_assemblies || { echo "Precompiling overlay assemblies failed."; exit 1; }
-
-# If the output Logs folder exist, it was from a previous run (It needs to be deleted).
-if [ ! -d "$benchmarksOutputDir" ]; then
-    mkdir -p "$benchmarksOutputDir" || { echo "Failed to delete $benchmarksOutputDir"; exit 1; }
-fi
-
-setup_optimization_level || exit $EXIT_CODE_EXCEPTION
-
-cd $CORE_ROOT
-
-DO_SETUP=TRUE
-if [ ${DO_SETUP} == "TRUE" ]; then
-    # Deploy xunit performance packages
-    $DOTNETCLI_PATH/dotnet restore $CORECLR_REPO/tests/src/Common/PerfHarness/PerfHarness.csproj || { echo "dotnet restore failed."; exit 1; }
-    $DOTNETCLI_PATH/dotnet publish $CORECLR_REPO/tests/src/Common/PerfHarness/PerfHarness.csproj -c Release -o "$coreOverlayDir" || { echo "dotnet publish failed."; exit 1; }
-fi
-
-# Run coreclr performance tests
-echo "Test root dir: $testRootDir"
-tests=($(find $testRootDir/JIT/Performance/CodeQuality -name '*.exe') $(find $testRootDir/performance/perflab/PerfLab -name '*.dll'))
-
-if [ -f measurement.json ]; then
-    rm measurement.json || exit $EXIT_CODE_EXCEPTION;
-fi
-
-for testcase in ${tests[@]}; do
-    directory=$(dirname "$testcase")
-    filename=$(basename "$testcase")
-    filename="${filename%.*}"
-
-    test=$(basename $testcase)
-    testname=$(basename $testcase .exe)
-
-    cp $testcase . || exit 1
-    if [ stat -t "$directory/$filename"*.txt 1>/dev/null 2>&1 ]; then
-        cp "$directory/$filename"*.txt . || exit 1
-    fi
-
-    # FIXME: We should not need this here.
-    chmod u+x ./corerun
-
-    xUnitRunId=Perf-$perfCollection
-    perfLogFileName=$benchmarksOutputDir/$xUnitRunId-$filename.log
-    perfXmlFileName=$benchmarksOutputDir/$xUnitRunId-$filename.xml
-
-    echo ""
-    echo "----------"
-    echo "  Running $xUnitRunId $testname"
-    echo "----------"
-
-    run_command $stabilityPrefix ./corerun PerfHarness.dll $test --perf:runid "$xUnitRunId" --perf:outputdir "$benchmarksOutputDir" --perf:collect $collectionflags 1>"$perfLogFileName" 2>&1 || exit 1
-    if [ -d "$BENCHVIEW_TOOLS_PATH" ]; then
-        run_command python3.5 "$BENCHVIEW_TOOLS_PATH/measurement.py" xunit "$perfXmlFileName" --better desc $hasWarmupRun --append || {
-            echo [ERROR] Failed to generate BenchView data;
-            exit 1;
-        }
-    fi
-done
-
-if [ -d "$BENCHVIEW_TOOLS_PATH" ]; then
-    args=measurement.json
-    args+=" --build $CORECLR_REPO/build.json"
-    args+=" --machine-data $CORECLR_REPO/machinedata.json"
-    args+=" --metadata $CORECLR_REPO/submission-metadata.json"
-    args+=" --group $benchViewGroup"
-    args+=" --type $runType"
-    args+=" --config-name Release"
-    args+=" --config Configuration Release"
-    args+=" --config OS $benchViewOS"
-    args+=" --config PGO $pgoOptimized"
-    args+=" --config Profile $perfCollection"
-    args+=" --config JitName ryujit"
-    args+=" --config OptLevel $optLevel"
-    args+=" --architecture x64"
-    args+=" --machinepool Perfsnake"
-    run_command python3.5 "$BENCHVIEW_TOOLS_PATH/submission.py" $args || {
-        echo [ERROR] Failed to generate BenchView submission data;
-        exit 1;
-    }
-fi
-
-if [ -d "$BENCHVIEW_TOOLS_PATH" ] && { [ "$uploadToBenchview" == "TRUE" ]; }; then
-    run_command python3.5 "$BENCHVIEW_TOOLS_PATH/upload.py" submission.json --container coreclr
-fi
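For reference, the converged Python runner ends up being invoked the same way on both platforms. The commands below are a sketch assembled from the perf.groovy changes above, with the Jenkins variables (${architecture}, ${configuration}, %WORKSPACE%) replaced by representative values; x64/Release are illustrative assumptions, not values fixed by this diff:

    rem Windows job (was run-xunit-perf.cmd; 'py' is the Windows Python launcher)
    py tests\scripts\run-xunit-perf.py -arch x64 -configuration Release -testBinLoc bin\tests\Windows_NT.x64.Release\Jit\Performance\CodeQuality -outputdir "%WORKSPACE%\bin\sandbox_logs"

    # Linux job (was run-xunit-perf.sh; tests are consumed from the Windows_NT layout)
    python3 ./tests/scripts/run-xunit-perf.py -testBinLoc bin/tests/Windows_NT.x64.Release/JIT/Performance/CodeQuality -arch x64 -os Ubuntu16.04 -configuration Release -stabilityPrefix "taskset 0x00000002 nice --adjustment=-10" -outputdir "${WORKSPACE}/bin/sandbox_logs"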