Diffstat (limited to 'tests/scripts')
-rwxr-xr-x  tests/scripts/arm32_ci_script.sh                |   8
-rwxr-xr-x  tests/scripts/arm32_ci_test.sh                  |  16
-rw-r--r--  tests/scripts/arm64_post_build.py               |  51
-rwxr-xr-x  tests/scripts/optdata/bootstrap.py              |  15
-rw-r--r--  tests/scripts/optdata/optdata.csproj            |  29
-rw-r--r--  tests/scripts/optdata/project.json              |  12
-rw-r--r--  tests/scripts/project.json                      |   2
-rw-r--r--  tests/scripts/run-gc-reliability-framework.cmd  |  10
-rwxr-xr-x  tests/scripts/run-gc-reliability-framework.sh   |   6
-rw-r--r--  tests/scripts/run-throughput-perf.py            |   6
-rw-r--r--  tests/scripts/run-xunit-perf.cmd                | 207
-rwxr-xr-x  tests/scripts/run-xunit-perf.sh                 | 316
-rw-r--r--  tests/scripts/scripts.csproj                    |  36
13 files changed, 434 insertions, 280 deletions
diff --git a/tests/scripts/arm32_ci_script.sh b/tests/scripts/arm32_ci_script.sh
index 209ab39869..98f35066d4 100755
--- a/tests/scripts/arm32_ci_script.sh
+++ b/tests/scripts/arm32_ci_script.sh
@@ -265,7 +265,9 @@ function cross_build_coreclr_with_docker {
# For armel Tizen, we are going to construct RootFS on the fly.
case $__linuxCodeName in
tizen)
- __dockerImage=" t2wish/dotnetcore:ubuntu1404_cross_prereqs_v4"
+ __dockerImage=" hqueue/dotnetcore:ubuntu1404_cross_prereqs_v4-tizen_rootfs"
+ __skipRootFS=1
+ __dockerEnvironmentVariables+=" -e ROOTFS_DIR=/crossrootfs/armel.tizen.build"
__runtimeOS="tizen.4.0.0"
;;
*)
@@ -381,7 +383,9 @@ function run_tests_using_docker {
elif [ "$__buildArch" == "armel" ]; then
case $__linuxCodeName in
tizen)
- __dockerImage=" t2wish/dotnetcore:ubuntu1404_cross_prereqs_v3"
+ __dockerImage=" hqueue/dotnetcore:ubuntu1404_cross_prereqs_v4-tizen_rootfs"
+ __skipRootFS=1
+ __dockerEnvironmentVariables=" -e ROOTFS_DIR=/crossrootfs/armel.tizen.test"
;;
*)
exit_with_error "ERROR: $__linuxCodeName is not a supported linux name for $__buildArch" false
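Note: the Tizen armel path now reuses a rootfs prebuilt into the hqueue/dotnetcore image, so on-the-fly rootfs construction is skipped and only ROOTFS_DIR is handed to the container. A minimal sketch of how the variable presumably reaches the container, assuming __dockerEnvironmentVariables is spliced into the docker run invocation elsewhere in the script; the trailing build command is a placeholder, not taken from this diff:

    # Hypothetical docker invocation assembled from the variables set above.
    __dockerImage="hqueue/dotnetcore:ubuntu1404_cross_prereqs_v4-tizen_rootfs"
    __dockerEnvironmentVariables="-e ROOTFS_DIR=/crossrootfs/armel.tizen.build"
    docker run --rm \
        ${__dockerEnvironmentVariables} \
        -v "${PWD}:/opt/code" -w /opt/code \
        ${__dockerImage} \
        ./build.sh armel cross    # placeholder build command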
diff --git a/tests/scripts/arm32_ci_test.sh b/tests/scripts/arm32_ci_test.sh
index 2f0b03150b..3fb36e12ec 100755
--- a/tests/scripts/arm32_ci_test.sh
+++ b/tests/scripts/arm32_ci_test.sh
@@ -81,25 +81,11 @@ else
fi
if [ "$__abi" == "armel" ]; then
- # TODO: Make use of a single Tizen rootfs for build and test
-
- # TODO-cleanup: the latest docker image already has mic installed.
- # Prepare Tizen (armel) environment
- #echo "deb http://download.tizen.org/tools/latest-release/Ubuntu_14.04 /" >> /etc/apt/sources.list
- #apt-get update
- #apt-get -y -qq --force-yes install mic
-
+ # Prepare armel emulation environment
pushd ${CORECLR_DIR}/cross/armel/tizen
- mic --non-interactive create fs --pack-to=tizen.tar.gz tizen-dotnet.ks
- if [ -d ${__ROOTFS_DIR} ]; then
- mv ${__ROOTFS_DIR} ${__ROOTFS_DIR}_build
- fi
- mkdir -p ${__ROOTFS_DIR}
- tar -zxf mic-output/tizen.tar.gz -C ${__ROOTFS_DIR}
apt-get update
apt-get -y -qq --force-yes --reinstall install qemu binfmt-support qemu-user-static
__qemuARM=$(which qemu-arm-static)
- cp $__qemuARM ${CORECLR_DIR}/cross/rootfs/armel/usr/bin/
cp $__qemuARM ${__ROOTFS_DIR}/usr/bin/
popd
fi
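Note: with the rootfs now shipped inside the docker image, the test script only needs qemu user-mode emulation registered on the host and the static emulator copied into the rootfs. A hedged sanity check after the block above (not part of the script; update-binfmts comes from the binfmt-support package installed here):

    # Confirm qemu-arm is registered with binfmt_misc and the emulator landed in the rootfs.
    update-binfmts --display qemu-arm
    ls -l "${__ROOTFS_DIR}/usr/bin/qemu-arm-static"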
diff --git a/tests/scripts/arm64_post_build.py b/tests/scripts/arm64_post_build.py
index 4ed8032fc2..62818c433b 100644
--- a/tests/scripts/arm64_post_build.py
+++ b/tests/scripts/arm64_post_build.py
@@ -34,8 +34,7 @@ from collections import defaultdict
g_arm64ci_path = os.path.join(os.environ["USERPROFILE"], "bin")
g_dotnet_url = "https://go.microsoft.com/fwlink/?LinkID=831469"
-g_test_url = "https://clrjit.blob.core.windows.net/arm64ci/CoreCLR-Pri1Testing.zip"
-g_x64_client_url = "https://clrjit.blob.core.windows.net/arm64ci/x64_client.zip"
+g_x64_client_url = "https://clrjit.blob.core.windows.net/arm64ci/x64_client_live_tests.zip"
################################################################################
# Argument Parser
@@ -88,6 +87,34 @@ def copy_core_root(core_root):
except OSError as error:
log("Core Root not copied. Error: %s" % error)
+ sys.exit(1)
+
+ return new_location
+
+def copy_tests(test_location):
+ """ Copy the test directory to the current dir as "tests"
+ Args:
+ test_location (str): location of the tests directory
+ Returns:
+ copy_location (str): name of the location, for now hardcoded to tests
+ : for backcompat in the old system
+ """
+
+ new_location = "tests"
+
+ # Delete used instances.
+ if os.path.isdir(new_location):
+ try:
+ shutil.rmtree(new_location)
+ except:
+ assert not os.path.isdir(new_location)
+
+ try:
+ shutil.copytree(test_location, new_location)
+
+ except OSError as error:
+ log("Test location not copied. Error: %s" % error)
+ sys.exit(1)
return new_location
@@ -249,15 +276,28 @@ def validate_args(args):
def main(args):
global g_arm64ci_path
- global g_test_url
repo_root, arch, build_type, scenario, key_location, force_update = validate_args(args)
+ cwd = os.getcwd()
+ os.chdir(repo_root)
+
+ runtest_location = os.path.join(repo_root, "tests", "runtest.cmd")
+ args = [runtest_location, "GenerateLayoutOnly", arch, build_type]
+ subprocess.check_call(args)
+
+ os.chdir(cwd)
+
core_root = os.path.join(repo_root,
"bin",
"Product",
"Windows_NT.%s.%s" % (arch, build_type))
+ test_location = os.path.join(repo_root,
+ "bin",
+ "tests",
+ "Windows_NT.%s.%s" % (arch, build_type))
+
cli_location = setup_cli(force_update=force_update)
add_item_to_path(cli_location)
@@ -269,6 +309,9 @@ def main(args):
core_root = copy_core_root(core_root)
log("Copied core_root to %s." % core_root)
+ test_location = copy_tests(test_location)
+ log("Copied test location to %s." % test_location)
+
# Make sure the lst file is copied into the core_root
lst_file = os.path.join(repo_root, "tests", arch, "Tests.lst")
shutil.copy2(lst_file, core_root)
@@ -280,7 +323,7 @@ def main(args):
build_type,
scenario,
core_root,
- g_test_url]
+ test_location]
log(" ".join(args))
proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
diff --git a/tests/scripts/optdata/bootstrap.py b/tests/scripts/optdata/bootstrap.py
index 1cf55fa70c..8dcecca779 100755
--- a/tests/scripts/optdata/bootstrap.py
+++ b/tests/scripts/optdata/bootstrap.py
@@ -7,12 +7,12 @@
"""
import argparse
-import json
import os
from os import path
import shutil
import subprocess
import sys
+import xml.etree.ElementTree as ET
# Display the docstring if the user passes -h|--help
argparse.ArgumentParser(description=__doc__).parse_args()
@@ -24,8 +24,8 @@ NUGET_SRC_DIR = path.join(REPO_ROOT, 'src', '.nuget')
assert path.exists(NUGET_SRC_DIR), \
"Expected %s to exist; please check whether REPO_ROOT is really %s" % (NUGET_SRC_DIR, REPO_ROOT)
-ORIGIN_FILE = path.join(SCRIPT_ROOT, 'project.json')
-TARGET_FILE = path.join(NUGET_SRC_DIR, 'optdata', 'project.json')
+ORIGIN_FILE = path.join(SCRIPT_ROOT, 'optdata.csproj')
+TARGET_FILE = path.join(NUGET_SRC_DIR, 'optdata', 'optdata.csproj')
ARCH_LIST = ['x64', 'x86']
TOOL_LIST = ['IBC', 'PGO']
@@ -40,9 +40,12 @@ def get_buildos():
def get_optdata_version(tool):
"""Returns the version string specified in project.json for the given tool."""
- package_name = 'optimization.%s.CoreCLR' % (tool)
- with open(ORIGIN_FILE) as json_file:
- return json.load(json_file)['dependencies'][package_name]
+ element_name = {
+ 'IBC': 'IbcDataPackageVersion',
+ 'PGO': 'PgoDataPackageVersion',
+ }[tool]
+ root = ET.parse(ORIGIN_FILE)
+ return root.findtext('./PropertyGroup/{}'.format(element_name))
def get_optdata_dir(tool, arch):
"""Returns an absolute path to the directory that should contain optdata given a tool,arch"""
diff --git a/tests/scripts/optdata/optdata.csproj b/tests/scripts/optdata/optdata.csproj
new file mode 100644
index 0000000000..ac7360643f
--- /dev/null
+++ b/tests/scripts/optdata/optdata.csproj
@@ -0,0 +1,29 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+ <Import Project="$([MSBuild]::GetDirectoryNameOfFileAbove($(MSBuildThisFileDirectory), dir.props))\dir.props" />
+
+ <PropertyGroup>
+ <TargetFramework>netstandard</TargetFramework>
+ <DisableImplicitFrameworkReferences>true</DisableImplicitFrameworkReferences>
+ <RuntimeIdentifiers>win7-x64;win7-x86;linux-x64</RuntimeIdentifiers>
+ </PropertyGroup>
+
+ <PropertyGroup>
+ <PgoDataPackageVersion>99.99.99-test</PgoDataPackageVersion>
+ <IbcDataPackageVersion>99.99.99-test</IbcDataPackageVersion>
+ </PropertyGroup>
+
+ <ItemGroup>
+ <PackageReference Include="optimization.PGO.CoreCLR" Version="$(PgoDataPackageVersion)" Condition="'$(PgoDataPackageVersion)'!=''" />
+ <PackageReference Include="optimization.IBC.CoreCLR" Version="$(IbcDataPackageVersion)" Condition="'$(IbcDataPackageVersion)'!=''" />
+ </ItemGroup>
+
+ <Target Name="DumpPgoDataPackageVersion">
+ <Message Importance="high" Text="$(PgoDataPackageVersion)" />
+ </Target>
+
+ <Target Name="DumpIbcDataPackageVersion">
+ <Message Importance="high" Text="$(IbcDataPackageVersion)" />
+ </Target>
+
+</Project>
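Note: bootstrap.py now reads IbcDataPackageVersion/PgoDataPackageVersion from this project instead of project.json, and the two Dump* targets expose the same values on the command line. A hedged example of querying them (assumes a dotnet CLI with MSBuild support is available, which this diff does not itself guarantee):

    # Print the pinned PGO data package version; the target name comes from optdata.csproj above.
    dotnet msbuild tests/scripts/optdata/optdata.csproj /t:DumpPgoDataPackageVersion /nologo /v:minimal
    # Expected output given the versions pinned above: 99.99.99-test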
diff --git a/tests/scripts/optdata/project.json b/tests/scripts/optdata/project.json
deleted file mode 100644
index ae8f9463c4..0000000000
--- a/tests/scripts/optdata/project.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "dependencies": {
- "optimization.IBC.CoreCLR": "99.99.99-test",
- "optimization.PGO.CoreCLR": "99.99.99-test"
- },
- "frameworks": {
- "netstandard": {}
- },
- "runtimes": {
- "win7-x64": {}
- }
-}
diff --git a/tests/scripts/project.json b/tests/scripts/project.json
index b3c3a15f62..394cd2f922 100644
--- a/tests/scripts/project.json
+++ b/tests/scripts/project.json
@@ -15,4 +15,4 @@
]
}
}
-}
+}
\ No newline at end of file
diff --git a/tests/scripts/run-gc-reliability-framework.cmd b/tests/scripts/run-gc-reliability-framework.cmd
new file mode 100644
index 0000000000..f9a6ae277d
--- /dev/null
+++ b/tests/scripts/run-gc-reliability-framework.cmd
@@ -0,0 +1,10 @@
+@rem Licensed to the .NET Foundation under one or more agreements.
+@rem The .NET Foundation licenses this file to you under the MIT license.
+@rem See the LICENSE file in the project root for more information.
+
+@echo off
+
+set CORE_ROOT=%CD%\bin\tests\Windows_NT.%1.%2\Tests\Core_Root
+set FRAMEWORK_DIR=%CD%\bin\tests\Windows_NT.%1.%2\GC\Stress\Framework\ReliabilityFramework
+powershell "%CORE_ROOT%\CoreRun.exe %FRAMEWORK_DIR%\ReliabilityFramework.exe %FRAMEWORK_DIR%\testmix_gc.config | tee stdout.txt"
+
diff --git a/tests/scripts/run-gc-reliability-framework.sh b/tests/scripts/run-gc-reliability-framework.sh
new file mode 100755
index 0000000000..d1c200ef02
--- /dev/null
+++ b/tests/scripts/run-gc-reliability-framework.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+export CORE_ROOT=`pwd`/bin/tests/Windows_NT.$1.$2/Tests/coreoverlay
+FRAMEWORK_DIR=`pwd`/bin/tests/Windows_NT.$1.$2/GC/Stress/Framework/ReliabilityFramework
+$CORE_ROOT/corerun $FRAMEWORK_DIR/ReliabilityFramework.exe $FRAMEWORK_DIR/testmix_gc.config | tee stdout.txt
+
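Note: both new wrappers take the architecture and configuration as positional arguments and derive CORE_ROOT from the current directory, so they are meant to be launched from the repository root. A hedged usage sketch for the shell variant (the path and the x64/Release values are placeholders):

    # Run the GC reliability framework against an x64 Release test layout.
    # Assumes bin/tests/Windows_NT.x64.Release/Tests/coreoverlay already exists.
    cd /path/to/coreclr
    ./tests/scripts/run-gc-reliability-framework.sh x64 Release
    # Output is mirrored to stdout.txt via tee, as in the script above.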
diff --git a/tests/scripts/run-throughput-perf.py b/tests/scripts/run-throughput-perf.py
index ee6e4a3c58..cc1e151b41 100644
--- a/tests/scripts/run-throughput-perf.py
+++ b/tests/scripts/run-throughput-perf.py
@@ -53,7 +53,6 @@ jit_list = {
'Windows_NT': {
'x64': 'clrjit.dll',
'x86': 'clrjit.dll',
- 'x86jit32': 'compatjit.dll'
},
'Linux': {
'x64': 'libclrjit.so'
@@ -126,7 +125,7 @@ def validate_args(args):
if not helper(arg):
raise Exception('Argument: %s is not valid.' % (arg))
- valid_archs = {'Windows_NT': ['x86', 'x64', 'x86jit32'], 'Linux': ['x64']}
+ valid_archs = {'Windows_NT': ['x86', 'x64'], 'Linux': ['x64']}
valid_build_types = ['Release']
valid_run_types = ['rolling', 'private']
valid_os = ['Windows_NT', 'Ubuntu14.04']
@@ -281,9 +280,6 @@ def main(args):
architecture, operating_system, os_group, build_type, run_type, clr_root, assembly_root, benchview_path = validate_args(args)
arch = architecture
- if architecture == 'x86jit32':
- arch = 'x86'
-
current_dir = os.getcwd()
jit = jit_list[os_group][architecture]
crossgen = 'crossgen'
diff --git a/tests/scripts/run-xunit-perf.cmd b/tests/scripts/run-xunit-perf.cmd
index 7895b3f16e..e223a3bda9 100644
--- a/tests/scripts/run-xunit-perf.cmd
+++ b/tests/scripts/run-xunit-perf.cmd
@@ -5,7 +5,7 @@
@echo off
@if defined _echo echo on
-setlocal
+setlocal ENABLEDELAYEDEXPANSION
set ERRORLEVEL=
set BENCHVIEW_RUN_TYPE=local
set CORECLR_REPO=%CD%
@@ -15,17 +15,27 @@ setlocal
set TEST_CONFIG=Release
set IS_SCENARIO_TEST=
set USAGE_DISPLAYED=
+ set SHOULD_UPLOAD_TO_BENCHVIEW=
+ set BENCHVIEW_PATH=
+ set COLLECTION_FLAGS=stopwatch
+ set ETW_COLLECTION=Off
+ set STABILITY_PREFIX=
+ set BENCHVIEW_GROUP=CoreCLR
+ set HAS_WARMUP_RUN=--drop-first-value
+ set BETTER=desc
call :parse_command_line_arguments %*
if defined USAGE_DISPLAYED exit /b %ERRORLEVEL%
- call :set_test_architecture || exit /b 1
- call :verify_core_overlay || exit /b 1
- call :set_perf_run_log || exit /b 1
- call :setup_sandbox || exit /b 1
+ call :set_test_architecture || exit /b 1
+ call :set_collection_config || exit /b 1
+ call :verify_benchview_tools || exit /b 1
+ call :verify_core_overlay || exit /b 1
+ call :set_perf_run_log || exit /b 1
+ call :setup_sandbox || exit /b 1
- call :run_cmd "%CORECLR_REPO%\Tools\dotnetcli\dotnet.exe" restore "%CORECLR_REPO%\tests\src\Common\PerfHarness\project.json" || exit /b 1
- call :run_cmd "%CORECLR_REPO%\Tools\dotnetcli\dotnet.exe" publish "%CORECLR_REPO%\tests\src\Common\PerfHarness\project.json" -c Release -o "%CORECLR_REPO%\sandbox" || exit /b 1
+ call :run_cmd "%CORECLR_REPO%\Tools\dotnetcli\dotnet.exe" restore "%CORECLR_REPO%\tests\src\Common\PerfHarness\PerfHarness.csproj" || exit /b 1
+ call :run_cmd "%CORECLR_REPO%\Tools\dotnetcli\dotnet.exe" publish "%CORECLR_REPO%\tests\src\Common\PerfHarness\PerfHarness.csproj" -c Release -o "%CORECLR_REPO%\sandbox" || exit /b 1
rem TODO: Remove the version of the package to copy. e.g.) if multiple version exist, then error out?
call :run_cmd xcopy /sy "%CORECLR_REPO%\packages\Microsoft.Diagnostics.Tracing.TraceEvent\1.0.3-alpha-experimental\lib\native"\* . >> %RUNLOG% || exit /b 1
@@ -34,7 +44,6 @@ setlocal
rem find and stage the tests
set /A "LV_FAILURES=0"
for /R %CORECLR_PERF% %%T in (*.%TEST_FILE_EXT%) do (
- rem Skip known failures
call :run_benchmark %%T || (
set /A "LV_FAILURES+=1"
)
@@ -60,17 +69,13 @@ rem ****************************************************************************
setlocal
set BENCHNAME=%~n1
set BENCHDIR=%~p1
- set PERFOUT=perf-%BENCHNAME%
- set XMLOUT=%PERFOUT%.xml
rem copy benchmark and any input files
- call :run_cmd xcopy /s %~1 . >> %RUNLOG% || exit /b 1
+ call :run_cmd xcopy /sy %~1 . >> %RUNLOG% || exit /b 1
if exist "%BENCHDIR%*.txt" (
- call :run_cmd xcopy /s %BENCHDIR%*.txt . >> %RUNLOG% || exit /b 1
+ call :run_cmd xcopy /sy %BENCHDIR%*.txt . >> %RUNLOG% || exit /b 1
)
- set CORE_ROOT=%CORECLR_REPO%\sandbox
-
rem setup additional environment variables
if DEFINED TEST_ENV (
if EXIST "%TEST_ENV%" (
@@ -78,13 +83,26 @@ setlocal
)
)
- set BENCHNAME_LOG_FILE_NAME=%BENCHNAME%.log
+ echo/
+ echo/ ----------
+ echo/ Running %BENCHNAME%
+ echo/ ----------
+
+ rem CORE_ROOT environment variable is used by some benchmarks such as Roslyn / CscBench.
+ set CORE_ROOT=%CORECLR_REPO%\sandbox
+
+ set LV_RUNID=Perf-%ETW_COLLECTION%
+ set BENCHNAME_LOG_FILE_NAME=%LV_RUNID%-%BENCHNAME%.log
+ set LV_CMD=
if defined IS_SCENARIO_TEST (
- call :run_cmd corerun.exe "%CORECLR_REPO%\sandbox\%BENCHNAME%.%TEST_FILE_EXT%" --perf:runid Perf 1>"%BENCHNAME_LOG_FILE_NAME%" 2>&1
+ set "LV_CMD=corerun.exe "%CORECLR_REPO%\sandbox\%BENCHNAME%.%TEST_FILE_EXT%" --perf:runid "%LV_RUNID%""
) else (
- call :run_cmd corerun.exe PerfHarness.dll "%CORECLR_REPO%\sandbox\%BENCHNAME%.%TEST_FILE_EXT%" --perf:runid Perf 1>"%BENCHNAME_LOG_FILE_NAME%" 2>&1
+ set "LV_CMD=%STABILITY_PREFIX% corerun.exe PerfHarness.dll "%CORECLR_REPO%\sandbox\%BENCHNAME%.%TEST_FILE_EXT%" --perf:runid "%LV_RUNID%" --perf:collect %COLLECTION_FLAGS%"
)
+ call :print_to_console $ !LV_CMD!
+ call :run_cmd !LV_CMD! 1>"%BENCHNAME_LOG_FILE_NAME%" 2>&1
+
IF %ERRORLEVEL% NEQ 0 (
call :print_error corerun.exe exited with %ERRORLEVEL% code.
if exist "%BENCHNAME_LOG_FILE_NAME%" type "%BENCHNAME_LOG_FILE_NAME%"
@@ -92,15 +110,16 @@ setlocal
)
rem optionally generate results for benchview
- if not [%BENCHVIEW_PATH%] == [] (
+ if exist "%BENCHVIEW_PATH%" (
call :generate_results_for_benchview || exit /b 1
- ) else (
- type "%XMLOUT%" | findstr /i /c:"test name"
)
rem Save off the results to the root directory for recovery later in Jenkins
- call :run_cmd xcopy "Perf-%BENCHNAME%*.xml" "%CORECLR_REPO%\" || exit /b 1
- call :run_cmd xcopy "Perf-%BENCHNAME%*.etl" "%CORECLR_REPO%\" || exit /b 1
+ for %%e in (xml etl log) do (
+ IF EXIST ".\%LV_RUNID%-%BENCHNAME%.%%e" (
+ call :run_cmd xcopy /vy ".\%LV_RUNID%-%BENCHNAME%.%%e" .. || exit /b 1
+ )
+ )
exit /b 0
@@ -114,23 +133,51 @@ rem ****************************************************************************
shift
goto :parse_command_line_arguments
)
+ IF /I [%~1] == [-stabilityPrefix] (
+ set STABILITY_PREFIX=%~2
+ shift
+ shift
+ goto :parse_command_line_arguments
+ )
IF /I [%~1] == [-scenarioTest] (
set IS_SCENARIO_TEST=1
shift
goto :parse_command_line_arguments
)
+ IF /I [%~1] == [-uploadtobenchview] (
+ set SHOULD_UPLOAD_TO_BENCHVIEW=1
+ shift
+ goto :parse_command_line_arguments
+ )
+ IF /I [%~1] == [-nowarmup] (
+ set HAS_WARMUP_RUN=
+ shift
+ goto :parse_command_line_arguments
+ )
+ IF /I [%~1] == [-better] (
+ set BETTER=%~2
+ shift
+ shift
+ goto :parse_command_line_arguments
+ )
IF /I [%~1] == [-runtype] (
set BENCHVIEW_RUN_TYPE=%~2
shift
shift
goto :parse_command_line_arguments
)
+ IF /I [%~1] == [-collectionflags] (
+ set COLLECTION_FLAGS=%~2
+ shift
+ shift
+ goto :parse_command_line_arguments
+ )
IF /I [%~1] == [-library] (
set TEST_FILE_EXT=dll
shift
goto :parse_command_line_arguments
)
- IF /I [%~1] == [-uploadtobenchview] (
+ IF /I [%~1] == [-generatebenchviewdata] (
set BENCHVIEW_PATH=%~2
shift
shift
@@ -154,7 +201,12 @@ rem ****************************************************************************
shift
goto :parse_command_line_arguments
)
-
+ IF /I [%~1] == [-group] (
+ set BENCHVIEW_GROUP=%~2
+ shift
+ shift
+ goto :parse_command_line_arguments
+ )
if /I [%~1] == [-?] (
call :USAGE
exit /b 0
@@ -173,10 +225,18 @@ rem ****************************************************************************
rem ****************************************************************************
rem Sets the test architecture.
rem ****************************************************************************
- IF /I [%TEST_ARCHITECTURE%] == [x86jit32] (
- set TEST_ARCH=x86
- ) ELSE (
- set TEST_ARCH=%TEST_ARCHITECTURE%
+ set TEST_ARCH=%TEST_ARCHITECTURE%
+ exit /b 0
+
+:verify_benchview_tools
+rem ****************************************************************************
+rem Verifies that the path to the benchview tools is correct.
+rem ****************************************************************************
+ if defined BENCHVIEW_PATH (
+ if not exist "%BENCHVIEW_PATH%" (
+ call :print_error BenchView path: "%BENCHVIEW_PATH%" was specified, but it does not exist.
+ exit /b 1
+ )
)
exit /b 0
@@ -191,6 +251,18 @@ rem ****************************************************************************
)
exit /b 0
+:set_collection_config
+rem ****************************************************************************
rem Sets the config based on the providers used for collection
+rem ****************************************************************************
+ if /I [%COLLECTION_FLAGS%] == [stopwatch] (
+ set ETW_COLLECTION=Off
+ ) else (
+ set ETW_COLLECTION=On
+ )
+ exit /b 0
+
+
:set_perf_run_log
rem ****************************************************************************
rem Sets the script's output log file.
@@ -207,7 +279,7 @@ rem ****************************************************************************
rem Creates the sandbox folder used by the script to copy binaries locally,
rem and execute benchmarks.
rem ****************************************************************************
- if exist sandbox rd /s /q sandbox
+ if exist sandbox rmdir /s /q sandbox
if exist sandbox call :print_error Failed to remove the sandbox folder& exit /b 1
if not exist sandbox mkdir sandbox
if not exist sandbox call :print_error Failed to create the sandbox folder& exit /b 1
@@ -224,15 +296,19 @@ rem ****************************************************************************
set LV_MEASUREMENT_ARGS=
set LV_MEASUREMENT_ARGS=%LV_MEASUREMENT_ARGS% %BENCHVIEW_MEASUREMENT_PARSER%
- set LV_MEASUREMENT_ARGS=%LV_MEASUREMENT_ARGS% "Perf-%BENCHNAME%.xml"
- set LV_MEASUREMENT_ARGS=%LV_MEASUREMENT_ARGS% --better desc
- set LV_MEASUREMENT_ARGS=%LV_MEASUREMENT_ARGS% --drop-first-value
+ set LV_MEASUREMENT_ARGS=%LV_MEASUREMENT_ARGS% --better %BETTER%
+ set LV_MEASUREMENT_ARGS=%LV_MEASUREMENT_ARGS% %HAS_WARMUP_RUN%
set LV_MEASUREMENT_ARGS=%LV_MEASUREMENT_ARGS% --append
- call :run_cmd py.exe "%BENCHVIEW_PATH%\measurement.py" %LV_MEASUREMENT_ARGS%
- IF %ERRORLEVEL% NEQ 0 (
- call :print_error Failed to generate BenchView measurement data.
- exit /b 1
+
+ for /f %%f in ('dir /b Perf-*%BENCHNAME%.xml 2^>nul') do (
+ call :run_cmd py.exe "%BENCHVIEW_PATH%\measurement.py" %LV_MEASUREMENT_ARGS% %%f
+
+ IF !ERRORLEVEL! NEQ 0 (
+ call :print_error Failed to generate BenchView measurement data.
+ exit /b 1
+ )
)
+
endlocal& exit /b %ERRORLEVEL%
:upload_to_benchview
@@ -244,23 +320,28 @@ setlocal
set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --build ..\build.json
set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --machine-data ..\machinedata.json
set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --metadata ..\submission-metadata.json
- set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --group "CoreCLR"
+ set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --group "%BENCHVIEW_GROUP%"
set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --type "%BENCHVIEW_RUN_TYPE%"
set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config-name "%TEST_CONFIG%"
set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config Configuration "%TEST_CONFIG%"
set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config OS "Windows_NT"
+ set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config Profile "%ETW_COLLECTION%"
set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --arch "%TEST_ARCHITECTURE%"
set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --machinepool "PerfSnake"
+
call :run_cmd py.exe "%BENCHVIEW_PATH%\submission.py" measurement.json %LV_SUBMISSION_ARGS%
+
IF %ERRORLEVEL% NEQ 0 (
call :print_error Creating BenchView submission data failed.
exit /b 1
)
- call :run_cmd py.exe "%BENCHVIEW_PATH%\upload.py" submission.json --container coreclr
- IF %ERRORLEVEL% NEQ 0 (
- call :print_error Uploading to BenchView failed.
- exit /b 1
+ if defined SHOULD_UPLOAD_TO_BENCHVIEW (
+ call :run_cmd py.exe "%BENCHVIEW_PATH%\upload.py" submission.json --container coreclr
+ IF !ERRORLEVEL! NEQ 0 (
+ call :print_error Uploading to BenchView failed.
+ exit /b 1
+ )
)
exit /b %ERRORLEVEL%
@@ -269,17 +350,25 @@ rem ****************************************************************************
rem Script's usage.
rem ****************************************************************************
set USAGE_DISPLAYED=1
- echo run-xunit-perf.cmd -testBinLoc ^<path_to_tests^> [-library] [-arch] ^<x86^|x64^> [-configuration] ^<Release^|Debug^> [-uploadToBenchview] ^<path_to_benchview_tools^> [-runtype] ^<rolling^|private^> [-scenarioTest]
+ echo run-xunit-perf.cmd -testBinLoc ^<path_to_tests^> [-library] [-arch] ^<x86^|x64^> [-configuration] ^<Release^|Debug^> [-generateBenchviewData] ^<path_to_benchview_tools^> [-nowarmup] [-better] ^<asc ^| desc^> [-group] ^<group^> [-runtype] ^<rolling^|private^> [-scenarioTest] [-collectionFlags] ^<default^+CacheMisses^+InstructionRetired^+BranchMispredictions^+gcapi^>
echo/
echo For the path to the tests you can pass a parent directory and the script will grovel for
echo all tests in subdirectories and run them.
echo The library flag denotes whether the tests are build as libraries (.dll) or an executable (.exe)
echo Architecture defaults to x64 and configuration defaults to release.
- echo -uploadtoBenchview is used to specify a path to the Benchview tooling and when this flag is
- echo set we will upload the results of the tests to the coreclr container in benchviewupload.
+ echo -generateBenchviewData is used to specify a path to the Benchview tooling and when this flag is
+ echo set we will generate the results for upload to benchview.
+ echo -uploadToBenchview If this flag is set the generated benchview test data will be uploaded.
+ echo -nowarmup specifies not to discard the results of the first run
+ echo -better specifies whether ascending or descending values are better for the benchmark
+ echo -group specifies the Benchview group to which this data should be uploaded (default CoreCLR)
echo Runtype sets the runtype that we upload to Benchview, rolling for regular runs, and private for
echo PRs.
echo -scenarioTest should be included if you are running a scenario benchmark.
+ echo -collectionFlags This is used to specify what collection flags get passed to the performance
+ echo harness that runs the tests. If this is not specified we only use stopwatch.
+ echo Other flags are "default", which is whatever the test being run specified, "CacheMisses",
+ echo "BranchMispredictions", and "InstructionsRetired".
exit /b %ERRORLEVEL%
:print_error
@@ -287,19 +376,17 @@ rem ****************************************************************************
rem Function wrapper that unifies how errors are output by the script.
rem Functions output to the standard error.
rem ****************************************************************************
- echo [%DATE%][%TIME:~0,-3%][ERROR] %* 1>&2
+ call :print_to_console [ERROR] %* 1>&2
exit /b %ERRORLEVEL%
:print_to_console
rem ****************************************************************************
-rem Sends text to the console screen, no matter what (even when the script's
-rem output is redirected). This can be useful to provide information on where
-rem the script is executing.
+rem Sends text to the console screen. This can be useful to provide
+rem information on where the script is executing.
rem ****************************************************************************
- if defined _debug (
- echo [%DATE%][%TIME:~0,-3%] %* >CON
- )
- echo [%DATE%][%TIME:~0,-3%] %*
+ echo/
+ echo/%USERNAME%@%COMPUTERNAME% "%CD%"
+ echo/[%DATE%][%TIME:~0,-3%] %*
exit /b %ERRORLEVEL%
:run_cmd
@@ -315,19 +402,3 @@ rem ****************************************************************************
call :print_to_console $ %*
call %*
exit /b %ERRORLEVEL%
-
-:skip_failures
-rem ****************************************************************************
-rem Skip known failures
-rem ****************************************************************************
- IF /I [%TEST_ARCHITECTURE%] == [x86jit32] (
- IF /I "%~1" == "CscBench" (
- rem https://github.com/dotnet/coreclr/issues/11088
- exit /b 1
- )
- IF /I "%~1" == "SciMark2" (
- rem https://github.com/dotnet/coreclr/issues/11089
- exit /b 1
- )
- )
- exit /b 0
diff --git a/tests/scripts/run-xunit-perf.sh b/tests/scripts/run-xunit-perf.sh
index 6f49bf6e0a..e6758e59fb 100755
--- a/tests/scripts/run-xunit-perf.sh
+++ b/tests/scripts/run-xunit-perf.sh
@@ -1,5 +1,13 @@
#!/usr/bin/env bash
+function run_command {
+ echo ""
+ echo $USER@`hostname` "$PWD"
+ echo `date +"[%m/%d/%Y %H:%M:%S]"`" $ $@"
+ "$@"
+ return $?
+}
+
function print_usage {
echo ''
echo 'CoreCLR perf test script on Linux.'
@@ -14,29 +22,26 @@ function print_usage {
echo ' --coreFxBinDir="corefx/bin/Linux.AnyCPU.Debug"'
echo ''
echo 'Required arguments:'
- echo ' --testRootDir=<path> : Root directory of the test build (e.g. coreclr/bin/tests/Windows_NT.x64.Debug).'
- echo ' --testNativeBinDir=<path> : Directory of the native CoreCLR test build (e.g. coreclr/bin/obj/Linux.x64.Debug/tests).'
+ echo ' --testRootDir=<path> : Root directory of the test build (e.g. coreclr/bin/tests/Windows_NT.x64.Debug).'
+ echo ' --testNativeBinDir=<path> : Directory of the native CoreCLR test build (e.g. coreclr/bin/obj/Linux.x64.Debug/tests).'
echo ' (Also required: Either --coreOverlayDir, or all of the switches --coreOverlayDir overrides)'
echo ''
echo 'Optional arguments:'
- echo ' --coreOverlayDir=<path> : Directory containing core binaries and test dependencies. If not specified, the'
- echo ' default is testRootDir/Tests/coreoverlay. This switch overrides --coreClrBinDir,'
- echo ' --mscorlibDir, and --coreFxBinDir.'
- echo ' --coreClrBinDir=<path> : Directory of the CoreCLR build (e.g. coreclr/bin/Product/Linux.x64.Debug).'
- echo ' --mscorlibDir=<path> : Directory containing the built mscorlib.dll. If not specified, it is expected to be'
+ echo ' --coreOverlayDir=<path> : Directory containing core binaries and test dependencies. If not specified, the'
+ echo ' default is testRootDir/Tests/coreoverlay. This switch overrides --coreClrBinDir,'
+ echo ' --mscorlibDir, and --coreFxBinDir.'
+ echo ' --coreClrBinDir=<path> : Directory of the CoreCLR build (e.g. coreclr/bin/Product/Linux.x64.Debug).'
+ echo ' --mscorlibDir=<path> : Directory containing the built mscorlib.dll. If not specified, it is expected to be'
echo ' in the directory specified by --coreClrBinDir.'
- echo ' --coreFxBinDir="<path>" : The path to the unpacked runtime folder that is produced as part of a CoreFX build'
- echo ' --uploadToBenchview : Specify this flag in order to have the results of the run uploaded to Benchview.'
- echo ' This also requires that the os flag and runtype flag to be set. Lastly you must'
- echo ' also have the BV_UPLOAD_SAS_TOKEN set to a SAS token for the Benchview upload container'
- echo ' --benchViewOS=<os> : Specify the os that will be used to insert data into Benchview.'
- echo ' --runType=<private|rolling> : Specify the runType for Benchview.'
+ echo ' --coreFxBinDir="<path>" : The path to the unpacked runtime folder that is produced as part of a CoreFX build'
+ echo ' --generatebenchviewdata : BenchView tools directory.'
+ echo ' --uploadToBenchview : Specify this flag in order to have the results of the run uploaded to Benchview.'
+ echo ' This requires the generatebenchviewdata, os and runtype flags to be set, and'
+ echo ' BV_UPLOAD_SAS_TOKEN to be set to a SAS token for the Benchview upload container'
+ echo ' --benchViewOS=<os> : Specify the os that will be used to insert data into Benchview.'
+ echo ' --runType=<local|private|rolling> : Specify the runType for Benchview. [Default: local]'
}
-# Variables for xUnit-style XML output. XML format: https://xunit.github.io/docs/format-xml-v2.html
-xunitOutputPath=
-xunitTestOutputPath=
-
# libExtension determines extension for dynamic library files
OSName=$(uname -s)
libExtension=
@@ -59,95 +64,6 @@ case $OSName in
;;
esac
-function xunit_output_end {
- local errorSource=$1
- local errorMessage=$2
-
- local errorCount
- if [ -z "$errorSource" ]; then
- ((errorCount = 0))
- else
- ((errorCount = 1))
- fi
-
- echo '<?xml version="1.0" encoding="utf-8"?>' >>"$xunitOutputPath"
- echo '<assemblies>' >>"$xunitOutputPath"
-
- local line
-
- # <assembly ...>
- line=" "
- line="${line}<assembly"
- line="${line} name=\"CoreClrTestAssembly\""
- line="${line} total=\"${countTotalTests}\""
- line="${line} passed=\"${countPassedTests}\""
- line="${line} failed=\"${countFailedTests}\""
- line="${line} skipped=\"${countSkippedTests}\""
- line="${line} errors=\"${errorCount}\""
- line="${line}>"
- echo "$line" >>"$xunitOutputPath"
-
- # <collection ...>
- line=" "
- line="${line}<collection"
- line="${line} name=\"CoreClrTestCollection\""
- line="${line} total=\"${countTotalTests}\""
- line="${line} passed=\"${countPassedTests}\""
- line="${line} failed=\"${countFailedTests}\""
- line="${line} skipped=\"${countSkippedTests}\""
- line="${line}>"
- echo "$line" >>"$xunitOutputPath"
-
- # <test .../> <test .../> ...
- if [ -f "$xunitTestOutputPath" ]; then
- cat "$xunitTestOutputPath" >>"$xunitOutputPath"
- rm -f "$xunitTestOutputPath"
- fi
-
- # </collection>
- line=" "
- line="${line}</collection>"
- echo "$line" >>"$xunitOutputPath"
-
- if [ -n "$errorSource" ]; then
- # <errors>
- line=" "
- line="${line}<errors>"
- echo "$line" >>"$xunitOutputPath"
-
- # <error ...>
- line=" "
- line="${line}<error"
- line="${line} type=\"TestHarnessError\""
- line="${line} name=\"${errorSource}\""
- line="${line}>"
- echo "$line" >>"$xunitOutputPath"
-
- # <failure .../>
- line=" "
- line="${line}<failure>${errorMessage}</failure>"
- echo "$line" >>"$xunitOutputPath"
-
- # </error>
- line=" "
- line="${line}</error>"
- echo "$line" >>"$xunitOutputPath"
-
- # </errors>
- line=" "
- line="${line}</errors>"
- echo "$line" >>"$xunitOutputPath"
- fi
-
- # </assembly>
- line=" "
- line="${line}</assembly>"
- echo "$line" >>"$xunitOutputPath"
-
- # </assemblies>
- echo '</assemblies>' >>"$xunitOutputPath"
-}
-
function exit_with_error {
local errorSource=$1
local errorMessage=$2
@@ -158,10 +74,11 @@ function exit_with_error {
fi
echo "$errorMessage"
- xunit_output_end "$errorSource" "$errorMessage"
if ((printUsage != 0)); then
print_usage
fi
+
+ echo "Exiting script with error code: $EXIT_CODE_EXCEPTION"
exit $EXIT_CODE_EXCEPTION
}
@@ -185,11 +102,12 @@ function create_core_overlay {
if [ -n "$coreOverlayDir" ]; then
export CORE_ROOT="$coreOverlayDir"
- return
+ return 0
fi
- # Check inputs to make sure we have enough information to create the core layout. $testRootDir/Tests/Core_Root should
- # already exist and contain test dependencies that are not built.
+ # Check inputs to make sure we have enough information to create the core
+ # layout. $testRootDir/Tests/Core_Root should already exist and contain test
+ # dependencies that are not built.
local testDependenciesDir=$testRootDir/Tests/Core_Root
if [ ! -d "$testDependenciesDir" ]; then
exit_with_error "$errorSource" "Did not find the test dependencies directory: $testDependenciesDir"
@@ -208,13 +126,30 @@ function create_core_overlay {
coreOverlayDir=$testRootDir/Tests/coreoverlay
export CORE_ROOT="$coreOverlayDir"
if [ -e "$coreOverlayDir" ]; then
- rm -f -r "$coreOverlayDir"
+ rm -rf "$coreOverlayDir" || exit 1
fi
+
mkdir "$coreOverlayDir"
- cp -f -v "$coreFxBinDir"/* "$coreOverlayDir/" 2>/dev/null
- cp -f -v "$coreClrBinDir/"* "$coreOverlayDir/" 2>/dev/null
- cp -n -v "$testDependenciesDir"/* "$coreOverlayDir/" 2>/dev/null
+ cp -f -v "$coreFxBinDir/"* "$coreOverlayDir/" || exit 2
+ cp -f -p -v "$coreClrBinDir/"* "$coreOverlayDir/" # || exit 3
+ if [ -d "$mscorlibDir/bin" ]; then
+ cp -f -v "$mscorlibDir/bin/"* "$coreOverlayDir/" || exit 4
+ fi
+ cp -f -v "$testDependenciesDir/"xunit* "$coreOverlayDir/" || exit 5
+ cp -n -v "$testDependenciesDir/"* "$coreOverlayDir/" # || exit 6
+ if [ -f "$coreOverlayDir/mscorlib.ni.dll" ]; then
+ # Test dependencies come from a Windows build, and mscorlib.ni.dll would be the one from Windows
+ rm -f "$coreOverlayDir/mscorlib.ni.dll" || exit 7
+ fi
+ if [ -f "$coreOverlayDir/System.Private.CoreLib.ni.dll" ]; then
+ # Test dependencies come from a Windows build, and System.Private.CoreLib.ni.dll would be the one from Windows
+ rm -f "$coreOverlayDir/System.Private.CoreLib.ni.dll" || exit 8
+ fi
+
+ copy_test_native_bin_to_test_root || exit 9
+
+ return 0
}
function precompile_overlay_assemblies {
@@ -227,19 +162,19 @@ function precompile_overlay_assemblies {
for fileToPrecompile in ${filesToPrecompile}
do
local filename=${fileToPrecompile}
- echo Precompiling $filename
+ echo "Precompiling $filename"
$overlayDir/crossgen /Platform_Assemblies_Paths $overlayDir $filename 2>/dev/null
local exitCode=$?
if [ $exitCode == -2146230517 ]; then
- echo $filename is not a managed assembly.
+ echo "$filename is not a managed assembly."
elif [ $exitCode != 0 ]; then
- echo Unable to precompile $filename.
+ echo "Unable to precompile $filename."
else
- echo Successfully precompiled $filename
+ echo "Successfully precompiled $filename"
fi
done
else
- echo Skipping crossgen of FX assemblies.
+ echo "Skipping crossgen of FX assemblies."
fi
}
@@ -279,8 +214,14 @@ coreClrBinDir=
mscorlibDir=
coreFxBinDir=
uploadToBenchview=
-benchViewOS=
-runType=
+benchViewOS=`lsb_release -i -s``lsb_release -r -s`
+runType=local
+BENCHVIEW_TOOLS_PATH=
+benchViewGroup=CoreCLR
+perfCollection=
+collectionflags=stopwatch
+hasWarmupRun=--drop-first-value
+stabilityPrefix=
for i in "$@"
do
@@ -307,12 +248,21 @@ do
--coreFxBinDir=*)
coreFxBinDir=${i#*=}
;;
- --benchViewOS=*)
+ --benchViewOS=*)
benchViewOS=${i#*=}
;;
- --runType=*)
+ --runType=*)
runType=${i#*=}
;;
+ --collectionflags=*)
+ collectionflags=${i#*=}
+ ;;
+ --generatebenchviewdata=*)
+ BENCHVIEW_TOOLS_PATH=${i#*=}
+ ;;
+ --stabilityPrefix=*)
+ stabilityPrefix=${i#*=}
+ ;;
--uploadToBenchview)
uploadToBenchview=TRUE
;;
@@ -333,69 +283,101 @@ if [ ! -d "$testRootDir" ]; then
echo "Directory specified by --testRootDir does not exist: $testRootDir"
exit $EXIT_CODE_EXCEPTION
fi
-
-# Copy native interop test libraries over to the mscorlib path in
-# order for interop tests to run on linux.
-if [ -z "$mscorlibDir" ]; then
- mscorlibDir=$coreClrBinDir
+if [ ! -z "$BENCHVIEW_TOOLS_PATH" ] && { [ ! -d "$BENCHVIEW_TOOLS_PATH" ]; }; then
+ echo BenchView path: "$BENCHVIEW_TOOLS_PATH" was specified, but it does not exist.
+ exit $EXIT_CODE_EXCEPTION
fi
-if [ -d "$mscorlibDir" ] && [ -d "$mscorlibDir/bin" ]; then
- cp $mscorlibDir/bin/* $mscorlibDir
+if [ "$collectionflags" == "stopwatch" ]; then
+ perfCollection=Off
+else
+ perfCollection=On
fi
# Install xunit performance packages
-export NUGET_PACKAGES=$testNativeBinDir/../../../../packages
-echo "NUGET_PACKAGES = $NUGET_PACKAGES"
+CORECLR_REPO=$testNativeBinDir/../../../..
+DOTNETCLI_PATH=$CORECLR_REPO/Tools/dotnetcli
-pushd $testNativeBinDir/../../../../tests/scripts
-$testNativeBinDir/../../../../Tools/dotnetcli/dotnet restore --fallbacksource https://dotnet.myget.org/F/dotnet-buildtools/ --fallbacksource https://dotnet.myget.org/F/dotnet-core/
-popd
+export NUGET_PACKAGES=$CORECLR_REPO/packages
# Creat coreoverlay dir which contains all dependent binaries
-create_core_overlay
-precompile_overlay_assemblies
-copy_test_native_bin_to_test_root
+create_core_overlay || { echo "Creating core overlay failed."; exit 1; }
+precompile_overlay_assemblies || { echo "Precompiling overlay assemblies failed."; exit 1; }
# Deploy xunit performance packages
cd $CORE_ROOT
-echo "CORE_ROOT dir = $CORE_ROOT"
DO_SETUP=TRUE
-
if [ ${DO_SETUP} == "TRUE" ]; then
-cp $testNativeBinDir/../../../../../packages/Microsoft.DotNet.xunit.performance.runner.cli/1.0.0-alpha-build0040/lib/netstandard1.3/Microsoft.DotNet.xunit.performance.runner.cli.dll .
-cp $testNativeBinDir/../../../../../packages/Microsoft.DotNet.xunit.performance.analysis.cli/1.0.0-alpha-build0040/lib/netstandard1.3/Microsoft.DotNet.xunit.performance.analysis.cli.dll .
-cp $testNativeBinDir/../../../../../packages/Microsoft.DotNet.xunit.performance.run.core/1.0.0-alpha-build0040/lib/dotnet/*.dll .
+ $DOTNETCLI_PATH/dotnet restore $CORECLR_REPO/tests/src/Common/PerfHarness/PerfHarness.csproj || { echo "dotnet restore failed."; exit 1; }
+ $DOTNETCLI_PATH/dotnet publish $CORECLR_REPO/tests/src/Common/PerfHarness/PerfHarness.csproj -c Release -o "$coreOverlayDir" || { echo "dotnet publish failed."; exit 1; }
fi
# Run coreclr performance tests
-echo "Test root dir is: $testRootDir"
+echo "Test root dir: $testRootDir"
tests=($(find $testRootDir/JIT/Performance/CodeQuality -name '*.exe') $(find $testRootDir/performance/perflab/PerfLab -name '*.dll'))
-echo "current dir is $PWD"
-rm measurement.json
+if [ -f measurement.json ]; then
+ rm measurement.json || exit $EXIT_CODE_EXCEPTION;
+fi
+
for testcase in ${tests[@]}; do
+ directory=$(dirname "$testcase")
+ filename=$(basename "$testcase")
+ filename="${filename%.*}"
-test=$(basename $testcase)
-testname=$(basename $testcase .exe)
-echo "....Running $testname"
-cp $testcase .
-cp $testcase-*.txt .
-
-chmod u+x ./corerun
-echo "./corerun Microsoft.DotNet.xunit.performance.runner.cli.dll $test -runner xunit.console.netcore.exe -runnerhost ./corerun -verbose -runid perf-$testname"
-./corerun Microsoft.DotNet.xunit.performance.runner.cli.dll $test -runner xunit.console.netcore.exe -runnerhost ./corerun -verbose -runid perf-$testname
-echo "./corerun Microsoft.DotNet.xunit.performance.analysis.cli.dll perf-$testname.xml -xml perf-$testname-summary.xml"
-./corerun Microsoft.DotNet.xunit.performance.analysis.cli.dll perf-$testname.xml -xml perf-$testname-summary.xml
-if [ "$uploadToBenchview" == "TRUE" ]
- then
- python3.5 ../../../../../tests/scripts/Microsoft.BenchView.JSONFormat/tools/measurement.py xunit perf-$testname.xml --better desc --drop-first-value --append
-fi
+ test=$(basename $testcase)
+ testname=$(basename $testcase .exe)
+
+ cp $testcase . || exit 1
+ if stat -t "$directory/$filename"*.txt 1>/dev/null 2>&1; then
+ cp "$directory/$filename"*.txt . || exit 1
+ fi
+
+ # TODO: Do we need this here.
+ chmod u+x ./corerun
+
+ echo ""
+ echo "----------"
+ echo " Running $testname"
+ echo "----------"
+ run_command $stabilityPrefix ./corerun PerfHarness.dll $test --perf:runid Perf --perf:collect $collectionflags 1>"Perf-$filename.log" 2>&1 || exit 1
+ if [ -d "$BENCHVIEW_TOOLS_PATH" ]; then
+ run_command python3.5 "$BENCHVIEW_TOOLS_PATH/measurement.py" xunit "Perf-$filename.xml" --better desc $hasWarmupRun --append || {
+ echo [ERROR] Failed to generate BenchView data;
+ exit 1;
+ }
+ fi
+
+ # Rename file to be archived by Jenkins.
+ mv -f "Perf-$filename.log" "$CORECLR_REPO/Perf-$filename-$perfCollection.log" || {
+ echo [ERROR] Failed to move "Perf-$filename.log" to "$CORECLR_REPO".
+ exit 1;
+ }
+ mv -f "Perf-$filename.xml" "$CORECLR_REPO/Perf-$filename-$perfCollection.xml" || {
+ echo [ERROR] Failed to move "Perf-$filename.xml" to "$CORECLR_REPO".
+ exit 1;
+ }
done
-if [ "$uploadToBenchview" == "TRUE" ]
- then
- python3.5 ../../../../../tests/scripts/Microsoft.BenchView.JSONFormat/tools/submission.py measurement.json --build ../../../../../build.json --machine-data ../../../../../machinedata.json --metadata ../../../../../submission-metadata.json --group "CoreCLR" --type "$runType" --config-name "Release" --config Configuration "Release" --config OS "$benchViewOS" --arch "x64" --machinepool "Perfsnake"
- python3.5 ../../../../../tests/scripts/Microsoft.BenchView.JSONFormat/tools/upload.py submission.json --container coreclr
+
+if [ -d "$BENCHVIEW_TOOLS_PATH" ]; then
+ args=measurement.json
+ args+=" --build ../../../../../build.json"
+ args+=" --machine-data ../../../../../machinedata.json"
+ args+=" --metadata ../../../../../submission-metadata.json"
+ args+=" --group $benchViewGroup"
+ args+=" --type $runType"
+ args+=" --config-name Release"
+ args+=" --config Configuration Release"
+ args+=" --config OS $benchViewOS"
+ args+=" --config Profile $perfCollection"
+ args+=" --arch x64"
+ args+=" --machinepool Perfsnake"
+ run_command python3.5 "$BENCHVIEW_TOOLS_PATH/submission.py" $args || {
+ echo [ERROR] Failed to generate BenchView submission data;
+ exit 1;
+ }
+fi
+
+if [ -d "$BENCHVIEW_TOOLS_PATH" ] && { [ "$uploadToBenchview" == "TRUE" ]; }; then
+ run_command python3.5 "$BENCHVIEW_TOOLS_PATH/upload.py" submission.json --container coreclr
fi
-mkdir ../../../../../sandbox
-cp *.xml ../../../../../sandbox
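Note: the rewritten shell runner mirrors the cmd script's new options: collection flags, a BenchView tools directory, an optional stability prefix, and a separate upload switch. A hedged invocation sketch using only flags handled by the argument parser above (all paths are placeholders):

    ./tests/scripts/run-xunit-perf.sh \
        --testRootDir=/path/to/coreclr/bin/tests/Windows_NT.x64.Release \
        --testNativeBinDir=/path/to/coreclr/bin/obj/Linux.x64.Release/tests \
        --coreFxBinDir=/path/to/corefx/runtime/netcoreapp-Linux-Release-x64 \
        --generatebenchviewdata=/path/to/Microsoft.BenchView.JSONFormat/tools \
        --collectionflags=stopwatch \
        --runType=private \
        --uploadToBenchview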
diff --git a/tests/scripts/scripts.csproj b/tests/scripts/scripts.csproj
new file mode 100644
index 0000000000..b1a1aab6e5
--- /dev/null
+++ b/tests/scripts/scripts.csproj
@@ -0,0 +1,36 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="12.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <Import Project="$([MSBuild]::GetDirectoryNameOfFileAbove($(MSBuildThisFileDirectory), dir.props))\dir.props" />
+ <ItemGroup>
+ <PackageReference Include="Microsoft.DotNet.xunit.performance.run.core">
+ <Version>1.0.0-alpha-build0040</Version>
+ </PackageReference>
+ <PackageReference Include="Microsoft.DotNet.xunit.performance.analysis.cli">
+ <Version>1.0.0-alpha-build0040</Version>
+ </PackageReference>
+ <PackageReference Include="Microsoft.DotNet.xunit.performance.runner.cli">
+ <Version>1.0.0-alpha-build0040</Version>
+ </PackageReference>
+ <PackageReference Include="Microsoft.DotNet.xunit.performance">
+ <Version>1.0.0-alpha-build0040</Version>
+ </PackageReference>
+ <PackageReference Include="xunit.console.netcore">
+ <Version>1.0.2-prerelease-00177</Version>
+ </PackageReference>
+ <PackageReference Include="Microsoft.DotNet.BuildTools.TestSuite">
+ <Version>1.0.0-prerelease-00629-04</Version>
+ </PackageReference>
+ </ItemGroup>
+ <PropertyGroup>
+ <TargetFramework>netstandard1.3</TargetFramework>
+ <TargetFrameworkIdentifier>.NETStandard</TargetFrameworkIdentifier>
+ <PackageTargetFallback>$(PackageTargetFallback);dnxcore50;portable-net45+win8</PackageTargetFallback>
+ <ContainsPackageReferences>true</ContainsPackageReferences>
+ <PrereleaseResolveNuGetPackages>false</PrereleaseResolveNuGetPackages>
+ <RuntimeIdentifiers>win7-x86;win7-x64</RuntimeIdentifiers>
+ <IsTestProject>false</IsTestProject>
+ </PropertyGroup>
+ <Import Project="$(MSBuildThisFileDirectory)..\src\dir.targets" />
+ <Target Name="Build"
+ DependsOnTargets="ResolveReferences" />
+</Project>
\ No newline at end of file