@rem Licensed to the .NET Foundation under one or more agreements.
@rem The .NET Foundation licenses this file to you under the MIT license.
@rem See the LICENSE file in the project root for more information.

@if not defined _echo echo off

setlocal ENABLEDELAYEDEXPANSION
  set ERRORLEVEL=
  set BENCHVIEW_RUN_TYPE=local
  set CORECLR_REPO=%CD%
  set LV_SANDBOX_DIR=%CORECLR_REPO%\bin\sandbox
  set LV_BENCHMARKS_OUTPUT_DIR=%LV_SANDBOX_DIR%\Logs
  set TEST_FILE_EXT=exe
  set TEST_ARCH=x64
  set TEST_ARCHITECTURE=x64
  set TEST_CONFIG=Release
  set IS_SCENARIO_TEST=
  set USAGE_DISPLAYED=
  set SHOULD_UPLOAD_TO_BENCHVIEW=
  set BENCHVIEW_PATH=
  set COLLECTION_FLAGS=stopwatch
  set ETW_COLLECTION=Off
  set STABILITY_PREFIX=
  set BENCHVIEW_GROUP=CoreCLR
  set HAS_WARMUP_RUN=--drop-first-value
  set BETTER=desc
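  rem Notes on the defaults above, inferred from the rest of this script:
  rem COLLECTION_FLAGS=stopwatch keeps ETW collection off; any other value turns
  rem ETW_COLLECTION on (see :set_collection_config). HAS_WARMUP_RUN is forwarded
  rem to BenchView's measurement.py so the warmup iteration is discarded; passing
  rem -nowarmup clears it.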

  call :parse_command_line_arguments %*
  if defined USAGE_DISPLAYED exit /b %ERRORLEVEL%

  call :set_test_architecture  || exit /b 1
  call :set_collection_config  || exit /b 1
  call :verify_benchview_tools || exit /b 1
  call :verify_core_overlay    || exit /b 1
  call :set_perf_run_log       || exit /b 1
  call :setup_sandbox          || exit /b 1

  call :run_cmd "%CORECLR_REPO%\Tools\dotnetcli\dotnet.exe" restore "%CORECLR_REPO%\tests\src\Common\PerfHarness\PerfHarness.csproj" || (
    call :print_error Failed to restore PerfHarness.csproj
    exit /b 1
  )
  call :run_cmd "%CORECLR_REPO%\Tools\dotnetcli\dotnet.exe" publish "%CORECLR_REPO%\tests\src\Common\PerfHarness\PerfHarness.csproj" -c Release -o "%LV_SANDBOX_DIR%" || (
    call :print_error Failed to publish PerfHarness.csproj
    exit /b 1
  )

  rem TODO: Remove the version of the package to copy. E.g., if multiple versions exist, should we error out?
  call :run_cmd xcopy /sy "%CORECLR_REPO%\bin\tests\Windows_NT.%TEST_ARCH%.%TEST_CONFIG%\Tests\Core_Root\*" . >> "%RUNLOG%" || exit /b 1

  rem find and stage the tests
  set /A "LV_FAILURES=0"
  for /R "%CORECLR_PERF%" %%T in (*.%TEST_FILE_EXT%) do (
    call :run_benchmark %%T || (
      set /A "LV_FAILURES+=1"
    )
  )

  if not defined OPT_LEVEL (
    set OPT_LEVEL=full_opt
  )

  if not defined JIT_NAME (
    set JIT_NAME=ryujit
  )
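  rem Note: OPT_LEVEL and JIT_NAME are defaulted here, after the benchmark loop,
  rem so these defaults only label the BenchView submission. An explicit
  rem -optLevel min_opt also sets COMPlus_JITMinOpts for the runs above.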

  rem optionally upload results to benchview
  if not [%BENCHVIEW_PATH%] == [] (
    call :upload_to_benchview || exit /b 1
  )

  rem Exit codes are limited to 32 bits of precision (Int32.MaxValue == 2^31 - 1).
  if %LV_FAILURES% NEQ 0 (
    call :print_error %LV_FAILURES% benchmarks failed.
    exit /b %LV_FAILURES%
  )

  exit /b %ERRORLEVEL%

:run_benchmark
rem ****************************************************************************
rem   Executes the xUnit Performance benchmarks
rem ****************************************************************************
setlocal
  set BENCHNAME=%~n1
  set BENCHDIR=%~p1

  rem copy benchmark and any input files
  call :run_cmd xcopy /sy "%~1" . >> "%RUNLOG%"  || exit /b 1
  if exist "%BENCHDIR%*.txt" (
    call :run_cmd xcopy /sy "%BENCHDIR%*.txt" . >> "%RUNLOG%"  || exit /b 1
  )

  rem set up additional environment variables
  if DEFINED TEST_ENV (
    if EXIST "%TEST_ENV%" (
        call "%TEST_ENV%"
    )
  )

  rem set up optimization level
  if DEFINED OPT_LEVEL (
    if /I "%OPT_LEVEL%" == "min_opt" (
        set COMPlus_JITMinOpts=1
    )
  )
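  rem COMPlus_JITMinOpts=1 instructs the JIT to compile with minimal optimizations.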

  rem CORE_ROOT environment variable is used by some benchmarks such as Roslyn / CscBench.
  set CORE_ROOT=%LV_SANDBOX_DIR%
  set LV_RUNID=Perf-%ETW_COLLECTION%
  set BENCHNAME_LOG_FILE_NAME=%LV_BENCHMARKS_OUTPUT_DIR%\%LV_RUNID%-%BENCHNAME%.log


  echo/
  echo/  ----------
  echo/  Running %LV_RUNID% %BENCHNAME%
  echo/  ----------

  set LV_CMD=
  if defined IS_SCENARIO_TEST (
    set "LV_CMD=corerun.exe "%LV_SANDBOX_DIR%\%BENCHNAME%.%TEST_FILE_EXT%" --perf:outputdir "%LV_BENCHMARKS_OUTPUT_DIR%" --perf:runid "%LV_RUNID%" --target-architecture "%TEST_ARCHITECTURE%""
  ) else (
    set "LV_CMD=%STABILITY_PREFIX% corerun.exe PerfHarness.dll "%LV_SANDBOX_DIR%\%BENCHNAME%.%TEST_FILE_EXT%" --perf:outputdir "%LV_BENCHMARKS_OUTPUT_DIR%" --perf:runid "%LV_RUNID%" --perf:collect %COLLECTION_FLAGS%"
  )
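  rem For illustration, with the defaults above the non-scenario command expands
  rem to something like the following (benchmark name and paths hypothetical):
  rem   corerun.exe PerfHarness.dll "C:\coreclr\bin\sandbox\MyBench.exe" --perf:outputdir "C:\coreclr\bin\sandbox\Logs" --perf:runid "Perf-Off" --perf:collect stopwatch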

  call :print_to_console $ !LV_CMD!
  call :run_cmd !LV_CMD! 1>"%BENCHNAME_LOG_FILE_NAME%" 2>&1

  IF %ERRORLEVEL% NEQ 0 (
    call :print_error corerun.exe exited with code %ERRORLEVEL%.
    if exist "%BENCHNAME_LOG_FILE_NAME%" type "%BENCHNAME_LOG_FILE_NAME%"
    exit /b 1
  )

  rem optionally generate results for benchview
  if exist "%BENCHVIEW_PATH%" (
    call :generate_results_for_benchview || exit /b 1
  )

  exit /b 0

:parse_command_line_arguments
rem ****************************************************************************
rem   Parses the script's command line arguments.
rem ****************************************************************************
  IF /I [%~1] == [-testBinLoc] (
    set CORECLR_PERF=%CORECLR_REPO%\%~2
    shift
    shift
    goto :parse_command_line_arguments
  )
  IF /I [%~1] == [-stabilityPrefix] (
    set STABILITY_PREFIX=%~2
    shift
    shift
    goto :parse_command_line_arguments
  )
  IF /I [%~1] == [-scenarioTest] (
    set IS_SCENARIO_TEST=1
    shift
    goto :parse_command_line_arguments
  )
  IF /I [%~1] == [-uploadtobenchview] (
    set SHOULD_UPLOAD_TO_BENCHVIEW=1
    shift
    goto :parse_command_line_arguments
  )
  IF /I [%~1] == [-nowarmup] (
    set HAS_WARMUP_RUN=
    shift
    goto :parse_command_line_arguments
  )
  IF /I [%~1] == [-better] (
    set BETTER=%~2
    shift
    shift
    goto :parse_command_line_arguments
  )
  IF /I [%~1] == [-runtype] (
    set BENCHVIEW_RUN_TYPE=%~2
    shift
    shift
    goto :parse_command_line_arguments
  )
  IF /I [%~1] == [-collectionflags] (
    set COLLECTION_FLAGS=%~2
    shift
    shift
    goto :parse_command_line_arguments
  )
  IF /I [%~1] == [-library] (
    set TEST_FILE_EXT=dll
    shift
    goto :parse_command_line_arguments
  )
  IF /I [%~1] == [-generatebenchviewdata] (
    set BENCHVIEW_PATH=%~2
    shift
    shift
    goto :parse_command_line_arguments
  )
  IF /I [%~1] == [-arch] (
    set TEST_ARCHITECTURE=%~2
    shift
    shift
    goto :parse_command_line_arguments
  )
  IF /I [%~1] == [-testEnv] (
    set TEST_ENV=%~2
    shift
    shift
    goto :parse_command_line_arguments
  )
  IF /I [%~1] == [-optLevel] (
    set OPT_LEVEL=%~2
    shift
    shift
    goto :parse_command_line_arguments
  )
  IF /I [%~1] == [-jitName] (
    set JIT_NAME=%~2
    shift
    shift
    goto :parse_command_line_arguments
  )
  IF /I [%~1] == [-configuration] (
    set TEST_CONFIG=%~2
    shift
    shift
    goto :parse_command_line_arguments
  )
  IF /I [%~1] == [-group] (
    set BENCHVIEW_GROUP=%~2
    shift
    shift
    goto :parse_command_line_arguments
  )
  IF /I [%~1] == [-outputdir] (
    set LV_BENCHMARKS_OUTPUT_DIR=%~2
    shift
    shift
    goto :parse_command_line_arguments
  )
  if /I [%~1] == [-?] (
    call :USAGE
    exit /b 0
  )
  if /I [%~1] == [-help] (
    call :USAGE
    exit /b 0
  )
  if [%CORECLR_PERF%] == [] (
    call :USAGE
  )

  exit /b %ERRORLEVEL%

:set_test_architecture
rem ****************************************************************************
rem   Sets the test architecture.
rem ****************************************************************************
  set TEST_ARCH=%TEST_ARCHITECTURE%
  exit /b 0

:verify_benchview_tools
rem ****************************************************************************
rem   Verifies that the path to the BenchView tools is correct.
rem ****************************************************************************
  if defined BENCHVIEW_PATH (
    if not exist "%BENCHVIEW_PATH%" (
      call :print_error BenchView path: "%BENCHVIEW_PATH%" was specified, but it does not exist.
      exit /b 1
    )
  )
  exit /b 0

:verify_core_overlay
rem ****************************************************************************
rem   Verifies that the Core_Root folder exists.
rem ****************************************************************************
  set CORECLR_OVERLAY=%CORECLR_REPO%\bin\tests\Windows_NT.%TEST_ARCH%.%TEST_CONFIG%\Tests\Core_Root
  if NOT EXIST "%CORECLR_OVERLAY%" (
    call :print_error Can't find test overlay directory '%CORECLR_OVERLAY%'. Please build and run Release CoreCLR tests.
    exit /B 1
  )
  exit /b 0

:set_collection_config
rem ****************************************************************************
rem   Sets the ETW collection config based on the providers used for collection.
rem ****************************************************************************
  if /I [%COLLECTION_FLAGS%] == [stopwatch] (
    set ETW_COLLECTION=Off
  ) else (
    set ETW_COLLECTION=On
  )
  exit /b 0


:set_perf_run_log
rem ****************************************************************************
rem   Sets the script's output log file.
rem ****************************************************************************
  if NOT EXIST "%CORECLR_REPO%\bin\Logs" (
    call :print_error Cannot find the Logs folder '%CORECLR_REPO%\bin\Logs'.
    exit /b 1
  )
  set RUNLOG=%CORECLR_REPO%\bin\Logs\perfrun.log
  exit /b 0

:setup_sandbox
rem ****************************************************************************
rem   Creates the sandbox folder used by the script to copy binaries locally
rem   and execute benchmarks.
rem ****************************************************************************
  if not defined LV_SANDBOX_DIR (
    call :print_error LV_SANDBOX_DIR was not defined.
    exit /b 1
  )

  if exist "%LV_SANDBOX_DIR%" rmdir /s /q "%LV_SANDBOX_DIR%"
  if exist "%LV_SANDBOX_DIR%" call :print_error Failed to remove the "%LV_SANDBOX_DIR%" folder& exit /b 1

  if not exist "%LV_SANDBOX_DIR%" mkdir "%LV_SANDBOX_DIR%"
  if not exist "%LV_SANDBOX_DIR%" (
    call :print_error Failed to create the "%LV_SANDBOX_DIR%" folder.
    exit /b 1
  )

  if not exist "%LV_BENCHMARKS_OUTPUT_DIR%" mkdir "%LV_BENCHMARKS_OUTPUT_DIR%"
  if not exist "%LV_BENCHMARKS_OUTPUT_DIR%" (
    call :print_error Failed to create the "%LV_BENCHMARKS_OUTPUT_DIR%" folder.
    exit /b 1
  )

  cd "%LV_SANDBOX_DIR%"
  exit /b %ERRORLEVEL%

:generate_results_for_benchview
rem ****************************************************************************
rem   Generates results for BenchView by appending new data to the existing
rem   measurement.json file.
rem ****************************************************************************
  if not defined LV_RUNID (
    call :print_error LV_RUNID was not defined before calling generate_results_for_benchview.
    exit /b 1
  )
  set BENCHVIEW_MEASUREMENT_PARSER=xunit
  if defined IS_SCENARIO_TEST set BENCHVIEW_MEASUREMENT_PARSER=xunitscenario

  set LV_MEASUREMENT_ARGS=
  set LV_MEASUREMENT_ARGS=%LV_MEASUREMENT_ARGS% %BENCHVIEW_MEASUREMENT_PARSER%
  set LV_MEASUREMENT_ARGS=%LV_MEASUREMENT_ARGS% --better %BETTER%
  set LV_MEASUREMENT_ARGS=%LV_MEASUREMENT_ARGS% %HAS_WARMUP_RUN%
  set LV_MEASUREMENT_ARGS=%LV_MEASUREMENT_ARGS% --append
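  rem With the defaults this builds a command line along the lines of the
  rem following (result file name hypothetical):
  rem   py.exe measurement.py xunit --better desc --drop-first-value --append "Perf-Off-MyBench.xml"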

  rem Currently the xUnit Performance API saves the scenario output
  rem   files in the current working directory.
  set LV_PATTERN="%LV_BENCHMARKS_OUTPUT_DIR%\%LV_RUNID%-%BENCHNAME%.xml"
  if defined IS_SCENARIO_TEST set LV_PATTERN="%LV_RUNID%-*-%BENCHNAME%.xml"
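  rem For example, with LV_RUNID=Perf-Off and a hypothetical benchmark named
  rem MyBench, the non-scenario pattern matches Logs\Perf-Off-MyBench.xml under
  rem the benchmarks output directory.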

  for %%f in (%LV_PATTERN%) do (
    call :run_cmd py.exe "%BENCHVIEW_PATH%\measurement.py" %LV_MEASUREMENT_ARGS% "%%~f"

    IF !ERRORLEVEL! NEQ 0 (
      call :print_error Failed to generate BenchView measurement data.
      exit /b 1
    )
  )

endlocal& exit /b %ERRORLEVEL%

:upload_to_benchview
rem ****************************************************************************
rem   Generates BenchView's submission data and uploads it.
rem ****************************************************************************
setlocal
  set LV_SUBMISSION_ARGS=
  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --build "%CORECLR_REPO%\build.json"
  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --machine-data "%CORECLR_REPO%\machinedata.json"
  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --metadata "%CORECLR_REPO%\submission-metadata.json"
  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --group "%BENCHVIEW_GROUP%"
  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --type "%BENCHVIEW_RUN_TYPE%"
  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config-name "%TEST_CONFIG%"
  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config Configuration "%TEST_CONFIG%"
  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config OS "Windows_NT"
  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config Profile "%ETW_COLLECTION%"
  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config OptLevel "%OPT_LEVEL%"
  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config JitName  "%JIT_NAME%"
  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --architecture "%TEST_ARCHITECTURE%"
  set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --machinepool "PerfSnake"

  call :run_cmd py.exe "%BENCHVIEW_PATH%\submission.py" measurement.json %LV_SUBMISSION_ARGS%

  IF %ERRORLEVEL% NEQ 0 (
    call :print_error Creating BenchView submission data failed.
    exit /b 1
  )

  if defined SHOULD_UPLOAD_TO_BENCHVIEW (
    call :run_cmd py.exe "%BENCHVIEW_PATH%\upload.py" submission.json --container coreclr
    IF !ERRORLEVEL! NEQ 0 (
      call :print_error Uploading to BenchView failed.
      exit /b 1
    )
  )
  exit /b %ERRORLEVEL%

:USAGE
rem ****************************************************************************
rem   Script's usage.
rem ****************************************************************************
  set USAGE_DISPLAYED=1
  echo run-xunit-perf.cmd -testBinLoc ^<path_to_tests^> [-library] [-arch ^<x86^|x64^>] [-configuration ^<Release^|Debug^>] [-generateBenchviewData ^<path_to_benchview_tools^>] [-uploadToBenchview] [-nowarmup] [-better ^<asc^|desc^>] [-group ^<group^>] [-runtype ^<rolling^|private^>] [-scenarioTest] [-collectionFlags ^<default^+CacheMisses^+InstructionRetired^+BranchMispredictions^+gcapi^>] [-outputdir ^<outputdir^>] [-stabilityPrefix ^<prefix^>] [-testEnv ^<path_to_env_script^>] [-optLevel ^<full_opt^|min_opt^>] [-jitName ^<jit_name^>]
  echo/
  echo For the path to the tests you can pass a parent directory and the script will grovel for
  echo all tests in subdirectories and run them.
  echo The library flag denotes whether the tests are built as libraries (.dll) or executables (.exe).
  echo Architecture defaults to x64 and configuration defaults to Release.
  echo -generateBenchviewData specifies the path to the BenchView tooling. When this flag is
  echo set, the script generates results for upload to BenchView.
  echo -uploadToBenchview If this flag is set, the generated BenchView test data will be uploaded.
  echo -nowarmup specifies not to discard the results of the first run.
  echo -better specifies whether ascending or descending numbers are better for the benchmark.
  echo -group specifies the BenchView group to which this data should be uploaded (default CoreCLR).
  echo -runtype sets the run type that we upload to BenchView: rolling for regular runs, and private
  echo for PRs.
  echo -scenarioTest should be included if you are running a scenario benchmark.
  echo -outputdir Specifies the directory where the generated performance output will be saved.
  echo -collectionFlags specifies which collection flags get passed to the performance
  echo harness that runs the tests. If this is not specified, only stopwatch is used.
  echo Other flags are "default", which is whatever the test being run specified, "CacheMisses",
  echo "BranchMispredictions", "InstructionRetired", and "gcapi".
  exit /b %ERRORLEVEL%

:print_error
rem ****************************************************************************
rem   Function wrapper that unifies how errors are output by the script.
rem   Output goes to standard error.
rem ****************************************************************************
  call :print_to_console [ERROR] %*   1>&2
  exit /b %ERRORLEVEL%

:print_to_console
rem ****************************************************************************
rem   Sends text to the console screen. This can be useful for providing
rem   information about where the script is executing.
rem ****************************************************************************
  echo/
  echo/%USERNAME%@%COMPUTERNAME% "%CD%"
  echo/[%DATE%][%TIME:~0,-3%] %*
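  rem Example output (values illustrative; DATE and TIME formats are locale-dependent):
  rem   user@MACHINE "C:\coreclr\bin\sandbox"
  rem   [Fri 01/01/2021][12:00:00] $ corerun.exe ...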
  exit /b %ERRORLEVEL%

:run_cmd
rem ****************************************************************************
rem   Function wrapper that echoes the command line to the console screen
rem   before the command is executed.
rem ****************************************************************************
  if "%~1" == "" (
    call :print_error No command was specified.
    exit /b 1
  )

  call :print_to_console $ %*
  call %*
  exit /b %ERRORLEVEL%