path: root/tests/scripts/run-throughput-perf.py
#!/usr/bin/env python
#
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
#
##########################################################################
##########################################################################
#
# Module: run-throughput-perf.py
#
# Notes: runs throughput testing for CoreCLR by timing crossgen over the
#        framework assemblies and uploads the timing results to BenchView
#
#
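# Example invocation (illustrative; the paths are placeholders for a real
# CoreCLR build and framework assembly layout):
#
#   py run-throughput-perf.py -arch x64 -configuration Release
#       -run_type rolling -os Windows_NT -clr_root C:\coreclr
#       -assembly_root C:\assemblies -benchview_path C:\benchview-tools
#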
##########################################################################
##########################################################################

import argparse
import csv
import os
import shutil
import stat
import subprocess
import sys
import timeit

##########################################################################
# Globals
##########################################################################

# List of dlls we want to exclude
dll_exclude_list = {
    'Windows_NT': [
        # Require Newtonsoft.Json
        "Microsoft.DotNet.ProjectModel.dll",
        "Microsoft.Extensions.DependencyModel.dll",
        # Require System.Security.Principal.Windows
        "System.Net.Requests.dll",
        "System.Net.Security.dll",
        "System.Net.Sockets.dll"
    ],
    'Linux' : [
        # Requires System.Runtime.WindowsRuntime
        "System.Runtime.WindowsRuntime.UI.Xaml.dll"
    ]
}

jit_list = {
    'Windows_NT': {
        'x64': 'clrjit.dll',
        'x86': 'clrjit.dll',
    },
    'Linux': {
        'x64': 'libclrjit.so'
    }
}

os_group_list = {
    'Windows_NT': 'Windows_NT',
    'Ubuntu14.04': 'Linux'
}

python_exe_list = {
    'Windows_NT': 'py',
    'Linux': 'python3.5'
}

##########################################################################
# Argument Parser
##########################################################################

description = 'Tool to collect throughput performance data'

parser = argparse.ArgumentParser(description=description)

parser.add_argument('-arch', dest='arch', default='x64')
parser.add_argument('-configuration', dest='build_type', default='Release')
parser.add_argument('-run_type', dest='run_type', default='rolling')
parser.add_argument('-os', dest='operating_system', default='Windows_NT')
parser.add_argument('-clr_root', dest='clr_root', default=None)
parser.add_argument('-assembly_root', dest='assembly_root', default=None)
parser.add_argument('-benchview_path', dest='benchview_path', default=None)

##########################################################################
# Helper Functions
##########################################################################

def validate_args(args):
    """ Validate all of the arguments parsed.
    Args:
        args (argparse.Namespace): args parsed by the argument parser
    Returns:
        (arch, operating_system, os_group, build_type, run_type, clr_root,
         assembly_root, benchview_path)
            (str, str, str, str, str, str, str, str)
    Notes:
    If the arguments are valid, return them all in a tuple. If not, raise
    an exception stating which argument is incorrect.
    """

    arch = args.arch
    build_type = args.build_type
    run_type = args.run_type
    operating_system = args.operating_system
    clr_root = args.clr_root
    assembly_root = args.assembly_root
    benchview_path = args.benchview_path

    def validate_arg(arg, check):
        """ Validate an individual arg
        Args:
           arg (str|bool): argument to be validated
           check (lambda: x-> bool): test that returns either True or False
                                   : based on whether the check passes.

        Raises:
           Exception: if the argument is None or the check fails
        """

        helper = lambda item: item is not None and check(item)

        if not helper(arg):
            raise Exception('Argument: %s is not valid.' % (arg))

    valid_archs = {'Windows_NT': ['x86', 'x64'], 'Linux': ['x64']}
    valid_build_types = ['Release']
    valid_run_types = ['rolling', 'private']
    valid_os = ['Windows_NT', 'Ubuntu14.04']

    arch = next((a for a in valid_archs if a.lower() == arch.lower()), arch)
    build_type = next((b for b in valid_build_types if b.lower() == build_type.lower()), build_type)

    validate_arg(operating_system, lambda item: item in valid_os)

    os_group = os_group_list[operating_system]

    validate_arg(arch, lambda item: item in valid_archs[os_group])
    validate_arg(build_type, lambda item: item in valid_build_types)
    validate_arg(run_type, lambda item: item in valid_run_types)

    if clr_root is None:
        raise Exception('-clr_root must be set')
    else:
        clr_root = os.path.normpath(clr_root)
        validate_arg(clr_root, lambda item: os.path.isdir(clr_root))

    if assembly_root is None:
        raise Exception('-assembly_root must be set')
    else:
        assembly_root = os.path.normpath(assembly_root)
        validate_arg(assembly_root, lambda item: os.path.isdir(assembly_root))

    if benchview_path is not None:
        benchview_path = os.path.normpath(benchview_path)
        validate_arg(benchview_path, lambda item: os.path.isdir(benchview_path))

    args = (arch, operating_system, os_group, build_type, run_type, clr_root, assembly_root, benchview_path)

    # Log configuration
    log('Configuration:')
    log(' arch: %s' % arch)
    log(' os: %s' % operating_system)
    log(' os_group: %s' % os_group)
    log(' build_type: %s' % build_type)
    log(' run_type: %s' % run_type)
    log(' clr_root: %s' % clr_root)
    log(' assembly_root: %s' % assembly_root)
    if benchview_path is not None:
        log(' benchview_path: %s' % benchview_path)

    return args

def nth_dirname(path, n):
    """ Find the Nth parent directory of the given path
    Args:
        path (str): path name containing at least N components
        n (int): num of basenames to remove
    Returns:
        outpath (str): path with the last n components removed
    Notes:
        If n is 0, path is returned unmodified
    """

    assert n >= 0

    for _ in range(n):
        path = os.path.dirname(path)

    return path

def del_rw(action, name, exc):
    """ shutil.rmtree onerror handler: clear the read-only bit and retry the remove """
    os.chmod(name, stat.S_IWRITE)
    os.remove(name)

def log(message):
    """ Print logging information
    Args:
        message (str): message to be printed
    """

    print('[%s]: %s' % (sys.argv[0], message))

def generateCSV(dll_name, dll_runtimes):
    """ Write throuput performance data to a csv file to be consumed by measurement.py
    Args:
        dll_name (str): the name of the dll
        dll_runtimes (float[]): A list of runtimes for each iteration of the performance test
    """

    csv_file_name = "throughput-%s.csv" % (dll_name)
    csv_file_path = os.path.join(os.getcwd(), csv_file_name)

    with open(csv_file_path, 'w') as csvfile:
        output_file = csv.writer(csvfile, delimiter=',', lineterminator='\n')

        for iteration in dll_runtimes:
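            # Each row has the shape (dll name and timing are illustrative):
            #   default,coreclr-crossgen-tp,System.Linq,1052.7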
            output_file.writerow(["default", "coreclr-crossgen-tp", dll_name, iteration])

    return csv_file_name

def runIterations(dll_name, dll_path, iterations, crossgen_path, jit_path, assemblies_path):
    """ Run throughput testing for a given dll
    Args:
        dll_name (str): the name of the dll
        dll_path (str): the path to the dll
        iterations (int): the number of times to run crossgen on the dll
        crossgen_path (str): the path to crossgen
        jit_path (str): the path to the jit
        assemblies_path (str): the path to the assemblies that may be needed for the crossgen run
    Returns:
        dll_elapsed_times (float[]): a list of the elapsed times, in milliseconds, for the dll
    """

    dll_elapsed_times = []

    # Set up arguments for running crossgen
    run_args = [crossgen_path,
            '/JITPath',
            jit_path,
            '/Platform_Assemblies_Paths',
            assemblies_path,
            dll_path
            ]
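
    # The assembled command resembles (paths illustrative):
    #   crossgen.exe /JITPath clrjit.dll /Platform_Assemblies_Paths <assembly_root> System.Linq.dll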

    log(" ".join(run_args))

    # timeit.default_timer() returns seconds; multiply by this to report milliseconds
    multiplier = 1000

    for iteration in range(iterations):
        proc = subprocess.Popen(run_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        start_time = timeit.default_timer()
        (out, err) = proc.communicate()
        end_time = timeit.default_timer()

        if proc.returncode == 0:
            # Calculate the runtime
            elapsed_time = (end_time - start_time) * multiplier
            dll_elapsed_times.append(elapsed_time)
        else:
            log("Error in %s" % (dll_name))
            log(err.decode("utf-8"))

    return dll_elapsed_times

##########################################################################
# Main
##########################################################################

def main(args):
    architecture, operating_system, os_group, build_type, run_type, clr_root, assembly_root, benchview_path = validate_args(args)

    current_dir = os.getcwd()
    jit = jit_list[os_group][architecture]
    crossgen = 'crossgen'

    if os_group == 'Windows_NT':
        crossgen += '.exe'

    # Make sandbox
    sandbox_path = os.path.join(clr_root, "sandbox")
    if os.path.isdir(sandbox_path):
        shutil.rmtree(sandbox_path, onerror=del_rw)

    os.makedirs(sandbox_path)
    os.chdir(sandbox_path)

    # Set up paths
    bin_path = os.path.join(clr_root, 'bin', 'Product', os_group + '.' + architecture + '.' + build_type)

    crossgen_path = os.path.join(bin_path, crossgen)
    jit_path = os.path.join(bin_path, jit)

    # Six timed runs per assembly; when publishing, measurement.py is passed
    # --drop-first-value, which discards the first of these runs.
    iterations = 6

    python_exe = python_exe_list[os_group]

    # Run throughput testing
    for dll_file_name in os.listdir(assembly_root):
        # Find all framework dlls in the assembly_root dir, which we will crossgen
        if (dll_file_name.endswith(".dll") and
                (".ni." not in dll_file_name) and
                ("Microsoft" in dll_file_name or "System" in dll_file_name) and
                (dll_file_name not in dll_exclude_list[os_group])):
            dll_name = dll_file_name.replace(".dll", "")
            dll_path = os.path.join(assembly_root, dll_file_name)
            dll_elapsed_times = runIterations(dll_file_name, dll_path, iterations, crossgen_path, jit_path, assembly_root)

            if len(dll_elapsed_times) != 0:
                if benchview_path is not None:
                    # Generate the csv file
                    csv_file_name = generateCSV(dll_name, dll_elapsed_times)
                    shutil.copy(csv_file_name, clr_root)

                    # For each benchmark, call measurement.py
                    measurement_args = [python_exe,
                            os.path.join(benchview_path, "measurement.py"),
                            "csv",
                            os.path.join(os.getcwd(), csv_file_name),
                            "--metric",
                            "execution_time",
                            "--unit",
                            "milliseconds",
                            "--better",
                            "desc",
                            "--drop-first-value",
                            "--append"]
                    log(" ".join(measurement_args))
                    proc = subprocess.Popen(measurement_args)
                    proc.communicate()
                else:
                    # Write output to console if we are not publishing
                    log("%s" % (dll_name))
                    log("Duration: [%s]" % (", ".join(str(x) for x in dll_elapsed_times)))

    # Upload the data
    if benchview_path is not None:
        # Call submission.py
        submission_args = [python_exe,
                os.path.join(benchview_path, "submission.py"),
                "measurement.json",
                "--build",
                os.path.join(clr_root, "build.json"),
                "--machine-data",
                os.path.join(clr_root, "machinedata.json"),
                "--metadata",
                os.path.join(clr_root, "submission-metadata.json"),
                "--group",
                "CoreCLR-throughput",
                "--type",
                run_type,
                "--config-name",
                build_type,
                "--config",
                "Configuration",
                build_type,
                "--config",
                "OS",
                operating_system,
                "--arch",
                architecture,
                "--machinepool",
                "PerfSnake"
                ]
        log(" ".join(submission_args))
        proc = subprocess.Popen(submission_args)
        proc.communicate()

        # Call upload.py
        upload_args = [python_exe,
                os.path.join(benchview_path, "upload.py"),
                "submission.json",
                "--container",
                "coreclr"
                ]
        log(" ".join(upload_args))
        proc = subprocess.Popen(upload_args)
        proc.communicate()

    os.chdir(current_dir)

    return 0

if __name__ == "__main__":
    args = parser.parse_args()
    sys.exit(main(args))