# xref: /openbsd/gnu/llvm/llvm/utils/lit/lit/main.py (revision d415bd75)
1"""
2lit - LLVM Integrated Tester.
3
4See lit.pod for more information.
5"""
6
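# Typical invocations (illustrative only; see lit.pod or `lit --help` for the
# authoritative option list):
#
#   lit -sv <test-dir>               # succinct progress, verbose failure output
#   lit --filter=<regex> <test-dir>  # run only the tests whose names match
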
import itertools
import os
import platform
import sys
import time

import lit.cl_arguments
import lit.discovery
import lit.display
import lit.LitConfig
import lit.reports
import lit.run
import lit.Test
import lit.util
from lit.formats.googletest import GoogleTest
from lit.TestTimes import record_test_times


def main(builtin_params={}):
    opts = lit.cl_arguments.parse_args()
    params = create_params(builtin_params, opts.user_params)
    is_windows = platform.system() == 'Windows'

    lit_config = lit.LitConfig.LitConfig(
        progname=os.path.basename(sys.argv[0]),
        path=opts.path,
        quiet=opts.quiet,
        useValgrind=opts.useValgrind,
        valgrindLeakCheck=opts.valgrindLeakCheck,
        valgrindArgs=opts.valgrindArgs,
        noExecute=opts.noExecute,
        debug=opts.debug,
        isWindows=is_windows,
        order=opts.order,
        params=params,
        config_prefix=opts.configPrefix,
        echo_all_commands=opts.echoAllCommands)

    discovered_tests = lit.discovery.find_tests_for_inputs(lit_config, opts.test_paths,
                                                           opts.indirectlyRunCheck)
    if not discovered_tests:
        sys.stderr.write('error: did not discover any tests for provided path(s)\n')
        sys.exit(2)

    if opts.show_suites or opts.show_tests:
        print_discovered(discovered_tests, opts.show_suites, opts.show_tests)
        sys.exit(0)

    if opts.show_used_features:
        features = set(itertools.chain.from_iterable(
            t.getUsedFeatures()
            for t in discovered_tests if t.gtest_json_file is None))
        print(' '.join(sorted(features)))
        sys.exit(0)

    # Command line overrides configuration for maxIndividualTestTime.
    if opts.maxIndividualTestTime is not None:  # `not None` is important (default: 0)
        if opts.maxIndividualTestTime != lit_config.maxIndividualTestTime:
            lit_config.note(('The test suite configuration requested an individual'
                ' test timeout of {0} seconds but a timeout of {1} seconds was'
                ' requested on the command line. Forcing timeout to be {1}'
                ' seconds')
                .format(lit_config.maxIndividualTestTime,
                        opts.maxIndividualTestTime))
            lit_config.maxIndividualTestTime = opts.maxIndividualTestTime

    determine_order(discovered_tests, opts.order)

    selected_tests = [t for t in discovered_tests if
        opts.filter.search(t.getFullName()) and not
        opts.filter_out.search(t.getFullName())]

    if not selected_tests:
        sys.stderr.write('error: filter did not match any tests '
                         '(of %d discovered).  ' % len(discovered_tests))
        if opts.allow_empty_runs:
            sys.stderr.write("Suppressing error because '--allow-empty-runs' "
                             'was specified.\n')
            sys.exit(0)
        else:
            sys.stderr.write("Use '--allow-empty-runs' to suppress this "
                             'error.\n')
            sys.exit(2)

    # When running multiple shards, don't include skipped tests in the xunit
    # output since merging the files will result in duplicates.
    if opts.shard:
        (run, shards) = opts.shard
        selected_tests = filter_by_shard(selected_tests, run, shards, lit_config)
        if not selected_tests:
            sys.stderr.write('warning: shard does not contain any tests.  '
                             'Consider decreasing the number of shards.\n')
            sys.exit(0)

    selected_tests = selected_tests[:opts.max_tests]

    mark_xfail(discovered_tests, opts)

    mark_excluded(discovered_tests, selected_tests)

    start = time.time()
    run_tests(selected_tests, lit_config, opts, len(discovered_tests))
    elapsed = time.time() - start

    record_test_times(selected_tests, lit_config)

    selected_tests, discovered_tests = GoogleTest.post_process_shard_results(
        selected_tests, discovered_tests)

    if opts.time_tests:
        print_histogram(discovered_tests)

    print_results(discovered_tests, elapsed, opts)

    tests_for_report = selected_tests if opts.shard else discovered_tests
    for report in opts.reports:
        report.write_results(tests_for_report, elapsed)

    if lit_config.numErrors:
        sys.stderr.write('\n%d error(s) in tests\n' % lit_config.numErrors)
        sys.exit(2)

    if lit_config.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests\n' % lit_config.numWarnings)

    has_failure = any(t.isFailure() for t in discovered_tests)
    if has_failure:
        if opts.ignoreFail:
            sys.stderr.write("\nExiting with status 0 instead of 1 because "
                             "'--ignore-fail' was specified.\n")
        else:
            sys.exit(1)


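# Worked example (illustrative): `--param FOO=1 --param BAR` reaches
# create_params() as ['FOO=1', 'BAR'] and yields {'FOO': '1', 'BAR': ''};
# user-supplied values override builtin params with the same name.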
def create_params(builtin_params, user_params):
    def parse(p):
        return p.split('=', 1) if '=' in p else (p, '')

    params = dict(builtin_params)
    params.update([parse(p) for p in user_params])
    return params


def print_discovered(tests, show_suites, show_tests):
    tests.sort(key=lit.reports.by_suite_and_test_path)

    if show_suites:
        tests_by_suite = itertools.groupby(tests, lambda t: t.suite)
        print('-- Test Suites --')
        for suite, test_iter in tests_by_suite:
            test_count = sum(1 for _ in test_iter)
            print('  %s - %d tests' % (suite.name, test_count))
            print('    Source Root: %s' % suite.source_root)
            print('    Exec Root  : %s' % suite.exec_root)
            features = ' '.join(sorted(suite.config.available_features))
            print('    Available Features: %s' % features)
            substitutions = sorted(suite.config.substitutions)
            substitutions = ('%s => %s' % (x, y) for (x, y) in substitutions)
            substitutions = '\n'.ljust(30).join(substitutions)
            print('    Available Substitutions: %s' % substitutions)

    if show_tests:
        print('-- Available Tests --')
        for t in tests:
            print('  %s' % t.getFullName())


def determine_order(tests, order):
    from lit.cl_arguments import TestOrder
    enum_order = TestOrder(order)
    if enum_order == TestOrder.RANDOM:
        import random
        random.shuffle(tests)
    elif enum_order == TestOrder.LEXICAL:
        tests.sort(key=lambda t: t.getFullName())
    else:
        assert enum_order == TestOrder.SMART, 'Unknown TestOrder value'
        # Smart order: previously failing tests first, then longer-running
        # tests, then lexicographically by name as a stable tie-breaker.
        tests.sort(key=lambda t: (not t.previous_failure, -t.previous_elapsed, t.getFullName()))


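# Sharding picks every `shards`-th test starting at index `run - 1`. As an
# illustrative walk-through: with --num-shards=3 and --run-shard=2 over eight
# tests, the 0-based indices selected are 1, 4 and 7.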
def filter_by_shard(tests, run, shards, lit_config):
    test_ixs = range(run - 1, len(tests), shards)
    selected_tests = [tests[i] for i in test_ixs]

    # For clarity, generate a preview of the first few test indices in the shard
    # to accompany the arithmetic expression.
    preview_len = 3
    preview = ', '.join([str(i + 1) for i in test_ixs[:preview_len]])
    if len(test_ixs) > preview_len:
        preview += ', ...'
    msg = f'Selecting shard {run}/{shards} = ' \
          f'size {len(selected_tests)}/{len(tests)} = ' \
          f'tests #({shards}*k)+{run} = [{preview}]'
    lit_config.note(msg)
    return selected_tests


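# opts.xfail / opts.xfail_not come from the corresponding command-line options
# (or the LIT_XFAIL / LIT_XFAIL_NOT environment variables) and list tests
# either by their path within the suite or by their full name. Illustrative
# effect: naming a test there makes its failure report as XFAIL rather than
# FAIL.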
def mark_xfail(selected_tests, opts):
    for t in selected_tests:
        test_file = os.sep.join(t.path_in_suite)
        test_full_name = t.getFullName()
        if test_file in opts.xfail or test_full_name in opts.xfail:
            # An xfails entry of '*' marks the test as unconditionally
            # expected to fail.
            t.xfails += '*'
        if test_file in opts.xfail_not or test_full_name in opts.xfail_not:
            # xfail_not overrides any XFAIL annotation for this test.
            t.xfail_not = True


def mark_excluded(discovered_tests, selected_tests):
    excluded_tests = set(discovered_tests) - set(selected_tests)
    result = lit.Test.Result(lit.Test.EXCLUDED)
    for t in excluded_tests:
        t.setResult(result)


def run_tests(tests, lit_config, opts, discovered_tests):
    workers = min(len(tests), opts.workers)
    display = lit.display.create_display(opts, tests, discovered_tests, workers)

    run = lit.run.Run(tests, lit_config, workers, display.update,
                      opts.max_failures, opts.timeout)

    display.print_header()

    interrupted = False
    error = None
    try:
        execute_in_tmp_dir(run, lit_config)
    except KeyboardInterrupt:
        interrupted = True
        error = '  interrupted by user'
    except lit.run.MaxFailuresError:
        error = 'warning: reached maximum number of test failures'
    except lit.run.TimeoutError:
        error = 'warning: reached timeout'

    display.clear(interrupted)
    if error:
        sys.stderr.write('%s, skipping remaining tests\n' % error)


def execute_in_tmp_dir(run, lit_config):
    # Create a temp directory inside the normal temp directory so that we can
    # try to avoid temporary test file leaks. The user can avoid this behavior
    # by setting LIT_PRESERVES_TMP in the environment, so they can easily use
    # their own temp directory to monitor temporary file leaks or handle them at
    # the buildbot level.
    tmp_dir = None
    if 'LIT_PRESERVES_TMP' not in os.environ:
        import tempfile
        # z/OS linker does not support '_' in paths, so use '-'.
        tmp_dir = tempfile.mkdtemp(prefix='lit-tmp-')
        tmp_dir_envs = {k: tmp_dir for k in ['TMP', 'TMPDIR', 'TEMP', 'TEMPDIR']}
        os.environ.update(tmp_dir_envs)
        for cfg in {t.config for t in run.tests}:
            cfg.environment.update(tmp_dir_envs)
    try:
        run.execute()
    finally:
        if tmp_dir:
            try:
                import shutil
                shutil.rmtree(tmp_dir)
            except Exception:
                lit_config.warning(
                    "Failed to delete temp directory '%s', try upgrading your "
                    "version of Python to fix this" % tmp_dir)


def print_histogram(tests):
    test_times = [(t.getFullName(), t.result.elapsed)
                  for t in tests if t.result.elapsed]
    if test_times:
        lit.util.printHistogram(test_times, title='Tests')


def print_results(tests, elapsed, opts):
    tests_by_code = {code: [] for code in lit.Test.ResultCode.all_codes()}
    for test in tests:
        tests_by_code[test.result.code].append(test)

    for code in lit.Test.ResultCode.all_codes():
        print_group(sorted(tests_by_code[code], key=lambda t: t.getFullName()),
                    code, opts.shown_codes)

    print_summary(tests_by_code, opts.quiet, elapsed)


def print_group(tests, code, shown_codes):
    if not tests:
        return
    if not code.isFailure and code not in shown_codes:
        return
    print('*' * 20)
    print('{} Tests ({}):'.format(code.label, len(tests)))
    for test in tests:
        print('  %s' % test.getFullName())
    sys.stdout.write('\n')


def print_summary(tests_by_code, quiet, elapsed):
    if not quiet:
        print('\nTesting Time: %.2fs' % elapsed)

    codes = [c for c in lit.Test.ResultCode.all_codes()
             if not quiet or c.isFailure]
    groups = [(c.label, len(tests_by_code[c])) for c in codes]
    groups = [(label, count) for label, count in groups if count]
    if not groups:
        return

    max_label_len = max(len(label) for label, _ in groups)
    max_count_len = max(len(str(count)) for _, count in groups)

    for (label, count) in groups:
        label = label.ljust(max_label_len)
        count = str(count).rjust(max_count_len)
        print('  %s: %s' % (label, count))
317