# Copyright 2016-2017 The Meson development team

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# A tool to run tests in many different ways.

from pathlib import Path
from collections import deque
from copy import deepcopy
from itertools import islice
import argparse
import asyncio
import datetime
import enum
import json
import multiprocessing
import os
import pickle
import platform
import random
import re
import signal
import subprocess
import shlex
import sys
import textwrap
import time
import typing as T
import unicodedata
import xml.etree.ElementTree as et

from . import build
from . import environment
from . import mlog
from .coredata import major_versions_differ, MesonVersionMismatchException
from .coredata import version as coredata_version
from .mesonlib import (MesonException, OrderedSet, RealPathAction,
                       get_wine_shortpath, join_args, split_args)
from .mintro import get_infodir, load_info_file
from .programs import ExternalProgram
from .backend.backends import TestProtocol, TestSerialisation

# GNU autotools interprets a return code of 77 from tests it executes to
# mean that the test should be skipped.
GNU_SKIP_RETURNCODE = 77

# GNU autotools interprets a return code of 99 from tests it executes to
# mean that the test failed even before testing what it is supposed to test.
GNU_ERROR_RETURNCODE = 99

# Exit if 3 Ctrl-C's are received within one second
MAX_CTRLC = 3

def is_windows() -> bool:
    platname = platform.system().lower()
    return platname == 'windows'

def is_cygwin() -> bool:
    return sys.platform == 'cygwin'

UNIWIDTH_MAPPING = {'F': 2, 'H': 1, 'W': 2, 'Na': 1, 'N': 1, 'A': 1}
def uniwidth(s: str) -> int:
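    """Return the number of terminal columns occupied by *s*, counting
    East Asian wide and fullwidth characters as two columns each
    (for example, uniwidth('日本') == 4)."""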
    result = 0
    for c in s:
        w = unicodedata.east_asian_width(c)
        result += UNIWIDTH_MAPPING[w]
    return result

def determine_worker_count() -> int:
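    """Return the number of workers to use: the value of the
    MESON_TESTTHREADS environment variable if set, otherwise the CPU
    count, falling back to 1 if neither can be determined."""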
    varname = 'MESON_TESTTHREADS'
    if varname in os.environ:
        try:
            num_workers = int(os.environ[varname])
        except ValueError:
            print(f'Invalid value in {varname}, using 1 thread.')
            num_workers = 1
    else:
        try:
            # Fails in some weird environments such as Debian
            # reproducible build.
            num_workers = multiprocessing.cpu_count()
        except Exception:
            num_workers = 1
    return num_workers

def add_arguments(parser: argparse.ArgumentParser) -> None:
    parser.add_argument('--repeat', default=1, dest='repeat', type=int,
                        help='Number of times to run the tests.')
    parser.add_argument('--no-rebuild', default=False, action='store_true',
                        help='Do not rebuild before running tests.')
    parser.add_argument('--gdb', default=False, dest='gdb', action='store_true',
                        help='Run test under gdb.')
    parser.add_argument('--gdb-path', default='gdb', dest='gdb_path',
                        help='Path to the gdb binary (default: gdb).')
    parser.add_argument('--list', default=False, dest='list', action='store_true',
                        help='List available tests.')
    parser.add_argument('--wrapper', default=None, dest='wrapper', type=split_args,
                        help='wrapper to run tests with (e.g. Valgrind)')
    parser.add_argument('-C', dest='wd', action=RealPathAction,
                        # https://github.com/python/typeshed/issues/3107
                        # https://github.com/python/mypy/issues/7177
                        type=os.path.abspath,  # type: ignore
                        help='directory to cd into before running')
    parser.add_argument('--suite', default=[], dest='include_suites', action='append', metavar='SUITE',
                        help='Only run tests belonging to the given suite.')
    parser.add_argument('--no-suite', default=[], dest='exclude_suites', action='append', metavar='SUITE',
                        help='Do not run tests belonging to the given suite.')
    parser.add_argument('--no-stdsplit', default=True, dest='split', action='store_false',
                        help='Do not split stderr and stdout in test logs.')
    parser.add_argument('--print-errorlogs', default=False, action='store_true',
                        help="Whether to print failing tests' logs.")
    parser.add_argument('--benchmark', default=False, action='store_true',
                        help="Run benchmarks instead of tests.")
    parser.add_argument('--logbase', default='testlog',
                        help="Base name for log file.")
    parser.add_argument('--num-processes', default=determine_worker_count(), type=int,
                        help='How many parallel processes to use.')
    parser.add_argument('-v', '--verbose', default=False, action='store_true',
                        help='Do not redirect stdout and stderr')
    parser.add_argument('-q', '--quiet', default=False, action='store_true',
                        help='Produce less output to the terminal.')
    parser.add_argument('-t', '--timeout-multiplier', type=float, default=None,
                        help='Define a multiplier for test timeouts, useful for example '
                        'when tests run under particular conditions that make them '
                        'take more time to execute. (<= 0 to disable timeout)')
    parser.add_argument('--setup', default=None, dest='setup',
                        help='Which test setup to use.')
    parser.add_argument('--test-args', default=[], type=split_args,
                        help='Arguments to pass to the specified test(s) or all tests')
    parser.add_argument('args', nargs='*',
                        help='Optional list of test names to run. "testname" to run all tests with that name, '
                        '"subprojname:testname" to specifically run "testname" from "subprojname", '
                        '"subprojname:" to run all tests defined by "subprojname".')


def print_safe(s: str) -> None:
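    """Print *s*, falling back to a backslash-escaped ASCII rendering
    if the console encoding cannot represent some characters."""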
    end = '' if s[-1] == '\n' else '\n'
    try:
        print(s, end=end)
    except UnicodeEncodeError:
        s = s.encode('ascii', errors='backslashreplace').decode('ascii')
        print(s, end=end)

def join_lines(a: str, b: str) -> str:
    if not a:
        return b
    if not b:
        return a
    return a + '\n' + b

def dashes(s: str, dash: str, cols: int) -> str:
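    """Center *s* within a row of *cols* columns padded with *dash*;
    for example, dashes('stdout', '-', 12) == '-- stdout --'."""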
    if not s:
        return dash * cols
    s = ' ' + s + ' '
    width = uniwidth(s)
    first = (cols - width) // 2
    s = dash * first + s
    return s + dash * (cols - first - width)

def returncode_to_status(retcode: int) -> str:
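    """Translate a subprocess return code into a human-readable string,
    e.g. returncode_to_status(-6) == 'killed by signal 6 SIGABRT' on POSIX."""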
    # Note: We can't use `os.WIFSIGNALED(result.returncode)` and the related
    # functions here because the status returned by subprocess is munged. It
    # returns a negative value if the process was killed by a signal rather than
    # the raw status returned by `wait()`. Also, if a shell sits between Meson
    # and the actual unit test, that shell is likely to convert a termination due
    # to a signal into an exit status of 128 plus the signal number.
    if retcode < 0:
        signum = -retcode
        try:
            signame = signal.Signals(signum).name
        except ValueError:
            signame = 'SIGinvalid'
        return f'killed by signal {signum} {signame}'

    if retcode <= 128:
        return f'exit status {retcode}'

    signum = retcode - 128
    try:
        signame = signal.Signals(signum).name
    except ValueError:
        signame = 'SIGinvalid'
    return f'(exit status {retcode} or signal {signum} {signame})'

# TODO for Windows
sh_quote: T.Callable[[str], str] = lambda x: x
if not is_windows():
    sh_quote = shlex.quote

def env_tuple_to_str(env: T.Iterable[T.Tuple[str, str]]) -> str:
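    """Render (name, value) pairs as shell-style variable assignments;
    for example, [('FOO', 'a b')] becomes "FOO='a b' " where sh_quote
    is shlex.quote (i.e. everywhere except Windows)."""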
    return ''.join(["{}={} ".format(k, sh_quote(v)) for k, v in env])


class TestException(MesonException):
    pass


@enum.unique
class ConsoleUser(enum.Enum):

    # the logger can use the console
    LOGGER = 0

    # the console is used by gdb
    GDB = 1

    # the console is used to write stdout/stderr
    STDOUT = 2


@enum.unique
class TestResult(enum.Enum):

    PENDING = 'PENDING'
    RUNNING = 'RUNNING'
    OK = 'OK'
    TIMEOUT = 'TIMEOUT'
    INTERRUPT = 'INTERRUPT'
    SKIP = 'SKIP'
    FAIL = 'FAIL'
    EXPECTEDFAIL = 'EXPECTEDFAIL'
    UNEXPECTEDPASS = 'UNEXPECTEDPASS'
    ERROR = 'ERROR'

    @staticmethod
    def maxlen() -> int:
        return 14 # len(UNEXPECTEDPASS)

    def is_ok(self) -> bool:
        return self in {TestResult.OK, TestResult.EXPECTEDFAIL}

    def is_bad(self) -> bool:
        return self in {TestResult.FAIL, TestResult.TIMEOUT, TestResult.INTERRUPT,
                        TestResult.UNEXPECTEDPASS, TestResult.ERROR}

    def is_finished(self) -> bool:
        return self not in {TestResult.PENDING, TestResult.RUNNING}

    def was_killed(self) -> bool:
        return self in (TestResult.TIMEOUT, TestResult.INTERRUPT)

    def colorize(self, s: str) -> mlog.AnsiDecorator:
        if self.is_bad():
            decorator = mlog.red
        elif self in (TestResult.SKIP, TestResult.EXPECTEDFAIL):
            decorator = mlog.yellow
        elif self.is_finished():
            decorator = mlog.green
        else:
            decorator = mlog.blue
        return decorator(s)

    def get_text(self, colorize: bool) -> str:
        result_str = '{res:{reslen}}'.format(res=self.value, reslen=self.maxlen())
        return self.colorize(result_str).get_text(colorize)


TYPE_TAPResult = T.Union['TAPParser.Test', 'TAPParser.Error', 'TAPParser.Version', 'TAPParser.Plan', 'TAPParser.Bailout']

class TAPParser:
    class Plan(T.NamedTuple):
        num_tests: int
        late: bool
        skipped: bool
        explanation: T.Optional[str]

    class Bailout(T.NamedTuple):
        message: str

    class Test(T.NamedTuple):
        number: int
        name: str
        result: TestResult
        explanation: T.Optional[str]

        def __str__(self) -> str:
            return f'{self.number} {self.name}'.strip()

    class Error(T.NamedTuple):
        message: str

    class Version(T.NamedTuple):
        version: int

    _MAIN = 1
    _AFTER_TEST = 2
    _YAML = 3

    _RE_BAILOUT = re.compile(r'Bail out!\s*(.*)')
    _RE_DIRECTIVE = re.compile(r'(?:\s*\#\s*([Ss][Kk][Ii][Pp]\S*|[Tt][Oo][Dd][Oo])\b\s*(.*))?')
    _RE_PLAN = re.compile(r'1\.\.([0-9]+)' + _RE_DIRECTIVE.pattern)
    _RE_TEST = re.compile(r'((?:not )?ok)\s*(?:([0-9]+)\s*)?([^#]*)' + _RE_DIRECTIVE.pattern)
    _RE_VERSION = re.compile(r'TAP version ([0-9]+)')
    _RE_YAML_START = re.compile(r'(\s+)---.*')
    _RE_YAML_END = re.compile(r'\s+\.\.\.\s*')
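
    # Illustrative sketch (not part of the parser): feeding the canonical
    # TAP stream below through TAPParser().parse() would yield a Plan event
    # followed by two Test events:
    #
    #     1..2
    #     ok 1 - first
    #     not ok 2 - second
    #
    # -> Plan(num_tests=2, late=False, skipped=False, explanation=None)
    #    Test(1, 'first', TestResult.OK, None)
    #    Test(2, 'second', TestResult.FAIL, None)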

    found_late_test = False
    bailed_out = False
    plan: T.Optional[Plan] = None
    lineno = 0
    num_tests = 0
    yaml_lineno: T.Optional[int] = None
    yaml_indent = ''
    state = _MAIN
    version = 12

    def parse_test(self, ok: bool, num: int, name: str, directive: T.Optional[str], explanation: T.Optional[str]) -> \
            T.Generator[T.Union['TAPParser.Test', 'TAPParser.Error'], None, None]:
        name = name.strip()
        if name[0:2] == '- ':
            name = name[2:]
        explanation = explanation.strip() if explanation else None
        if directive is not None:
            directive = directive.upper()
            if directive.startswith('SKIP'):
                if ok:
                    yield self.Test(num, name, TestResult.SKIP, explanation)
                    return
            elif directive == 'TODO':
                yield self.Test(num, name, TestResult.UNEXPECTEDPASS if ok else TestResult.EXPECTEDFAIL, explanation)
                return
            else:
                yield self.Error(f'invalid directive "{directive}"')

        yield self.Test(num, name, TestResult.OK if ok else TestResult.FAIL, explanation)

    async def parse_async(self, lines: T.AsyncIterator[str]) -> T.AsyncIterator[TYPE_TAPResult]:
        async for line in lines:
            for event in self.parse_line(line):
                yield event
        for event in self.parse_line(None):
            yield event

    def parse(self, io: T.Iterator[str]) -> T.Iterator[TYPE_TAPResult]:
        for line in io:
            yield from self.parse_line(line)
        yield from self.parse_line(None)

    def parse_line(self, line: T.Optional[str]) -> T.Iterator[TYPE_TAPResult]:
        if line is not None:
            self.lineno += 1
            line = line.rstrip()

            # YAML blocks are only accepted after a test
            if self.state == self._AFTER_TEST:
                if self.version >= 13:
                    m = self._RE_YAML_START.match(line)
                    if m:
                        self.state = self._YAML
                        self.yaml_lineno = self.lineno
                        self.yaml_indent = m.group(1)
                        return
                self.state = self._MAIN

            elif self.state == self._YAML:
                if self._RE_YAML_END.match(line):
                    self.state = self._MAIN
                    return
                if line.startswith(self.yaml_indent):
                    return
                yield self.Error(f'YAML block not terminated (started on line {self.yaml_lineno})')
                self.state = self._MAIN

            assert self.state == self._MAIN
            if line.startswith('#'):
                return

            m = self._RE_TEST.match(line)
            if m:
                if self.plan and self.plan.late and not self.found_late_test:
                    yield self.Error('unexpected test after late plan')
                    self.found_late_test = True
                self.num_tests += 1
                num = self.num_tests if m.group(2) is None else int(m.group(2))
                if num != self.num_tests:
                    yield self.Error('out of order test numbers')
                yield from self.parse_test(m.group(1) == 'ok', num,
                                           m.group(3), m.group(4), m.group(5))
                self.state = self._AFTER_TEST
                return

            m = self._RE_PLAN.match(line)
            if m:
                if self.plan:
                    yield self.Error('more than one plan found')
                else:
                    num_tests = int(m.group(1))
                    skipped = (num_tests == 0)
                    if m.group(2):
                        if m.group(2).upper().startswith('SKIP'):
                            if num_tests > 0:
                                yield self.Error('invalid SKIP directive for plan')
                            skipped = True
                        else:
                            yield self.Error('invalid directive for plan')
                    self.plan = self.Plan(num_tests=num_tests, late=(self.num_tests > 0),
                                          skipped=skipped, explanation=m.group(3))
                    yield self.plan
                return

            m = self._RE_BAILOUT.match(line)
            if m:
                yield self.Bailout(m.group(1))
                self.bailed_out = True
                return

            m = self._RE_VERSION.match(line)
            if m:
                # The TAP version is only accepted as the first line
                if self.lineno != 1:
                    yield self.Error('version number must be on the first line')
                    return
                self.version = int(m.group(1))
                if self.version < 13:
                    yield self.Error('version number should be at least 13')
                else:
                    yield self.Version(version=self.version)
                return

            if not line:
                return
            yield self.Error(f'unexpected input at line {self.lineno}')
        else:
            # end of file
            if self.state == self._YAML:
                yield self.Error(f'YAML block not terminated (started on line {self.yaml_lineno})')

            if not self.bailed_out and self.plan and self.num_tests != self.plan.num_tests:
                if self.num_tests < self.plan.num_tests:
                    yield self.Error(f'Too few tests run (expected {self.plan.num_tests}, got {self.num_tests})')
                else:
                    yield self.Error(f'Too many tests run (expected {self.plan.num_tests}, got {self.num_tests})')

class TestLogger:
    def flush(self) -> None:
        pass

    def start(self, harness: 'TestHarness') -> None:
        pass

    def start_test(self, harness: 'TestHarness', test: 'TestRun') -> None:
        pass

    def log_subtest(self, harness: 'TestHarness', test: 'TestRun', s: str, res: TestResult) -> str:
        return ''

    def log(self, harness: 'TestHarness', result: 'TestRun') -> None:
        pass

    async def finish(self, harness: 'TestHarness') -> None:
        pass

    def close(self) -> None:
        pass


class TestFileLogger(TestLogger):
    def __init__(self, filename: str, errors: str = 'replace') -> None:
        self.filename = filename
        self.file = open(filename, 'w', encoding='utf-8', errors=errors)

    def close(self) -> None:
        if self.file:
            self.file.close()
            self.file = None


class ConsoleLogger(TestLogger):
    def __init__(self) -> None:
        self.update = asyncio.Event()
        self.running_tests = OrderedSet()  # type: OrderedSet['TestRun']
        self.progress_task = None          # type: T.Optional[asyncio.Future]
        self.max_left_width = 0            # type: int
        self.stop = False
        self.should_erase_line = ''
        self.test_count = 0
        self.started_tests = 0
        try:
            self.cols, _ = os.get_terminal_size(1)
            self.is_tty = True
        except OSError:
            self.cols = 80
            self.is_tty = False

    def flush(self) -> None:
        if self.should_erase_line:
            print(self.should_erase_line, end='')
            self.should_erase_line = ''

    def print_progress(self, lines: T.List[str]) -> None:
        line_count = len(lines)
        if line_count > 0:
            self.flush()
            for line in lines:
                print(line)
            print(f'\x1b[{line_count}A', end='')
            self.should_erase_line = '\x1b[K' + '\x1b[1B\x1b[K' * (line_count - 1)
            if line_count > 1:
                self.should_erase_line += f'\x1b[{line_count - 1}A'

    def request_update(self) -> None:
        self.update.set()

    def emit_progress(self, harness: 'TestHarness') -> None:
        lines: T.List[str] = []
        for test in islice(reversed(self.running_tests), 10):
            left = ' ' * (len(str(self.test_count)) * 2 + 2)
            right = '{spaces} {dur:{durlen}}'.format(
                spaces=' ' * TestResult.maxlen(),
                dur=int(time.time() - test.starttime),
                durlen=harness.duration_max_len)
            if test.timeout:
                right += '/{timeout:{durlen}}'.format(
                    timeout=test.timeout,
                    durlen=harness.duration_max_len)
            right += 's'
            lines = [harness.format(test, colorize=True,
                                    max_left_width=self.max_left_width,
                                    left=left,
                                    right=right)] + lines
        if len(self.running_tests) > 10:
            lines += [' ' * len(harness.get_test_num_prefix(0))
                      + f'[{len(self.running_tests) - 10} more tests running]']
        self.print_progress(lines)

    def start(self, harness: 'TestHarness') -> None:
        async def report_progress() -> None:
            loop = asyncio.get_event_loop()
            next_update = 0.0
            self.request_update()
            while not self.stop:
                await self.update.wait()
                self.update.clear()
                # We may get here simply because the progress line has been
                # overwritten, so do not always switch.  Only do so every
                # second, or if the printed test has finished
                if loop.time() >= next_update:
                    next_update = loop.time() + 1
                    loop.call_at(next_update, self.request_update)
                self.emit_progress(harness)
            self.flush()

        self.test_count = harness.test_count
        self.cols = max(self.cols, harness.max_left_width + 30)

        if self.is_tty and not harness.need_console:
            # Account for "[aa-bb/cc] OO " in the progress report
            self.max_left_width = 3 * len(str(self.test_count)) + 8
            self.progress_task = asyncio.ensure_future(report_progress())

    def start_test(self, harness: 'TestHarness', test: 'TestRun') -> None:
        if harness.options.verbose and test.cmdline:
            self.flush()
            print(harness.format(test, mlog.colorize_console(),
                                 max_left_width=self.max_left_width,
                                 right=test.res.get_text(mlog.colorize_console())))
        self.started_tests += 1
        self.running_tests.add(test)
        self.running_tests.move_to_end(test, last=False)
        self.request_update()

    @staticmethod
    def print_test_details_header(prefix: str, header: str) -> None:
        header += ':'
        print(prefix + mlog.italic(f'{header:<9}').get_text(mlog.colorize_console()))

    @staticmethod
    def print_test_details_line(prefix: str,
                                line: str,
                                end: str = '\n',
                                flush: bool = False) -> None:
        print(prefix + '  ' + line, flush=flush, end=end)

    @staticmethod
    def print_test_details(prefix: str,
                           header: str,
                           lines: T.Union[T.List[str], str],
                           clip: T.Optional[bool] = False) -> None:
        offset = 0
        if not isinstance(lines, list):
            lines = [lines]
        if clip and len(lines) > 100:
            offset = -100
            header += ' (only the last 100 lines from a long output included)'
        ConsoleLogger.print_test_details_header(prefix, header)
        for line in lines[offset:]:
            ConsoleLogger.print_test_details_line(prefix, line)

    def print_log(self,
                  harness: 'TestHarness',
                  result: 'TestRun',
                  no_output: bool = False) -> None:
        assert result.cmdline
        prefix = harness.get_test_num_prefix(result.num)
        self.print_test_details(prefix, "command", result.cmdline)
        self.print_test_details(prefix,
                                "exit details",
                                returncode_to_status(result.returncode))
        if not no_output:
            if result.stdo:
                if harness.options.split or result.stde:
                    name = 'stdout'
                else:
                    name = 'output'
                self.print_test_details(prefix,
                                        name,
                                        result.stdo.splitlines(),
                                        not harness.options.verbose)
            if result.stde:
                self.print_test_details(prefix,
                                        "stderr",
                                        result.stde.splitlines(),
                                        not harness.options.verbose)
            if result.additional_out:
                self.print_test_details(prefix,
                                        "additional output",
                                        result.additional_out.splitlines(),
                                        not harness.options.verbose)
            if result.additional_err:
                self.print_test_details(prefix,
                                        "additional error",
                                        result.additional_err.splitlines(),
                                        not harness.options.verbose)

    def log_subtest(self, harness: 'TestHarness', test: 'TestRun', s: str, result: TestResult) -> str:
        return 'subtest %s %s' % (s, result.get_text(mlog.colorize_console()))

    def log(self, harness: 'TestHarness', result: 'TestRun') -> None:
        self.running_tests.remove(result)
        if result.res is TestResult.TIMEOUT and (harness.options.verbose or
                                                 harness.options.print_errorlogs):
            result.additional_err += f'timed out (after {result.timeout} seconds)\n'

        if not harness.options.quiet or not result.res.is_ok():
            self.flush()
            print(harness.format(result, mlog.colorize_console(), max_left_width=self.max_left_width))
            if harness.options.verbose and not result.is_parallel and result.cmdline and not result.needs_parsing:
                # output already printed during execution
                self.print_log(harness, result, no_output=True)
            elif harness.options.verbose or (result.res.is_bad() and harness.options.print_errorlogs):
                # verbose or fail + print_errorlogs -> print
                self.print_log(harness, result)

        self.request_update()

    async def finish(self, harness: 'TestHarness') -> None:
        self.stop = True
        self.request_update()
        if self.progress_task:
            await self.progress_task

        if harness.collected_failures and \
                (harness.options.print_errorlogs or harness.options.verbose):
            print("\nSummary of Failures:\n")
            for i, result in enumerate(harness.collected_failures, 1):
                print(harness.format(result, mlog.colorize_console()))

        print(harness.summary())


class TextLogfileBuilder(TestFileLogger):
    def start(self, harness: 'TestHarness') -> None:
        self.file.write(f'Log of Meson test suite run on {datetime.datetime.now().isoformat()}\n\n')
        inherit_env = env_tuple_to_str(os.environ.items())
        self.file.write(f'Inherited environment: {inherit_env}\n\n')

    def log(self, harness: 'TestHarness', result: 'TestRun') -> None:
        self.file.write(harness.format(result, False) + '\n')
        cmdline = result.cmdline
        if cmdline:
            starttime_str = time.strftime("%H:%M:%S", time.gmtime(result.starttime))
            self.file.write(starttime_str + ' ' + cmdline + '\n')
            if result.stdo:
                self.file.write(dashes('stdout', '-', 78) + '\n')
                self.file.write(result.stdo + '\n')
                self.file.write(dashes('', '-', 78) + '\n\n')
            if result.stde:
                self.file.write(dashes('stderr', '-', 78) + '\n')
                self.file.write(result.stde + '\n')
                self.file.write(dashes('', '-', 78) + '\n\n')

    async def finish(self, harness: 'TestHarness') -> None:
        if harness.collected_failures:
            self.file.write("\nSummary of Failures:\n\n")
            for i, result in enumerate(harness.collected_failures, 1):
                self.file.write(harness.format(result, False) + '\n')
        self.file.write(harness.summary())

        print(f'Full log written to {self.filename}')


class JsonLogfileBuilder(TestFileLogger):
    def log(self, harness: 'TestHarness', result: 'TestRun') -> None:
        jresult = {'name': result.name,
                   'stdout': result.stdo,
                   'result': result.res.value,
                   'starttime': result.starttime,
                   'duration': result.duration,
                   'returncode': result.returncode,
                   'env': result.env,
                   'command': result.cmd}  # type: T.Dict[str, T.Any]
        if result.stde:
            jresult['stderr'] = result.stde
        self.file.write(json.dumps(jresult) + '\n')


class JunitBuilder(TestLogger):

    """Builder for Junit test results.

    Junit is impossible to stream out because it requires attributes counting
    the total number of tests, failures, skips, and errors in the root element
    and in each test suite. As such, we use a builder class to track each
    test case and calculate all metadata before writing it out.

    For tests with multiple results (like from a TAP test), we record the
    test as a suite with the project_name.test_name. This allows us to track
    each result separately. For tests with only one result (such as exit-code
    tests) we record each one into a suite with the name project_name. The use
    of the project_name allows us to sort subproject tests separately from
    the root project.
    """

    def __init__(self, filename: str) -> None:
        self.filename = filename
        self.root = et.Element(
            'testsuites', tests='0', errors='0', failures='0')
        self.suites = {}  # type: T.Dict[str, et.Element]

    def log(self, harness: 'TestHarness', test: 'TestRun') -> None:
        """Log a single test case."""
        if test.junit is not None:
            for suite in test.junit.findall('.//testsuite'):
                # Assume that we don't need to merge anything here...
                suite.attrib['name'] = '{}.{}.{}'.format(test.project, test.name, suite.attrib['name'])

                # GTest can inject invalid attributes
                for case in suite.findall('.//testcase[@result]'):
                    del case.attrib['result']
                for case in suite.findall('.//testcase[@timestamp]'):
                    del case.attrib['timestamp']
                self.root.append(suite)
            return

        # In this case we have a test binary with multiple results.
        # We want to record this so that each result is recorded
        # separately
        if test.results:
            suitename = f'{test.project}.{test.name}'
            assert suitename not in self.suites or harness.options.repeat > 1, 'duplicate suite'

            suite = self.suites[suitename] = et.Element(
                'testsuite',
                name=suitename,
                tests=str(len(test.results)),
                errors=str(sum(1 for r in test.results if r.result in
                               {TestResult.INTERRUPT, TestResult.ERROR})),
                failures=str(sum(1 for r in test.results if r.result in
                                 {TestResult.FAIL, TestResult.UNEXPECTEDPASS, TestResult.TIMEOUT})),
                skipped=str(sum(1 for r in test.results if r.result is TestResult.SKIP)),
                time=str(test.duration),
            )

            for subtest in test.results:
                # Both name and classname are required. Use the suite name as
                # the class name, so that e.g. GitLab groups testcases correctly.
                testcase = et.SubElement(suite, 'testcase', name=str(subtest), classname=suitename)
                if subtest.result is TestResult.SKIP:
                    et.SubElement(testcase, 'skipped')
                elif subtest.result is TestResult.ERROR:
                    et.SubElement(testcase, 'error')
                elif subtest.result is TestResult.FAIL:
                    et.SubElement(testcase, 'failure')
                elif subtest.result is TestResult.UNEXPECTEDPASS:
                    fail = et.SubElement(testcase, 'failure')
                    fail.text = 'Test unexpectedly passed.'
                elif subtest.result is TestResult.INTERRUPT:
                    fail = et.SubElement(testcase, 'error')
                    fail.text = 'Test was interrupted by user.'
                elif subtest.result is TestResult.TIMEOUT:
                    fail = et.SubElement(testcase, 'error')
                    fail.text = 'Test did not finish before configured timeout.'
                if subtest.explanation:
                    et.SubElement(testcase, 'system-out').text = subtest.explanation
            if test.stdo:
                out = et.SubElement(suite, 'system-out')
                out.text = test.stdo.rstrip()
            if test.stde:
                err = et.SubElement(suite, 'system-err')
                err.text = test.stde.rstrip()
        else:
            if test.project not in self.suites:
                suite = self.suites[test.project] = et.Element(
                    'testsuite', name=test.project, tests='1', errors='0',
                    failures='0', skipped='0', time=str(test.duration))
            else:
                suite = self.suites[test.project]
                suite.attrib['tests'] = str(int(suite.attrib['tests']) + 1)

            testcase = et.SubElement(suite, 'testcase', name=test.name,
                                     classname=test.project, time=str(test.duration))
            if test.res is TestResult.SKIP:
                et.SubElement(testcase, 'skipped')
                suite.attrib['skipped'] = str(int(suite.attrib['skipped']) + 1)
            elif test.res is TestResult.ERROR:
                et.SubElement(testcase, 'error')
                suite.attrib['errors'] = str(int(suite.attrib['errors']) + 1)
            elif test.res is TestResult.FAIL:
                et.SubElement(testcase, 'failure')
                suite.attrib['failures'] = str(int(suite.attrib['failures']) + 1)
            if test.stdo:
                out = et.SubElement(testcase, 'system-out')
                out.text = test.stdo.rstrip()
            if test.stde:
                err = et.SubElement(testcase, 'system-err')
                err.text = test.stde.rstrip()

    async def finish(self, harness: 'TestHarness') -> None:
        """Calculate total test counts and write out the xml result."""
        for suite in self.suites.values():
            self.root.append(suite)
            # Skipped is really not allowed in the "testsuites" element
            for attr in ['tests', 'errors', 'failures']:
                self.root.attrib[attr] = str(int(self.root.attrib[attr]) + int(suite.attrib[attr]))

        tree = et.ElementTree(self.root)
        with open(self.filename, 'wb') as f:
            tree.write(f, encoding='utf-8', xml_declaration=True)


class TestRun:
    TEST_NUM = 0
    PROTOCOL_TO_CLASS: T.Dict[TestProtocol, T.Type['TestRun']] = {}

    def __new__(cls, test: TestSerialisation, *args: T.Any, **kwargs: T.Any) -> T.Any:
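        """Dispatch instantiation to the TestRun subclass registered for
        the test's protocol in PROTOCOL_TO_CLASS."""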
        return super().__new__(TestRun.PROTOCOL_TO_CLASS[test.protocol])

    def __init__(self, test: TestSerialisation, test_env: T.Dict[str, str],
                 name: str, timeout: T.Optional[int], is_parallel: bool):
        self.res = TestResult.PENDING
        self.test = test
        self._num = None       # type: T.Optional[int]
        self.name = name
        self.timeout = timeout
        self.results = list()  # type: T.List[TAPParser.Test]
        self.returncode = 0
        self.starttime = None  # type: T.Optional[float]
        self.duration = None   # type: T.Optional[float]
        self.stdo = None       # type: T.Optional[str]
        self.stde = None       # type: T.Optional[str]
        self.cmd = None        # type: T.Optional[T.List[str]]
        self.env = test_env    # type: T.Dict[str, str]
        self.should_fail = test.should_fail
        self.project = test.project_name
        self.junit = None      # type: T.Optional[et.ElementTree]
        self.is_parallel = is_parallel

    def start(self, cmd: T.List[str]) -> None:
        self.res = TestResult.RUNNING
        self.starttime = time.time()
        self.cmd = cmd

    @property
    def num(self) -> int:
        if self._num is None:
            TestRun.TEST_NUM += 1
            self._num = TestRun.TEST_NUM
        return self._num

    def detail(self) -> str:
        if self.res is TestResult.PENDING:
            return ''
        if self.returncode:
            return returncode_to_status(self.returncode)
        if self.results:
            # running or succeeded
            passed = sum(x.result.is_ok() for x in self.results)
            ran = sum(x.result is not TestResult.SKIP for x in self.results)
            if passed == ran:
                return f'{passed} subtests passed'
            else:
                return f'{passed}/{ran} subtests passed'
        return ''

    def _complete(self, returncode: int, res: TestResult,
                  stdo: T.Optional[str], stde: T.Optional[str],
                  additional_out: T.Optional[str], additional_err: T.Optional[str]) -> None:
        assert isinstance(res, TestResult)
        if self.should_fail and res in (TestResult.OK, TestResult.FAIL):
            res = TestResult.UNEXPECTEDPASS if res.is_ok() else TestResult.EXPECTEDFAIL

        self.res = res
        self.returncode = returncode
        self.duration = time.time() - self.starttime
        self.stdo = stdo
        self.stde = stde
        self.additional_out = additional_out
        self.additional_err = additional_err

    @property
    def cmdline(self) -> T.Optional[str]:
        if not self.cmd:
            return None
        test_only_env = set(self.env.items()) - set(os.environ.items())
        return env_tuple_to_str(test_only_env) + \
            ' '.join(sh_quote(x) for x in self.cmd)

    def complete_skip(self, message: str) -> None:
        self.starttime = time.time()
        self._complete(GNU_SKIP_RETURNCODE, TestResult.SKIP, message, None, None, None)

    def complete(self, returncode: int, res: TestResult,
                 stdo: T.Optional[str], stde: T.Optional[str],
                 additional_out: T.Optional[str], additional_err: T.Optional[str]) -> None:
        self._complete(returncode, res, stdo, stde, additional_out, additional_err)

    @property
    def needs_parsing(self) -> bool:
        return False

    async def parse(self, harness: 'TestHarness', lines: T.AsyncIterator[str]) -> T.Tuple[TestResult, str, str]:
        async for l in lines:
            pass
        return TestResult.OK, '', ''


class TestRunExitCode(TestRun):

    def complete(self, returncode: int, res: TestResult,
                 stdo: T.Optional[str], stde: T.Optional[str],
                 additional_out: T.Optional[str], additional_err: T.Optional[str]) -> None:
        if res:
            pass
        elif returncode == GNU_SKIP_RETURNCODE:
            res = TestResult.SKIP
        elif returncode == GNU_ERROR_RETURNCODE:
            res = TestResult.ERROR
        else:
            res = TestResult.FAIL if bool(returncode) else TestResult.OK
        super().complete(returncode, res, stdo, stde, additional_out, additional_err)

TestRun.PROTOCOL_TO_CLASS[TestProtocol.EXITCODE] = TestRunExitCode


class TestRunGTest(TestRunExitCode):
    def complete(self, returncode: int, res: TestResult,
                 stdo: T.Optional[str], stde: T.Optional[str],
                 additional_out: T.Optional[str], additional_err: T.Optional[str]) -> None:
        filename = f'{self.test.name}.xml'
        if self.test.workdir:
            filename = os.path.join(self.test.workdir, filename)

        try:
            self.junit = et.parse(filename)
        except FileNotFoundError:
            # This can happen if the test fails to run or complete for some
            # reason, like the rpath for libgtest isn't properly set. ExitCode
            # will handle the failure, don't generate a stacktrace.
            pass

        super().complete(returncode, res, stdo, stde, additional_out, additional_err)

TestRun.PROTOCOL_TO_CLASS[TestProtocol.GTEST] = TestRunGTest


class TestRunTAP(TestRun):
    @property
    def needs_parsing(self) -> bool:
        return True

    def complete(self, returncode: int, res: TestResult,
                 stdo: T.Optional[str], stde: T.Optional[str],
                 additional_out: T.Optional[str], additional_err: T.Optional[str]) -> None:
        if returncode != 0 and not res.was_killed():
            res = TestResult.ERROR
            stde = stde or ''
            stde += f'\n(test program exited with status code {returncode})'

        super().complete(returncode, res, stdo, stde, additional_out, additional_err)

    async def parse(self,
                    harness: 'TestHarness',
                    lines: T.AsyncIterator[str]) -> T.Tuple[TestResult, str, str]:
        res = TestResult.OK
        output = ''
        error = ''

        async for i in TAPParser().parse_async(lines):
            if isinstance(i, TAPParser.Bailout):
                res = TestResult.ERROR
                output += '\n' + harness.log_subtest(self, i.message, res)
            elif isinstance(i, TAPParser.Test):
                self.results.append(i)
                if i.result.is_bad():
                    res = TestResult.FAIL
                output += '\n' + harness.log_subtest(self, i.name or f'subtest {i.number}', i.result)
            elif isinstance(i, TAPParser.Error):
                error += '\nTAP parsing error: ' + i.message
                res = TestResult.ERROR

        if all(t.result is TestResult.SKIP for t in self.results):
            # This includes the case where self.results is empty
            res = TestResult.SKIP
        return res, output, error

TestRun.PROTOCOL_TO_CLASS[TestProtocol.TAP] = TestRunTAP


class TestRunRust(TestRun):
    @property
    def needs_parsing(self) -> bool:
        return True

    async def parse(self,
                    harness: 'TestHarness',
                    lines: T.AsyncIterator[str]) -> T.Tuple[TestResult, str, str]:
        def parse_res(n: int, name: str, result: str) -> TAPParser.Test:
            if result == 'ok':
                return TAPParser.Test(n, name, TestResult.OK, None)
            elif result == 'ignored':
                return TAPParser.Test(n, name, TestResult.SKIP, None)
            elif result == 'FAILED':
                return TAPParser.Test(n, name, TestResult.FAIL, None)
            return TAPParser.Test(n, name, TestResult.ERROR,
                                  f'Unsupported output from rust test: {result}')

        output = ''
        n = 1
        async for line in lines:
            if line.startswith('test ') and not line.startswith('test result'):
                _, name, _, result = line.rstrip().split(' ')
                name = name.replace('::', '.')
                t = parse_res(n, name, result)
                self.results.append(t)
                output += '\n' + harness.log_subtest(self, name, t.result)
                n += 1

        if all(t.result is TestResult.SKIP for t in self.results):
            # This includes the case where self.results is empty
            return TestResult.SKIP, output, ''
        elif any(t.result is TestResult.ERROR for t in self.results):
            return TestResult.ERROR, output, ''
        elif any(t.result is TestResult.FAIL for t in self.results):
            return TestResult.FAIL, output, ''
        return TestResult.OK, output, ''

TestRun.PROTOCOL_TO_CLASS[TestProtocol.RUST] = TestRunRust


def decode(stream: T.Union[None, bytes]) -> str:
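    """Decode bytes as UTF-8, falling back to ISO-8859-1 on invalid
    input; None decodes to the empty string."""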
    if stream is None:
        return ''
    try:
        return stream.decode('utf-8')
    except UnicodeDecodeError:
        return stream.decode('iso-8859-1', errors='ignore')

async def read_decode(reader: asyncio.StreamReader,
                      line_handler: T.Callable[[str], None]) -> str:
    stdo_lines = []
    try:
        while not reader.at_eof():
            line = decode(await reader.readline())
            if len(line) == 0:
                continue
            stdo_lines.append(line)
            if line_handler:
                line_handler(line)
        return ''.join(stdo_lines)
    except asyncio.CancelledError:
        return ''.join(stdo_lines)

# Extract lines out of the StreamReader.  Print them
# along the way if requested, and at the end collect
# them all into a future.
async def read_decode_lines(reader: asyncio.StreamReader, q: 'asyncio.Queue[T.Optional[str]]',
                            console_mode: ConsoleUser) -> str:
    stdo_lines = []
    try:
        while not reader.at_eof():
            line = decode(await reader.readline())
            stdo_lines.append(line)
            if console_mode is ConsoleUser.STDOUT:
                print(line, end='', flush=True)
            await q.put(line)
        return ''.join(stdo_lines)
    except asyncio.CancelledError:
        return ''.join(stdo_lines)
    finally:
        await q.put(None)

def run_with_mono(fname: str) -> bool:
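    """Return True if *fname* is a .exe that has to be run with Mono,
    i.e. on any platform other than Windows or Cygwin."""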
    return fname.endswith('.exe') and not (is_windows() or is_cygwin())

def check_testdata(objs: T.List[TestSerialisation]) -> T.List[TestSerialisation]:
    if not isinstance(objs, list):
        raise MesonVersionMismatchException('<unknown>', coredata_version)
    for obj in objs:
        if not isinstance(obj, TestSerialisation):
            raise MesonVersionMismatchException('<unknown>', coredata_version)
        if not hasattr(obj, 'version'):
            raise MesonVersionMismatchException('<unknown>', coredata_version)
        if major_versions_differ(obj.version, coredata_version):
            raise MesonVersionMismatchException(obj.version, coredata_version)
    return objs

# Custom waiting primitives for asyncio

async def try_wait_one(*awaitables: T.Any, timeout: T.Optional[T.Union[int, float]]) -> None:
    """Wait for completion of one of the given futures, ignoring timeouts."""
    await asyncio.wait(awaitables,
                       timeout=timeout, return_when=asyncio.FIRST_COMPLETED)

async def queue_iter(q: 'asyncio.Queue[T.Optional[str]]') -> T.AsyncIterator[str]:
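    """Yield items from *q* until a None sentinel is dequeued."""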
    while True:
        item = await q.get()
        q.task_done()
        if item is None:
            break
        yield item

async def complete(future: asyncio.Future) -> None:
    """Wait for completion of the given future, ignoring cancellation."""
    try:
        await future
    except asyncio.CancelledError:
        pass

async def complete_all(futures: T.Iterable[asyncio.Future],
                       timeout: T.Optional[T.Union[int, float]] = None) -> None:
    """Wait for completion of all the given futures, ignoring cancellation.
       If timeout is not None, raise an asyncio.TimeoutError after the given
       time has passed.  asyncio.TimeoutError is only raised if some futures
       have not completed and none have raised exceptions, even if timeout
       is zero."""

    def check_futures(futures: T.Iterable[asyncio.Future]) -> None:
        # Raise exceptions if needed
        left = False
        for f in futures:
            if not f.done():
                left = True
            elif not f.cancelled():
                f.result()
        if left:
            raise asyncio.TimeoutError

    # Python is silly and does not have a variant of asyncio.wait with an
    # absolute time as deadline.
    deadline = None if timeout is None else asyncio.get_event_loop().time() + timeout
    while futures and (timeout is None or timeout > 0):
        done, futures = await asyncio.wait(futures, timeout=timeout,
                                           return_when=asyncio.FIRST_EXCEPTION)
        check_futures(done)
        if deadline:
            timeout = deadline - asyncio.get_event_loop().time()

    check_futures(futures)


class TestSubprocess:
    def __init__(self, p: asyncio.subprocess.Process,
                 stdout: T.Optional[int], stderr: T.Optional[int],
                 postwait_fn: T.Callable[[], None] = None):
        self._process = p
        self.stdout = stdout
        self.stderr = stderr
        self.stdo_task = None            # type: T.Optional[asyncio.Future[str]]
        self.stde_task = None            # type: T.Optional[asyncio.Future[str]]
        self.postwait_fn = postwait_fn   # type: T.Callable[[], None]
        self.all_futures = []            # type: T.List[asyncio.Future]

    def stdout_lines(self, console_mode: ConsoleUser) -> T.AsyncIterator[str]:
        q = asyncio.Queue()              # type: asyncio.Queue[T.Optional[str]]
        decode_coro = read_decode_lines(self._process.stdout, q, console_mode)
        self.stdo_task = asyncio.ensure_future(decode_coro)
        return queue_iter(q)

    def communicate(self,
                    console_mode: ConsoleUser,
                    line_handler: T.Callable[[str], None] = None) -> T.Tuple[T.Optional[T.Awaitable[str]], T.Optional[T.Awaitable[str]]]:
        # asyncio.ensure_future ensures that printing can
        # run in the background, even before it is awaited
        if self.stdo_task is None and self.stdout is not None:
            decode_coro = read_decode(self._process.stdout, line_handler)
            self.stdo_task = asyncio.ensure_future(decode_coro)
            self.all_futures.append(self.stdo_task)
        if self.stderr is not None and self.stderr != asyncio.subprocess.STDOUT:
            decode_coro = read_decode(self._process.stderr, line_handler)
            self.stde_task = asyncio.ensure_future(decode_coro)
            self.all_futures.append(self.stde_task)

        return self.stdo_task, self.stde_task

    async def _kill(self) -> T.Optional[str]:
        # Python does not provide multiplatform support for
        # killing a process and all its children so we need
        # to roll our own.
        p = self._process
        try:
            if is_windows():
                subprocess.run(['taskkill', '/F', '/T', '/PID', str(p.pid)])
            else:
                # Send a termination signal to the process group that setsid()
                # created - giving it a chance to perform any cleanup.
                os.killpg(p.pid, signal.SIGTERM)

                # Make sure the termination signal actually kills the process
                # group, otherwise retry with a SIGKILL.
                await try_wait_one(p.wait(), timeout=0.5)
                if p.returncode is not None:
                    return None

                os.killpg(p.pid, signal.SIGKILL)

            await try_wait_one(p.wait(), timeout=1)
            if p.returncode is not None:
                return None

            # An earlier kill attempt has not worked for whatever reason.
            # Try to kill it one last time with a direct call.
            # If the process has spawned children, they will remain around.
            p.kill()
            await try_wait_one(p.wait(), timeout=1)
            if p.returncode is not None:
                return None
            return 'Test process could not be killed.'
        except ProcessLookupError:
            # Sometimes (e.g. with Wine) this happens.  There's nothing
            # we can do, probably the process already died so just wait
            # for the event loop to pick that up.
            await p.wait()
            return None
        finally:
            if self.stdo_task:
                self.stdo_task.cancel()
            if self.stde_task:
                self.stde_task.cancel()

    async def wait(self, timeout: T.Optional[int]) -> T.Tuple[int, TestResult, T.Optional[str]]:
        p = self._process
        result = None
        additional_error = None

        self.all_futures.append(asyncio.ensure_future(p.wait()))
        try:
            await complete_all(self.all_futures, timeout=timeout)
        except asyncio.TimeoutError:
            additional_error = await self._kill()
            result = TestResult.TIMEOUT
        except asyncio.CancelledError:
            # The main loop must have seen Ctrl-C.
            additional_error = await self._kill()
            result = TestResult.INTERRUPT
        finally:
            if self.postwait_fn:
                self.postwait_fn()

        return p.returncode or 0, \
            result, \
            additional_error + '\n' if additional_error else ''

class SingleTestRunner:

    def __init__(self, test: TestSerialisation, env: T.Dict[str, str], name: str,
                 options: argparse.Namespace):
        self.test = test
        self.options = options
        self.cmd = self._get_cmd()

        if self.cmd and self.test.extra_paths:
            env['PATH'] = os.pathsep.join(self.test.extra_paths + ['']) + env['PATH']
            winecmd = []
            for c in self.cmd:
                winecmd.append(c)
                if os.path.basename(c).startswith('wine'):
                    env['WINEPATH'] = get_wine_shortpath(
                        winecmd,
                        ['Z:' + p for p in self.test.extra_paths] + env.get('WINEPATH', '').split(';')
                    )
                    break
        # If MALLOC_PERTURB_ is unset or set to an empty value (i.e. neither
        # the test nor the environment set it explicitly), pick a random
        # value ourselves. We do this for every regular test (but not for
        # benchmarks) because it is extremely useful to have.
        # Setting MALLOC_PERTURB_="0" completely disables this feature.
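        # (On glibc, MALLOC_PERTURB_=N makes malloc() fill new allocations
        # with the bitwise complement of N and free() fill freed memory with
        # N itself, which helps catch use of uninitialised or freed memory.)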
        if ('MALLOC_PERTURB_' not in env or not env['MALLOC_PERTURB_']) and not options.benchmark:
            env['MALLOC_PERTURB_'] = str(random.randint(1, 255))

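        # Determine the effective timeout: gdb sessions and tests without a
        # positive timeout never time out; otherwise the test's timeout is
        # scaled by the timeout multiplier, and a non-positive multiplier
        # disables timeouts entirely.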
        if self.options.gdb or self.test.timeout is None or self.test.timeout <= 0:
            timeout = None
        elif self.options.timeout_multiplier is None:
            timeout = self.test.timeout
        elif self.options.timeout_multiplier <= 0:
            timeout = None
        else:
            timeout = self.test.timeout * self.options.timeout_multiplier

        is_parallel = test.is_parallel and self.options.num_processes > 1 and not self.options.gdb
        self.runobj = TestRun(test, env, name, timeout, is_parallel)

        if self.options.gdb:
            self.console_mode = ConsoleUser.GDB
        elif self.options.verbose and not is_parallel and not self.runobj.needs_parsing:
            self.console_mode = ConsoleUser.STDOUT
        else:
            self.console_mode = ConsoleUser.LOGGER

    def _get_test_cmd(self) -> T.Optional[T.List[str]]:
        if self.test.fname[0].endswith('.jar'):
            return ['java', '-jar'] + self.test.fname
        elif not self.test.is_cross_built and run_with_mono(self.test.fname[0]):
            return ['mono'] + self.test.fname
        elif self.test.cmd_is_built and self.test.is_cross_built and self.test.needs_exe_wrapper:
            # Only built commands need the exe wrapper; commands that are
            # not built (e.g. Python scripts) can run natively on the
            # build machine.
            if self.test.exe_runner is None:
                # Cannot run the test on a cross-compiled executable
                # because there is no exe wrapper.
                return None
            if not self.test.exe_runner.found():
                msg = ('The exe_wrapper defined in the cross file {!r} was not '
                       'found. Please check the command and/or add it to PATH.')
                raise TestException(msg.format(self.test.exe_runner.name))
            return self.test.exe_runner.get_command() + self.test.fname
        return self.test.fname

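    # The full command is the gdb/wrapper prefix (if any) followed by the
    # test command itself, so the wrapper receives the test as its arguments.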
    def _get_cmd(self) -> T.Optional[T.List[str]]:
        test_cmd = self._get_test_cmd()
        if not test_cmd:
            return None
        return TestHarness.get_wrapper(self.options) + test_cmd

    @property
    def is_parallel(self) -> bool:
        return self.runobj.is_parallel

    @property
    def visible_name(self) -> str:
        return self.runobj.name

    @property
    def timeout(self) -> T.Optional[int]:
        return self.runobj.timeout

    async def run(self, harness: 'TestHarness') -> TestRun:
        if self.cmd is None:
            skip_stdout = 'Not run because cross-compiled binaries cannot be executed.'
            harness.log_start_test(self.runobj)
            self.runobj.complete_skip(skip_stdout)
        else:
            cmd = self.cmd + self.test.cmd_args + self.options.test_args
            self.runobj.start(cmd)
            harness.log_start_test(self.runobj)
            await self._run_cmd(harness, cmd)
        return self.runobj

    async def _run_subprocess(self, args: T.List[str], *,
                              stdout: int, stderr: int,
                              env: T.Dict[str, str], cwd: T.Optional[str]) -> TestSubprocess:
        # Let gdb handle ^C instead of us
        if self.options.gdb:
            previous_sigint_handler = signal.getsignal(signal.SIGINT)
            # Make the meson executable ignore SIGINT while gdb is running.
            signal.signal(signal.SIGINT, signal.SIG_IGN)

        def preexec_fn() -> None:
            if self.options.gdb:
                # Restore the SIGINT handler for the child process to
                # ensure it can handle it.
                signal.signal(signal.SIGINT, signal.SIG_DFL)
            else:
                # Put the test in its own session with setsid() so that
                # _kill() can signal the whole process group. We skip this
                # under gdb because gdb needs the controlling terminal to
                # handle ^C; detaching from it would cause tcsetpgrp()
                # errors and make the terminal unusable.
                os.setsid()

        def postwait_fn() -> None:
            if self.options.gdb:
                # Let us accept ^C again
                signal.signal(signal.SIGINT, previous_sigint_handler)

        p = await asyncio.create_subprocess_exec(*args,
                                                 stdout=stdout,
                                                 stderr=stderr,
                                                 env=env,
                                                 cwd=cwd,
                                                 preexec_fn=preexec_fn if not is_windows() else None)
        return TestSubprocess(p, stdout=stdout, stderr=stderr,
                              postwait_fn=postwait_fn if not is_windows() else None)

    async def _run_cmd(self, harness: 'TestHarness', cmd: T.List[str]) -> None:
        if self.console_mode is ConsoleUser.GDB:
            stdout = None
            stderr = None
        else:
            stdout = asyncio.subprocess.PIPE
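            # Merge stderr into stdout unless --split was requested or the
            # output needs to be parsed (such as TAP), in which case stdout
            # must be captured on its own.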
            stderr = asyncio.subprocess.STDOUT \
                if not self.options.split and not self.runobj.needs_parsing \
                else asyncio.subprocess.PIPE

        extra_cmd = []  # type: T.List[str]
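        # GoogleTest can emit a JUnit-compatible XML report; request one
        # next to the test so that detailed results can be read back later.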
        if self.test.protocol is TestProtocol.GTEST:
            gtestname = self.test.name
            if self.test.workdir:
                gtestname = os.path.join(self.test.workdir, self.test.name)
            extra_cmd.append(f'--gtest_output=xml:{gtestname}.xml')

        p = await self._run_subprocess(cmd + extra_cmd,
                                       stdout=stdout,
                                       stderr=stderr,
                                       env=self.runobj.env,
                                       cwd=self.test.workdir)

        parse_task = None
        if self.runobj.needs_parsing:
            parse_coro = self.runobj.parse(harness,
                                           p.stdout_lines(self.console_mode))
            parse_task = asyncio.ensure_future(parse_coro)

        if self.console_mode is ConsoleUser.STDOUT:
            prefix = harness.get_test_num_prefix(self.runobj.num)

            def printer(line: str) -> None:
                ConsoleLogger.print_test_details_line(prefix,
                                                      line,
                                                      flush=True,
                                                      end='')
            ConsoleLogger.print_test_details_header(prefix, 'output')
            stdo_task, stde_task = p.communicate(self.console_mode, printer)
        else:
            stdo_task, stde_task = p.communicate(self.console_mode)
        additional_output = ''
        returncode, result, additional_error = await p.wait(self.runobj.timeout)

        if parse_task is not None:
            res, additional_output, error = await parse_task
            if error:
                additional_error = join_lines(additional_error, error)
            result = result or res

        stdo = await stdo_task if stdo_task else ''
        stde = await stde_task if stde_task else ''
        self.runobj.complete(returncode,
                             result,
                             stdo.strip(),
                             stde.strip(),
                             additional_output.strip(),
                             additional_error.strip())


class TestHarness:
    def __init__(self, options: argparse.Namespace):
        self.options = options
        self.collected_failures = []  # type: T.List[TestRun]
        self.fail_count = 0
        self.expectedfail_count = 0
        self.unexpectedpass_count = 0
        self.success_count = 0
        self.skip_count = 0
        self.timeout_count = 0
        self.test_count = 0
        self.name_max_len = 0
        self.is_run = False
        self.loggers = []         # type: T.List[TestLogger]
        self.loggers.append(ConsoleLogger())
        self.need_console = False

        self.logfile_base = None  # type: T.Optional[str]
        if self.options.logbase and not self.options.gdb:
            namebase = None
            self.logfile_base = os.path.join(self.options.wd, 'meson-logs', self.options.logbase)

            if self.options.wrapper:
                namebase = os.path.basename(self.get_wrapper(self.options)[0])
            elif self.options.setup:
                namebase = self.options.setup.replace(":", "_")

            if namebase:
                self.logfile_base += '-' + namebase.replace(' ', '_')

        startdir = os.getcwd()
        try:
            os.chdir(self.options.wd)
            self.build_data = build.load(os.getcwd())
            if not self.options.setup:
                self.options.setup = self.build_data.test_setup_default_name
            if self.options.benchmark:
                self.tests = self.load_tests('meson_benchmark_setup.dat')
            else:
                self.tests = self.load_tests('meson_test_setup.dat')
        finally:
            os.chdir(startdir)

        ss = set()
        for t in self.tests:
            for s in t.suite:
                ss.add(s)
        self.suites = list(ss)

    def load_tests(self, file_name: str) -> T.List[TestSerialisation]:
        datafile = Path('meson-private') / file_name
        if not datafile.is_file():
            raise TestException(f'Directory {self.options.wd!r} does not seem to be a Meson build directory.')
        with datafile.open('rb') as f:
            objs = check_testdata(pickle.load(f))
        return objs

    def __enter__(self) -> 'TestHarness':
        return self

    def __exit__(self, exc_type: T.Any, exc_value: T.Any, traceback: T.Any) -> None:
        self.close_logfiles()

    def close_logfiles(self) -> None:
        for l in self.loggers:
            l.close()

    def get_test_setup(self, test: T.Optional[TestSerialisation]) -> build.TestSetup:
        if ':' in self.options.setup:
            if self.options.setup not in self.build_data.test_setups:
                sys.exit(f"Unknown test setup '{self.options.setup}'.")
            return self.build_data.test_setups[self.options.setup]
        else:
            full_name = test.project_name + ":" + self.options.setup
            if full_name not in self.build_data.test_setups:
                sys.exit(f"Test setup '{self.options.setup}' not found in project '{test.project_name}'.")
            return self.build_data.test_setups[full_name]

    def merge_setup_options(self, options: argparse.Namespace, test: TestSerialisation) -> T.Dict[str, str]:
        current = self.get_test_setup(test)
        if not options.gdb:
            options.gdb = current.gdb
        if options.gdb:
            options.verbose = True
        if options.timeout_multiplier is None:
            options.timeout_multiplier = current.timeout_multiplier
    #    if options.env is None:
    #        options.env = current.env # FIXME, should probably merge options here.
        if options.wrapper is None:
            options.wrapper = current.exe_wrapper
        elif current.exe_wrapper:
            sys.exit('Conflict: both test setup and command line specify an exe wrapper.')
        return current.env.get_env(os.environ.copy())

    def get_test_runner(self, test: TestSerialisation) -> SingleTestRunner:
        name = self.get_pretty_suite(test)
        options = deepcopy(self.options)
        if self.options.setup:
            env = self.merge_setup_options(options, test)
        else:
            env = os.environ.copy()
        test_env = test.env.get_env(env)
        env.update(test_env)
        if (test.is_cross_built and test.needs_exe_wrapper and
                test.exe_runner and test.exe_runner.found()):
            env['MESON_EXE_WRAPPER'] = join_args(test.exe_runner.get_command())
        return SingleTestRunner(test, env, name, options)

    def process_test_result(self, result: TestRun) -> None:
        if result.res is TestResult.TIMEOUT:
            self.timeout_count += 1
        elif result.res is TestResult.SKIP:
            self.skip_count += 1
        elif result.res is TestResult.OK:
            self.success_count += 1
        elif result.res in {TestResult.FAIL, TestResult.ERROR, TestResult.INTERRUPT}:
            self.fail_count += 1
        elif result.res is TestResult.EXPECTEDFAIL:
            self.expectedfail_count += 1
        elif result.res is TestResult.UNEXPECTEDPASS:
            self.unexpectedpass_count += 1
        else:
            sys.exit(f'Unknown test result encountered: {result.res}')

        if result.res.is_bad():
            self.collected_failures.append(result)
        for l in self.loggers:
            l.log(self, result)

    @property
    def numlen(self) -> int:
        return len(str(self.test_count))

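    # Width of the "  1/100 " test-number prefix: two counters of numlen
    # digits each, plus the separating slash and the trailing space.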
    @property
    def max_left_width(self) -> int:
        return 2 * self.numlen + 2

    def get_test_num_prefix(self, num: int) -> str:
        return '{num:{numlen}}/{testcount} '.format(numlen=self.numlen,
                                                    num=num,
                                                    testcount=self.test_count)

    def format(self, result: TestRun, colorize: bool,
               max_left_width: int = 0,
               left: T.Optional[str] = None,
               middle: T.Optional[str] = None,
               right: T.Optional[str] = None) -> str:
        if left is None:
            left = self.get_test_num_prefix(result.num)

        # A non-default max_left_width lets the logger print more stuff before the
        # name, while ensuring that the rightmost columns remain aligned.
        max_left_width = max(max_left_width, self.max_left_width)

        if middle is None:
            middle = result.name
        extra_mid_width = max_left_width + self.name_max_len + 1 - uniwidth(middle) - uniwidth(left)
        middle += ' ' * max(1, extra_mid_width)

        if right is None:
            right = '{res} {dur:{durlen}.2f}s'.format(
                res=result.res.get_text(colorize),
                dur=result.duration,
                durlen=self.duration_max_len + 3)
            if not (result.res.is_bad() and self.options.print_errorlogs) \
                    and not self.options.verbose \
                    and (result.res.is_bad() or result.needs_parsing):
                detail = result.detail()
                if detail:
                    right += '   ' + detail
        return left + middle + right

    def summary(self) -> str:
        return textwrap.dedent('''\

            Ok:                 {:<4}
            Expected Fail:      {:<4}
            Fail:               {:<4}
            Unexpected Pass:    {:<4}
            Skipped:            {:<4}
            Timeout:            {:<4}
            ''').format(self.success_count, self.expectedfail_count, self.fail_count,
                        self.unexpectedpass_count, self.skip_count, self.timeout_count)

    def total_failure_count(self) -> int:
        return self.fail_count + self.unexpectedpass_count + self.timeout_count

    def doit(self) -> int:
        if self.is_run:
            raise RuntimeError('Test harness object can only be used once.')
        self.is_run = True
        tests = self.get_tests()
        if not tests:
            return 0
        if not self.options.no_rebuild and not rebuild_deps(self.options.wd, tests):
            # We return 125 here in case the build failed.
            # The reason is that exit code 125 tells `git bisect run` that the current
            # commit should be skipped.  Thus users can directly use `meson test` to
            # bisect without needing to handle the does-not-build case separately in a
            # wrapper script.
            sys.exit(125)

        self.name_max_len = max([uniwidth(self.get_pretty_suite(test)) for test in tests])
        startdir = os.getcwd()
        try:
            os.chdir(self.options.wd)
            runners = []             # type: T.List[SingleTestRunner]
            for i in range(self.options.repeat):
                runners.extend(self.get_test_runner(test) for test in tests)
                if i == 0:
                    self.duration_max_len = max([len(str(int(runner.timeout or 99)))
                                                 for runner in runners])
                    # Disable the progress report if it gets in the way
                    self.need_console = any(runner.console_mode is not ConsoleUser.LOGGER
                                            for runner in runners)

            self.test_count = len(runners)
            self.run_tests(runners)
        finally:
            os.chdir(startdir)
        return self.total_failure_count()

    @staticmethod
    def split_suite_string(suite: str) -> T.Tuple[str, str]:
        if ':' in suite:
            split = suite.split(':', 1)
            assert len(split) == 2
            return split[0], split[1]
        else:
            return suite, ""

    @staticmethod
    def test_in_suites(test: TestSerialisation, suites: T.List[str]) -> bool:
        for suite in suites:
            (prj_match, st_match) = TestHarness.split_suite_string(suite)
            for prjst in test.suite:
                (prj, st) = TestHarness.split_suite_string(prjst)

                # The SUITE argument can be passed as either
                #     suite_name
                # or
                #     project_name:suite_name
                # so we need to select only the tests that belong to
                # project_name.

                # This if handles the first case (i.e., SUITE == suite_name):
                # it lets us run tests belonging to different (sub)projects
                # that share the same suite_name.
                if not st_match and st == prj_match:
                    return True

                # These two conditions handle the second case
                # (i.e., SUITE == project_name:suite_name): together they
                # select only the tests of project_name with suite_name.
                if prj_match and prj != prj_match:
                    continue
                if st_match and st != st_match:
                    continue
                return True
        return False

    def test_suitable(self, test: TestSerialisation) -> bool:
        if TestHarness.test_in_suites(test, self.options.exclude_suites):
            return False

        if self.options.include_suites:
            # Suites given on the command line both force inclusion
            # (overriding add_test_setup) and exclude everything else
            return TestHarness.test_in_suites(test, self.options.include_suites)

        if self.options.setup:
            setup = self.get_test_setup(test)
            if TestHarness.test_in_suites(test, setup.exclude_suites):
                return False

        return True

    def tests_from_args(self, tests: T.List[TestSerialisation]) -> T.Generator[TestSerialisation, None, None]:
        '''
        Allow specifying test names like "meson test foo1 foo2", where the
        tests were defined as test('foo1', ...) and test('foo2', ...).

        Also support specifying the subproject to run tests from, like
        "meson test subproj:" (all tests inside subproj) or "meson test
        subproj:foo1" to run foo1 inside subproj. Coincidentally, "meson
        test :foo1" runs all tests with that name across all subprojects,
        which is identical to "meson test foo1".
        '''
        for arg in self.options.args:
            if ':' in arg:
                subproj, name = arg.split(':', maxsplit=1)
            else:
                subproj, name = '', arg
            for t in tests:
                if subproj and t.project_name != subproj:
                    continue
                if name and t.name != name:
                    continue
                yield t

    def get_tests(self) -> T.List[TestSerialisation]:
        if not self.tests:
            print('No tests defined.')
            return []

        tests = [t for t in self.tests if self.test_suitable(t)]
        if self.options.args:
            tests = list(self.tests_from_args(tests))

        if not tests:
            print('No suitable tests defined.')
            return []

        return tests

    def flush_logfiles(self) -> None:
        for l in self.loggers:
            l.flush()

    def open_logfiles(self) -> None:
        if not self.logfile_base:
            return

        self.loggers.append(JunitBuilder(self.logfile_base + '.junit.xml'))
        self.loggers.append(JsonLogfileBuilder(self.logfile_base + '.json'))
        self.loggers.append(TextLogfileBuilder(self.logfile_base + '.txt', errors='surrogateescape'))

    @staticmethod
    def get_wrapper(options: argparse.Namespace) -> T.List[str]:
        wrap = []  # type: T.List[str]
        if options.gdb:
            wrap = [options.gdb_path, '--quiet', '--nh']
            if options.repeat > 1:
                wrap += ['-ex', 'run', '-ex', 'quit']
            # Signal the end of arguments to gdb
            wrap += ['--args']
        if options.wrapper:
            wrap += options.wrapper
        return wrap

    def get_pretty_suite(self, test: TestSerialisation) -> str:
        if len(self.suites) > 1 and test.suite:
            rv = TestHarness.split_suite_string(test.suite[0])[0]
            s = "+".join(TestHarness.split_suite_string(s)[1] for s in test.suite)
            if s:
                rv += ":"
            return rv + s + " / " + test.name
        else:
            return test.name

    def run_tests(self, runners: T.List[SingleTestRunner]) -> None:
        try:
            self.open_logfiles()
            # Replace with asyncio.run once we can require Python 3.7
            loop = asyncio.get_event_loop()
            loop.run_until_complete(self._run_tests(runners))
        finally:
            self.close_logfiles()

    def log_subtest(self, test: TestRun, s: str, res: TestResult) -> str:
        rv = ''
        for l in self.loggers:
            tmp = l.log_subtest(self, test, s, res)
            if tmp:
                rv += tmp
        return rv

    def log_start_test(self, test: TestRun) -> None:
        for l in self.loggers:
            l.start_test(self, test)

    async def _run_tests(self, runners: T.List[SingleTestRunner]) -> None:
        semaphore = asyncio.Semaphore(self.options.num_processes)
        futures = deque()  # type: T.Deque[asyncio.Future]
        running_tests = dict() # type: T.Dict[asyncio.Future, str]
        interrupted = False
        ctrlc_times = deque(maxlen=MAX_CTRLC) # type: T.Deque[float]

        async def run_test(test: SingleTestRunner) -> None:
            async with semaphore:
                if interrupted or (self.options.repeat > 1 and self.fail_count):
                    return
                res = await test.run(self)
                self.process_test_result(res)

        def test_done(f: asyncio.Future) -> None:
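            # result() re-raises any exception raised inside run_test(), so
            # unexpected test-runner errors are not silently swallowed.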
            if not f.cancelled():
                f.result()
            futures.remove(f)
            try:
                del running_tests[f]
            except KeyError:
                pass

        def cancel_one_test(warn: bool) -> None:
            future = futures.popleft()
            futures.append(future)
            if warn:
                self.flush_logfiles()
                mlog.warning('CTRL-C detected, interrupting {}'.format(running_tests[future]))
            del running_tests[future]
            future.cancel()

        def cancel_all_tests() -> None:
            nonlocal interrupted
            interrupted = True
            while running_tests:
                cancel_one_test(False)

        def sigterm_handler() -> None:
            if interrupted:
                return
            self.flush_logfiles()
            mlog.warning('Received SIGTERM, exiting')
            cancel_all_tests()

        def sigint_handler() -> None:
            # Cancel the longest-running future that has not been cancelled
            # yet (the leftmost entry in the deque); if all the tests have
            # already been Ctrl-C'ed, just stop.
            nonlocal interrupted
            if interrupted:
                return
            ctrlc_times.append(asyncio.get_event_loop().time())
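            # ctrlc_times holds at most MAX_CTRLC timestamps, so once it is
            # full and the first and last presses are less than a second
            # apart, MAX_CTRLC Ctrl-C's arrived within one second.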
            if len(ctrlc_times) == MAX_CTRLC and ctrlc_times[-1] - ctrlc_times[0] < 1:
                self.flush_logfiles()
                mlog.warning('CTRL-C detected, exiting')
                cancel_all_tests()
            elif running_tests:
                cancel_one_test(True)
            else:
                self.flush_logfiles()
                mlog.warning('CTRL-C detected, exiting')
                interrupted = True

        for l in self.loggers:
            l.start(self)

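        # asyncio signal handlers are only available on Unix event loops;
        # on Windows, Ctrl-C is left to the default handling.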
        if sys.platform != 'win32':
            asyncio.get_event_loop().add_signal_handler(signal.SIGINT, sigint_handler)
            asyncio.get_event_loop().add_signal_handler(signal.SIGTERM, sigterm_handler)
        try:
            for runner in runners:
                if not runner.is_parallel:
                    await complete_all(futures)
                future = asyncio.ensure_future(run_test(runner))
                futures.append(future)
                running_tests[future] = runner.visible_name
                future.add_done_callback(test_done)
                if not runner.is_parallel:
                    await complete(future)
                if self.options.repeat > 1 and self.fail_count:
                    break

            await complete_all(futures)
        finally:
            if sys.platform != 'win32':
                asyncio.get_event_loop().remove_signal_handler(signal.SIGINT)
                asyncio.get_event_loop().remove_signal_handler(signal.SIGTERM)
            for l in self.loggers:
                await l.finish(self)

def list_tests(th: TestHarness) -> bool:
    tests = th.get_tests()
    for t in tests:
        print(th.get_pretty_suite(t))
    return not tests

def rebuild_deps(wd: str, tests: T.List[TestSerialisation]) -> bool:
    def convert_path_to_target(path: str) -> str:
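        # Ninja target names are relative to the build directory and always
        # use forward slashes, even on Windows.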
        path = os.path.relpath(path, wd)
        if os.sep != '/':
            path = path.replace(os.sep, '/')
        return path

    if not (Path(wd) / 'build.ninja').is_file():
        print('Only the ninja backend supports rebuilding tests before running them.')
        return True

    ninja = environment.detect_ninja()
    if not ninja:
        print("Can't find ninja, can't rebuild tests.")
        return False

    depends = set()            # type: T.Set[str]
    targets = set()            # type: T.Set[str]
    intro_targets = dict()     # type: T.Dict[str, T.List[str]]
    for target in load_info_file(get_infodir(wd), kind='targets'):
        intro_targets[target['id']] = [
            convert_path_to_target(f)
            for f in target['filename']]
    for t in tests:
        for d in t.depends:
            if d in depends:
                continue
            depends.add(d)
            targets.update(intro_targets[d])

    ret = subprocess.run(ninja + ['-C', wd] + sorted(targets)).returncode
    if ret != 0:
        print(f'Could not rebuild {wd}')
        return False

    return True

def run(options: argparse.Namespace) -> int:
    if options.benchmark:
        options.num_processes = 1

    if options.verbose and options.quiet:
        print('Cannot be both quiet and verbose at the same time.')
        return 1

    check_bin = None
    if options.gdb:
        options.verbose = True
        if options.wrapper:
            print('Must not specify both a wrapper and gdb at the same time.')
            return 1
        check_bin = 'gdb'

    if options.wrapper:
        check_bin = options.wrapper[0]

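    # On Windows, the default Selector event loop cannot manage subprocesses,
    # so switch to the Proactor event loop, which can.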
    if sys.platform == 'win32':
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)

    if check_bin is not None:
        exe = ExternalProgram(check_bin, silent=True)
        if not exe.found():
            print(f'Could not find requested program: {check_bin!r}')
            return 1

    with TestHarness(options) as th:
        try:
            if options.list:
                return list_tests(th)
            return th.doit()
        except TestException as e:
            print('Meson test encountered an error:\n')
            if os.environ.get('MESON_FORCE_BACKTRACE'):
                raise e
            else:
                print(e)
            return 1

def run_with_args(args: T.List[str]) -> int:
    parser = argparse.ArgumentParser(prog='meson test')
    add_arguments(parser)
    options = parser.parse_args(args)
    return run(options)