1# Copyright 2016-2017 The Meson development team
2
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6
7#     http://www.apache.org/licenses/LICENSE-2.0
8
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15# A tool to run tests in many different ways.
16
17from pathlib import Path
18from collections import deque
19from copy import deepcopy
20import argparse
21import asyncio
22import datetime
23import enum
24import json
25import multiprocessing
26import os
27import pickle
28import platform
29import random
30import re
31import signal
32import subprocess
33import shlex
34import sys
35import textwrap
36import time
37import typing as T
38import unicodedata
39import xml.etree.ElementTree as et
40
41from . import build
42from . import environment
43from . import mlog
44from .coredata import major_versions_differ, MesonVersionMismatchException
45from .coredata import version as coredata_version
46from .mesonlib import (MesonException, OrderedSet, RealPathAction,
47                       get_wine_shortpath, join_args, split_args, setup_vsenv)
48from .mintro import get_infodir, load_info_file
49from .programs import ExternalProgram
50from .backend.backends import TestProtocol, TestSerialisation
51
# GNU autotools interprets a return code of 77 from tests it executes to
# mean that the test should be skipped.
GNU_SKIP_RETURNCODE = 77

# GNU autotools interprets a return code of 99 from tests it executes to
# mean that the test failed even before testing what it is supposed to test.
GNU_ERROR_RETURNCODE = 99

# Exit if 3 Ctrl-C's are received within one second
MAX_CTRLC = 3
62
def is_windows() -> bool:
    """Return True when running on native Windows (Cygwin is excluded)."""
    return platform.system().lower() == 'windows'
66
def is_cygwin() -> bool:
    """Return True when running under Cygwin's Python build."""
    return 'cygwin' == sys.platform
69
UNIWIDTH_MAPPING = {'F': 2, 'H': 1, 'W': 2, 'Na': 1, 'N': 1, 'A': 1}
def uniwidth(s: str) -> int:
    """Return the display width of *s* in terminal columns.

    East-Asian Fullwidth and Wide characters occupy two columns; every
    other east-asian-width class counts as one.
    """
    return sum(UNIWIDTH_MAPPING[unicodedata.east_asian_width(ch)] for ch in s)
77
def determine_worker_count() -> int:
    """Return the number of parallel test processes to use.

    $MESON_TESTTHREADS wins when set (falling back to 1 with a warning
    if it is not an integer); otherwise the machine's CPU count is used,
    or 1 where it cannot be determined.
    """
    varname = 'MESON_TESTTHREADS'
    value = os.environ.get(varname)
    if value is not None:
        try:
            return int(value)
        except ValueError:
            print(f'Invalid value in {varname}, using 1 thread.')
            return 1
    try:
        # cpu_count() fails in some weird environments such as the
        # Debian reproducible build sandbox.
        return multiprocessing.cpu_count()
    except Exception:
        return 1
94
def add_arguments(parser: argparse.ArgumentParser) -> None:
    """Register all "meson test" command-line options on *parser*."""
    # Execution-control options.
    parser.add_argument('--repeat', default=1, dest='repeat', type=int,
                        help='Number of times to run the tests.')
    parser.add_argument('--no-rebuild', default=False, action='store_true',
                        help='Do not rebuild before running tests.')
    parser.add_argument('--gdb', default=False, dest='gdb', action='store_true',
                        help='Run test under gdb.')
    parser.add_argument('--gdb-path', default='gdb', dest='gdb_path',
                        help='Path to the gdb binary (default: gdb).')
    parser.add_argument('--list', default=False, dest='list', action='store_true',
                        help='List available tests.')
    parser.add_argument('--wrapper', default=None, dest='wrapper', type=split_args,
                        help='wrapper to run tests with (e.g. Valgrind)')
    parser.add_argument('-C', dest='wd', action=RealPathAction,
                        # https://github.com/python/typeshed/issues/3107
                        # https://github.com/python/mypy/issues/7177
                        type=os.path.abspath,  # type: ignore
                        help='directory to cd into before running')
    # Test-selection options: suites may be included/excluded repeatedly.
    parser.add_argument('--suite', default=[], dest='include_suites', action='append', metavar='SUITE',
                        help='Only run tests belonging to the given suite.')
    parser.add_argument('--no-suite', default=[], dest='exclude_suites', action='append', metavar='SUITE',
                        help='Do not run tests belonging to the given suite.')
    # Note: --no-stdsplit stores False into 'split' (default True).
    parser.add_argument('--no-stdsplit', default=True, dest='split', action='store_false',
                        help='Do not split stderr and stdout in test logs.')
    parser.add_argument('--print-errorlogs', default=False, action='store_true',
                        help="Whether to print failing tests' logs.")
    parser.add_argument('--benchmark', default=False, action='store_true',
                        help="Run benchmarks instead of tests.")
    parser.add_argument('--logbase', default='testlog',
                        help="Base name for log file.")
    parser.add_argument('--num-processes', default=determine_worker_count(), type=int,
                        help='How many parallel processes to use.')
    parser.add_argument('-v', '--verbose', default=False, action='store_true',
                        help='Do not redirect stdout and stderr')
    parser.add_argument('-q', '--quiet', default=False, action='store_true',
                        help='Produce less output to the terminal.')
    parser.add_argument('-t', '--timeout-multiplier', type=float, default=None,
                        help='Define a multiplier for test timeout, for example '
                        ' when running tests in particular conditions they might take'
                        ' more time to execute. (<= 0 to disable timeout)')
    parser.add_argument('--setup', default=None, dest='setup',
                        help='Which test setup to use.')
    parser.add_argument('--test-args', default=[], type=split_args,
                        help='Arguments to pass to the specified test(s) or all tests')
    # Positional test names; see the help string for the accepted forms.
    parser.add_argument('args', nargs='*',
                        help='Optional list of test names to run. "testname" to run all tests with that name, '
                        '"subprojname:testname" to specifically run "testname" from "subprojname", '
                        '"subprojname:" to run all tests defined by "subprojname".')
144
def print_safe(s: str) -> None:
    """Print *s*, adding a trailing newline only when it lacks one.

    Falls back to backslash-escaped ASCII when stdout's encoding cannot
    represent the text (UnicodeEncodeError).

    Bug fix: the previous code inspected ``s[-1]`` unconditionally,
    which raised IndexError for an empty string; ``str.endswith``
    handles that case safely.
    """
    end = '' if s.endswith('\n') else '\n'
    try:
        print(s, end=end)
    except UnicodeEncodeError:
        s = s.encode('ascii', errors='backslashreplace').decode('ascii')
        print(s, end=end)
152
def join_lines(a: str, b: str) -> str:
    """Join two log fragments with a newline, dropping the separator
    when either fragment is empty."""
    if a and b:
        return a + '\n' + b
    return a or b
159
def dashes(s: str, dash: str, cols: int) -> str:
    """Center *s* (surrounded by single spaces) in a *cols*-column row
    of *dash* characters; an empty *s* yields a full dash line.

    Width accounting uses uniwidth() so wide CJK characters are counted
    as two columns.
    """
    if not s:
        return dash * cols
    text = f' {s} '
    width = uniwidth(text)
    left = (cols - width) // 2
    right = cols - left - width
    return dash * left + text + dash * right
168
def returncode_to_status(retcode: int) -> str:
    """Describe a subprocess return code in human-readable form.

    We can't use os.WIFSIGNALED() and friends here because subprocess
    munges the status: death by signal is reported as a negative return
    code rather than the raw wait() status.  Also, a shell sitting
    between Meson and the actual test typically converts a signal death
    into an exit status of 128 plus the signal number, which the final
    branch decodes speculatively.
    """
    def signame_of(signum: int) -> str:
        # Map a signal number to its name, tolerating unknown values.
        try:
            return signal.Signals(signum).name
        except ValueError:
            return 'SIGinvalid'

    if retcode < 0:
        signum = -retcode
        return f'killed by signal {signum} {signame_of(signum)}'

    if retcode <= 128:
        return f'exit status {retcode}'

    signum = retcode - 128
    return f'(exit status {retcode} or signal {signum} {signame_of(signum)})'
193
# TODO for Windows
if platform.system().lower() == 'windows':
    sh_quote: T.Callable[[str], str] = lambda x: x
else:
    sh_quote = shlex.quote

def env_tuple_to_str(env: T.Iterable[T.Tuple[str, str]]) -> str:
    """Render (name, value) pairs as a shell-style 'NAME=value ' prefix,
    quoting each value for the POSIX shell where applicable."""
    return ''.join(f'{k}={sh_quote(v)} ' for k, v in env)
201
202
class TestException(MesonException):
    """Raised for errors internal to the test harness itself."""
    pass
205
206
@enum.unique
class ConsoleUser(enum.Enum):
    """Who currently owns the console, which determines whether progress
    output may be written to it."""

    # the logger can use the console
    LOGGER = 0

    # the console is used by gdb
    GDB = 1

    # the console is used to write stdout/stderr
    STDOUT = 2
218
219
@enum.unique
class TestResult(enum.Enum):
    """Lifecycle state and final outcome of a single test run."""

    PENDING = 'PENDING'
    RUNNING = 'RUNNING'
    OK = 'OK'
    TIMEOUT = 'TIMEOUT'
    INTERRUPT = 'INTERRUPT'
    SKIP = 'SKIP'
    FAIL = 'FAIL'
    EXPECTEDFAIL = 'EXPECTEDFAIL'
    UNEXPECTEDPASS = 'UNEXPECTEDPASS'
    ERROR = 'ERROR'

    @staticmethod
    def maxlen() -> int:
        """Width of the longest result name, i.e. len('UNEXPECTEDPASS')."""
        return 14

    def is_ok(self) -> bool:
        """True for outcomes that count as success."""
        return self in (TestResult.OK, TestResult.EXPECTEDFAIL)

    def is_bad(self) -> bool:
        """True for outcomes that count as failure."""
        return self in (TestResult.FAIL, TestResult.TIMEOUT,
                        TestResult.INTERRUPT, TestResult.UNEXPECTEDPASS,
                        TestResult.ERROR)

    def is_finished(self) -> bool:
        """True once the test is no longer queued or executing."""
        return self not in (TestResult.PENDING, TestResult.RUNNING)

    def was_killed(self) -> bool:
        """True when the harness terminated the test process."""
        return self in (TestResult.TIMEOUT, TestResult.INTERRUPT)

    def colorize(self, s: str) -> 'mlog.AnsiDecorator':
        """Wrap *s* in the ANSI color conventional for this result:
        red for failures, yellow for skips/expected failures, green for
        success, blue while still pending or running."""
        if self.is_bad():
            return mlog.red(s)
        if self in (TestResult.SKIP, TestResult.EXPECTEDFAIL):
            return mlog.yellow(s)
        if self.is_finished():
            return mlog.green(s)
        return mlog.blue(s)

    def get_text(self, colorize: bool) -> str:
        """Result name left-justified to maxlen(), optionally colorized."""
        padded = '{res:{reslen}}'.format(res=self.value, reslen=self.maxlen())
        return self.colorize(padded).get_text(colorize)

    def get_command_marker(self) -> str:
        """Colorized '>>> ' prefix printed before command lines."""
        return str(self.colorize('>>> '))
268
269
270TYPE_TAPResult = T.Union['TAPParser.Test', 'TAPParser.Error', 'TAPParser.Version', 'TAPParser.Plan', 'TAPParser.Bailout']
271
class TAPParser:
    """Incremental parser for the Test Anything Protocol (TAP).

    Lines are fed one at a time to parse_line() (or through the parse()
    / parse_async() drivers); each call yields zero or more events drawn
    from the Test/Plan/Bailout/Version/Error named tuples below.  Feed a
    final ``None`` line to signal end of input so plan/test-count
    mismatches can be diagnosed.
    """

    class Plan(T.NamedTuple):
        # the "1..N" line; late means it appeared after the first test
        num_tests: int
        late: bool
        skipped: bool  # whole file skipped, e.g. "1..0 # SKIP ..."
        explanation: T.Optional[str]

    class Bailout(T.NamedTuple):
        # "Bail out!" line: abort the whole test file
        message: str

    class Test(T.NamedTuple):
        # one "ok"/"not ok" line, already mapped to a TestResult
        number: int
        name: str
        result: TestResult
        explanation: T.Optional[str]

        def __str__(self) -> str:
            return f'{self.number} {self.name}'.strip()

    class Error(T.NamedTuple):
        # protocol-level problem detected by this parser
        message: str

    class Version(T.NamedTuple):
        # "TAP version N" declaration (only valid on the first line)
        version: int

    # Parser states: between tests, just after a test line (a YAML block
    # may follow in TAP 13+), or inside a YAML diagnostics block.
    _MAIN = 1
    _AFTER_TEST = 2
    _YAML = 3

    _RE_BAILOUT = re.compile(r'Bail out!\s*(.*)')
    _RE_DIRECTIVE = re.compile(r'(?:\s*\#\s*([Ss][Kk][Ii][Pp]\S*|[Tt][Oo][Dd][Oo])\b\s*(.*))?')
    _RE_PLAN = re.compile(r'1\.\.([0-9]+)' + _RE_DIRECTIVE.pattern)
    _RE_TEST = re.compile(r'((?:not )?ok)\s*(?:([0-9]+)\s*)?([^#]*)' + _RE_DIRECTIVE.pattern)
    _RE_VERSION = re.compile(r'TAP version ([0-9]+)')
    _RE_YAML_START = re.compile(r'(\s+)---.*')
    _RE_YAML_END = re.compile(r'\s+\.\.\.\s*')

    # Per-parse state; mutated through self, so these class-level values
    # act as per-instance defaults.
    found_late_test = False
    bailed_out = False
    plan: T.Optional[Plan] = None
    lineno = 0
    num_tests = 0
    yaml_lineno: T.Optional[int] = None
    yaml_indent = ''
    state = _MAIN
    version = 12  # assumed until an explicit "TAP version" line says otherwise

    def parse_test(self, ok: bool, num: int, name: str, directive: T.Optional[str], explanation: T.Optional[str]) -> \
            T.Generator[T.Union['TAPParser.Test', 'TAPParser.Error'], None, None]:
        """Translate one matched test line into Test and/or Error events.

        SKIP and TODO directives map onto the corresponding TestResult
        values; note that "not ok # SKIP" deliberately falls through to
        a plain failure below.
        """
        name = name.strip()
        explanation = explanation.strip() if explanation else None
        if directive is not None:
            directive = directive.upper()
            if directive.startswith('SKIP'):
                if ok:
                    yield self.Test(num, name, TestResult.SKIP, explanation)
                    return
            elif directive == 'TODO':
                yield self.Test(num, name, TestResult.UNEXPECTEDPASS if ok else TestResult.EXPECTEDFAIL, explanation)
                return
            else:
                yield self.Error(f'invalid directive "{directive}"')

        yield self.Test(num, name, TestResult.OK if ok else TestResult.FAIL, explanation)

    async def parse_async(self, lines: T.AsyncIterator[str]) -> T.AsyncIterator[TYPE_TAPResult]:
        """Drive parse_line() over an async stream of lines, including
        the final end-of-input call."""
        async for line in lines:
            for event in self.parse_line(line):
                yield event
        for event in self.parse_line(None):
            yield event

    def parse(self, io: T.Iterator[str]) -> T.Iterator[TYPE_TAPResult]:
        """Drive parse_line() over an iterator of lines, including the
        final end-of-input call."""
        for line in io:
            yield from self.parse_line(line)
        yield from self.parse_line(None)

    def parse_line(self, line: T.Optional[str]) -> T.Iterator[TYPE_TAPResult]:
        """Consume one line (or ``None`` for end of input) and yield the
        resulting TAP events."""
        if line is not None:
            self.lineno += 1
            line = line.rstrip()

            # YAML blocks are only accepted after a test
            if self.state == self._AFTER_TEST:
                if self.version >= 13:
                    m = self._RE_YAML_START.match(line)
                    if m:
                        self.state = self._YAML
                        self.yaml_lineno = self.lineno
                        self.yaml_indent = m.group(1)
                        return
                self.state = self._MAIN

            elif self.state == self._YAML:
                if self._RE_YAML_END.match(line):
                    self.state = self._MAIN
                    return
                if line.startswith(self.yaml_indent):
                    return
                # dedented line inside a YAML block: report and recover
                yield self.Error(f'YAML block not terminated (started on line {self.yaml_lineno})')
                self.state = self._MAIN

            assert self.state == self._MAIN
            # comment lines are ignored entirely
            if line.startswith('#'):
                return

            m = self._RE_TEST.match(line)
            if m:
                if self.plan and self.plan.late and not self.found_late_test:
                    yield self.Error('unexpected test after late plan')
                    self.found_late_test = True
                self.num_tests += 1
                # a missing test number means "next in sequence"
                num = self.num_tests if m.group(2) is None else int(m.group(2))
                if num != self.num_tests:
                    yield self.Error('out of order test numbers')
                yield from self.parse_test(m.group(1) == 'ok', num,
                                           m.group(3), m.group(4), m.group(5))
                self.state = self._AFTER_TEST
                return

            m = self._RE_PLAN.match(line)
            if m:
                if self.plan:
                    yield self.Error('more than one plan found')
                else:
                    num_tests = int(m.group(1))
                    skipped = (num_tests == 0)
                    if m.group(2):
                        if m.group(2).upper().startswith('SKIP'):
                            if num_tests > 0:
                                yield self.Error('invalid SKIP directive for plan')
                            skipped = True
                        else:
                            yield self.Error('invalid directive for plan')
                    self.plan = self.Plan(num_tests=num_tests, late=(self.num_tests > 0),
                                          skipped=skipped, explanation=m.group(3))
                    yield self.plan
                return

            m = self._RE_BAILOUT.match(line)
            if m:
                yield self.Bailout(m.group(1))
                self.bailed_out = True
                return

            m = self._RE_VERSION.match(line)
            if m:
                # The TAP version is only accepted as the first line
                if self.lineno != 1:
                    yield self.Error('version number must be on the first line')
                    return
                self.version = int(m.group(1))
                if self.version < 13:
                    yield self.Error('version number should be at least 13')
                else:
                    yield self.Version(version=self.version)
                return

            # blank lines are ignored
            if not line:
                return

            yield self.Error(f'unexpected input at line {self.lineno}')
        else:
            # end of file
            if self.state == self._YAML:
                yield self.Error(f'YAML block not terminated (started on line {self.yaml_lineno})')

            # a bailout already explains any missing tests, so only
            # compare against the plan when we did not bail out
            if not self.bailed_out and self.plan and self.num_tests != self.plan.num_tests:
                if self.num_tests < self.plan.num_tests:
                    yield self.Error(f'Too few tests run (expected {self.plan.num_tests}, got {self.num_tests})')
                else:
                    yield self.Error(f'Too many tests run (expected {self.plan.num_tests}, got {self.num_tests})')
444
class TestLogger:
    """Abstract interface for test-run reporters; every hook defaults to
    a no-op so subclasses override only what they need."""

    def flush(self) -> None:
        """Flush any buffered output."""
        pass

    def start(self, harness: 'TestHarness') -> None:
        """Called once before any test starts."""
        pass

    def start_test(self, harness: 'TestHarness', test: 'TestRun') -> None:
        """Called when an individual test begins executing."""
        pass

    def log_subtest(self, harness: 'TestHarness', test: 'TestRun', s: str, res: TestResult) -> None:
        """Called for each subtest result parsed out of a test (e.g. TAP)."""
        pass

    def log(self, harness: 'TestHarness', result: 'TestRun') -> None:
        """Called when an individual test finishes."""
        pass

    async def finish(self, harness: 'TestHarness') -> None:
        """Called once after all tests have finished."""
        pass

    def close(self) -> None:
        """Release any resources held by the logger."""
        pass
466
467
class TestFileLogger(TestLogger):
    """Base class for loggers that stream their report into a file."""

    def __init__(self, filename: str, errors: str = 'replace') -> None:
        self.filename = filename
        # UTF-8 with the given error handler so arbitrary test output
        # never aborts logging.
        self.file = open(filename, 'w', encoding='utf-8', errors=errors)

    def close(self) -> None:
        """Close the underlying file exactly once; later calls are no-ops."""
        f, self.file = self.file, None
        if f:
            f.close()
477
478
class ConsoleLogger(TestLogger):
    """Live progress reporter that renders test status on the terminal.

    Uses Unicode decorations (moon-phase spinner, scissors rule) when
    stdout can encode them, falling back to pure ASCII otherwise, and to
    a dumb 80-column mode when stdout is not a tty.

    Fix: ``self.update`` was assigned ``asyncio.Event()`` twice in
    ``__init__``; the redundant second assignment has been removed.
    """

    ASCII_SPINNER = ['..', ':.', '.:']
    SPINNER = ["\U0001f311", "\U0001f312", "\U0001f313", "\U0001f314",
               "\U0001f315", "\U0001f316", "\U0001f317", "\U0001f318"]

    SCISSORS = "\u2700 "
    HLINE = "\u2015"
    RTRI = "\u25B6 "

    def __init__(self) -> None:
        # Event used to wake the progress task whenever state changes.
        self.update = asyncio.Event()
        self.running_tests = OrderedSet()  # type: OrderedSet['TestRun']
        self.progress_test = None          # type: T.Optional['TestRun']
        self.progress_task = None          # type: T.Optional[asyncio.Future]
        self.max_left_width = 0            # type: int
        self.stop = False
        # ANSI sequence that erases the previously printed progress line.
        self.should_erase_line = ''
        self.test_count = 0
        self.started_tests = 0
        self.spinner_index = 0
        try:
            self.cols, _ = os.get_terminal_size(1)
            self.is_tty = True
        except OSError:
            # Not a terminal: fixed width, no live progress.
            self.cols = 80
            self.is_tty = False

        self.output_start = dashes(self.SCISSORS, self.HLINE, self.cols - 2)
        self.output_end = dashes('', self.HLINE, self.cols - 2)
        self.sub = self.RTRI
        self.spinner = self.SPINNER
        try:
            # Probe whether stdout can encode the fancy characters …
            self.output_start.encode(sys.stdout.encoding or 'ascii')
        except UnicodeEncodeError:
            # … and fall back to plain ASCII decorations if not.
            self.output_start = dashes('8<', '-', self.cols - 2)
            self.output_end = dashes('', '-', self.cols - 2)
            self.sub = '| '
            self.spinner = self.ASCII_SPINNER

    def flush(self) -> None:
        """Erase a pending progress line before writing real output."""
        if self.should_erase_line:
            print(self.should_erase_line, end='')
            self.should_erase_line = ''

    def print_progress(self, line: str) -> None:
        """Print a transient progress line, remembering to erase it later."""
        print(self.should_erase_line, line, sep='', end='\r')
        self.should_erase_line = '\x1b[K'

    def request_update(self) -> None:
        """Wake the progress-reporting task."""
        self.update.set()

    def emit_progress(self, harness: 'TestHarness') -> None:
        """Render one progress line for the currently highlighted test."""
        if self.progress_test is None:
            self.flush()
            return

        if len(self.running_tests) == 1:
            count = f'{self.started_tests}/{self.test_count}'
        else:
            # Show the range of tests currently in flight.
            count = '{}-{}/{}'.format(self.started_tests - len(self.running_tests) + 1,
                                      self.started_tests, self.test_count)

        left = '[{}] {} '.format(count, self.spinner[self.spinner_index])
        self.spinner_index = (self.spinner_index + 1) % len(self.spinner)

        # Elapsed (and, when known, allowed) duration on the right.
        right = '{spaces} {dur:{durlen}}'.format(
            spaces=' ' * TestResult.maxlen(),
            dur=int(time.time() - self.progress_test.starttime),
            durlen=harness.duration_max_len)
        if self.progress_test.timeout:
            right += '/{timeout:{durlen}}'.format(
                timeout=self.progress_test.timeout,
                durlen=harness.duration_max_len)
        right += 's'
        detail = self.progress_test.detail
        if detail:
            right += '   ' + detail

        line = harness.format(self.progress_test, colorize=True,
                              max_left_width=self.max_left_width,
                              left=left, right=right)
        self.print_progress(line)

    def start(self, harness: 'TestHarness') -> None:
        """Begin the background progress task (tty only, no gdb/console tests)."""
        async def report_progress() -> None:
            loop = asyncio.get_event_loop()
            next_update = 0.0
            self.request_update()
            while not self.stop:
                await self.update.wait()
                self.update.clear()

                # We may get here simply because the progress line has been
                # overwritten, so do not always switch.  Only do so every
                # second, or if the printed test has finished
                if loop.time() >= next_update:
                    self.progress_test = None
                    next_update = loop.time() + 1
                    loop.call_at(next_update, self.request_update)

                if (self.progress_test and
                        self.progress_test.res is not TestResult.RUNNING):
                    self.progress_test = None

                if not self.progress_test:
                    if not self.running_tests:
                        continue
                    # Pick a test in round robin order
                    self.progress_test = self.running_tests.pop(last=False)
                    self.running_tests.add(self.progress_test)

                self.emit_progress(harness)
            self.flush()

        self.test_count = harness.test_count
        self.cols = max(self.cols, harness.max_left_width + 30)

        if self.is_tty and not harness.need_console:
            # Account for "[aa-bb/cc] OO " in the progress report
            self.max_left_width = 3 * len(str(self.test_count)) + 8
            self.progress_task = asyncio.ensure_future(report_progress())

    def start_test(self, harness: 'TestHarness', test: 'TestRun') -> None:
        """Record a newly started test and, in verbose mode, announce it."""
        if harness.options.verbose and test.cmdline:
            self.flush()
            print(harness.format(test, mlog.colorize_console(),
                                 max_left_width=self.max_left_width,
                                 right=test.res.get_text(mlog.colorize_console())))
            print(test.res.get_command_marker() + test.cmdline)
            if test.needs_parsing:
                pass
            elif not test.is_parallel:
                # Serial test output is bracketed by scissors lines.
                print(self.output_start, flush=True)
            else:
                print(flush=True)

        self.started_tests += 1
        self.running_tests.add(test)
        # Keep the newest test at the front of the round-robin order.
        self.running_tests.move_to_end(test, last=False)
        self.request_update()

    def shorten_log(self, harness: 'TestHarness', result: 'TestRun') -> str:
        """Return the test log, truncated to its last 100 lines unless
        verbose output was requested."""
        if not harness.options.verbose and not harness.options.print_errorlogs:
            return ''

        log = result.get_log(mlog.colorize_console(),
                             stderr_only=result.needs_parsing)
        if harness.options.verbose:
            return log

        lines = log.splitlines()
        if len(lines) < 100:
            return log
        else:
            return str(mlog.bold('Listing only the last 100 lines from a long log.\n')) + '\n'.join(lines[-100:])

    def print_log(self, harness: 'TestHarness', result: 'TestRun') -> None:
        """Print a finished test's command line and (shortened) log."""
        if not harness.options.verbose:
            cmdline = result.cmdline
            if not cmdline:
                print(result.res.get_command_marker() + result.stdo)
                return
            print(result.res.get_command_marker() + cmdline)

        log = self.shorten_log(harness, result)
        if log:
            print(self.output_start)
            print_safe(log)
            print(self.output_end)

    def log_subtest(self, harness: 'TestHarness', test: 'TestRun', s: str, result: TestResult) -> None:
        """Print one subtest result when verbose or when it failed."""
        if harness.options.verbose or (harness.options.print_errorlogs and result.is_bad()):
            self.flush()
            print(harness.format(test, mlog.colorize_console(), max_left_width=self.max_left_width,
                                 prefix=self.sub,
                                 middle=s,
                                 right=result.get_text(mlog.colorize_console())), flush=True)

            self.request_update()

    def log(self, harness: 'TestHarness', result: 'TestRun') -> None:
        """Report a finished test and print its logs as configured."""
        self.running_tests.remove(result)
        if result.res is TestResult.TIMEOUT and harness.options.verbose:
            self.flush()
            print(f'{result.name} time out (After {result.timeout} seconds)')

        if not harness.options.quiet or not result.res.is_ok():
            self.flush()
            if harness.options.verbose and not result.is_parallel and result.cmdline:
                if not result.needs_parsing:
                    # Close the scissors bracket opened in start_test().
                    print(self.output_end)
                print(harness.format(result, mlog.colorize_console(), max_left_width=self.max_left_width))
            else:
                print(harness.format(result, mlog.colorize_console(), max_left_width=self.max_left_width),
                      flush=True)
                if harness.options.verbose or result.res.is_bad():
                    self.print_log(harness, result)
            if harness.options.verbose or result.res.is_bad():
                print(flush=True)

        self.request_update()

    async def finish(self, harness: 'TestHarness') -> None:
        """Stop the progress task and print the failure summary."""
        self.stop = True
        self.request_update()
        if self.progress_task:
            await self.progress_task

        if harness.collected_failures and \
                (harness.options.print_errorlogs or harness.options.verbose):
            print("\nSummary of Failures:\n")
            for i, result in enumerate(harness.collected_failures, 1):
                print(harness.format(result, mlog.colorize_console()))

        print(harness.summary())
695
696
class TextLogfileBuilder(TestFileLogger):
    """Writes the human-readable plain-text test log."""

    def start(self, harness: 'TestHarness') -> None:
        """Write the log header: timestamp and inherited environment."""
        timestamp = datetime.datetime.now().isoformat()
        self.file.write(f'Log of Meson test suite run on {timestamp}\n\n')
        inherit_env = env_tuple_to_str(os.environ.items())
        self.file.write(f'Inherited environment: {inherit_env}\n\n')

    def log(self, harness: 'TestHarness', result: 'TestRun') -> None:
        """Append one finished test: status line, command, and output."""
        self.file.write(harness.format(result, False) + '\n')
        cmdline = result.cmdline
        if not cmdline:
            return
        starttime_str = time.strftime("%H:%M:%S", time.gmtime(result.starttime))
        self.file.write(starttime_str + ' ' + cmdline + '\n')
        self.file.write(dashes('output', '-', 78) + '\n')
        self.file.write(result.get_log())
        self.file.write(dashes('', '-', 78) + '\n\n')

    async def finish(self, harness: 'TestHarness') -> None:
        """Write the failure summary and overall totals, then tell the
        user where the log went."""
        if harness.collected_failures:
            self.file.write("\nSummary of Failures:\n\n")
            for i, result in enumerate(harness.collected_failures, 1):
                self.file.write(harness.format(result, False) + '\n')
        self.file.write(harness.summary())

        print(f'Full log written to {self.filename}')
721
722
class JsonLogfileBuilder(TestFileLogger):
    """Writes the machine-readable newline-delimited JSON test log."""

    def log(self, harness: 'TestHarness', result: 'TestRun') -> None:
        """Serialize one finished test as a single JSON line."""
        jresult: T.Dict[str, T.Any] = {
            'name': result.name,
            'stdout': result.stdo,
            'result': result.res.value,
            'starttime': result.starttime,
            'duration': result.duration,
            'returncode': result.returncode,
            'env': result.env,
            'command': result.cmd,
        }
        # stderr is only present when the test actually produced some.
        if result.stde:
            jresult['stderr'] = result.stde
        self.file.write(json.dumps(jresult) + '\n')
736
737
class JunitBuilder(TestLogger):

    """Builder for Junit test results.

    Junit is impossible to stream out, it requires attributes counting the
    total number of tests, failures, skips, and errors in the root element
    and in each test suite. As such, we use a builder class to track each
    test case, and calculate all metadata before writing it out.

    For tests with multiple results (like from a TAP test), we record the
    test as a suite with the project_name.test_name. This allows us to track
    each result separately. For tests with only one result (such as exit-code
    tests) we record each one into a suite with the name project_name. The use
    of the project_name allows us to sort subproject tests separately from
    the root project.
    """

    def __init__(self, filename: str) -> None:
        # Destination path of the XML report written by finish().
        self.filename = filename
        # Root <testsuites> element; its counters are totalled in finish().
        self.root = et.Element(
            'testsuites', tests='0', errors='0', failures='0')
        # Suites keyed by name ("project" or "project.test"); they are only
        # attached to the root in finish(), once their counters are final.
        self.suites = {}  # type: T.Dict[str, et.Element]

    def log(self, harness: 'TestHarness', test: 'TestRun') -> None:
        """Log a single test case."""
        if test.junit is not None:
            # The test produced its own JUnit XML (GTest does); adopt its
            # suites wholesale, renamed to project.test.suite.
            for suite in test.junit.findall('.//testsuite'):
                # Assume that we don't need to merge anything here...
                suite.attrib['name'] = '{}.{}.{}'.format(test.project, test.name, suite.attrib['name'])

                # GTest can inject invalid attributes
                for case in suite.findall('.//testcase[@result]'):
                    del case.attrib['result']
                for case in suite.findall('.//testcase[@timestamp]'):
                    del case.attrib['timestamp']
                self.root.append(suite)
            return

        # In this case we have a test binary with multiple results.
        # We want to record this so that each result is recorded
        # separately
        if test.results:
            suitename = f'{test.project}.{test.name}'
            # With --repeat the same suite name legitimately recurs.
            assert suitename not in self.suites or harness.options.repeat > 1, 'duplicate suite'

            suite = self.suites[suitename] = et.Element(
                'testsuite',
                name=suitename,
                tests=str(len(test.results)),
                errors=str(sum(1 for r in test.results if r.result in
                               {TestResult.INTERRUPT, TestResult.ERROR})),
                failures=str(sum(1 for r in test.results if r.result in
                                 {TestResult.FAIL, TestResult.UNEXPECTEDPASS, TestResult.TIMEOUT})),
                skipped=str(sum(1 for r in test.results if r.result is TestResult.SKIP)),
                time=str(test.duration),
            )

            for subtest in test.results:
                # Both name and classname are required. Use the suite name as
                # the class name, so that e.g. GitLab groups testcases correctly.
                testcase = et.SubElement(suite, 'testcase', name=str(subtest), classname=suitename)
                if subtest.result is TestResult.SKIP:
                    et.SubElement(testcase, 'skipped')
                elif subtest.result is TestResult.ERROR:
                    et.SubElement(testcase, 'error')
                elif subtest.result is TestResult.FAIL:
                    et.SubElement(testcase, 'failure')
                elif subtest.result is TestResult.UNEXPECTEDPASS:
                    fail = et.SubElement(testcase, 'failure')
                    fail.text = 'Test unexpected passed.'
                elif subtest.result is TestResult.INTERRUPT:
                    fail = et.SubElement(testcase, 'error')
                    fail.text = 'Test was interrupted by user.'
                elif subtest.result is TestResult.TIMEOUT:
                    fail = et.SubElement(testcase, 'error')
                    fail.text = 'Test did not finish before configured timeout.'
                if subtest.explanation:
                    et.SubElement(testcase, 'system-out').text = subtest.explanation
            if test.stdo:
                out = et.SubElement(suite, 'system-out')
                out.text = test.stdo.rstrip()
            if test.stde:
                err = et.SubElement(suite, 'system-err')
                err.text = test.stde.rstrip()
        else:
            # Single-result test: bucket it into the per-project suite,
            # creating the suite lazily on first use.
            if test.project not in self.suites:
                suite = self.suites[test.project] = et.Element(
                    'testsuite', name=test.project, tests='1', errors='0',
                    failures='0', skipped='0', time=str(test.duration))
            else:
                suite = self.suites[test.project]
                suite.attrib['tests'] = str(int(suite.attrib['tests']) + 1)

            testcase = et.SubElement(suite, 'testcase', name=test.name,
                                     classname=test.project, time=str(test.duration))
            if test.res is TestResult.SKIP:
                et.SubElement(testcase, 'skipped')
                suite.attrib['skipped'] = str(int(suite.attrib['skipped']) + 1)
            elif test.res is TestResult.ERROR:
                et.SubElement(testcase, 'error')
                suite.attrib['errors'] = str(int(suite.attrib['errors']) + 1)
            elif test.res is TestResult.FAIL:
                et.SubElement(testcase, 'failure')
                suite.attrib['failures'] = str(int(suite.attrib['failures']) + 1)
            if test.stdo:
                out = et.SubElement(testcase, 'system-out')
                out.text = test.stdo.rstrip()
            if test.stde:
                err = et.SubElement(testcase, 'system-err')
                err.text = test.stde.rstrip()

    async def finish(self, harness: 'TestHarness') -> None:
        """Calculate total test counts and write out the xml result."""
        for suite in self.suites.values():
            self.root.append(suite)
            # "skipped" is not allowed on the root "testsuites" element,
            # so only the other counters are summed up.
            for attr in ['tests', 'errors', 'failures']:
                self.root.attrib[attr] = str(int(self.root.attrib[attr]) + int(suite.attrib[attr]))

        tree = et.ElementTree(self.root)
        with open(self.filename, 'wb') as f:
            tree.write(f, encoding='utf-8', xml_declaration=True)
860
861
class TestRun:
    """State and outcome of a single test invocation.

    Instantiating TestRun actually constructs the protocol-specific
    subclass registered in PROTOCOL_TO_CLASS (see __new__).
    """

    # Monotonically increasing counter backing the ``num`` property.
    TEST_NUM = 0
    # Protocol -> concrete TestRun subclass; each subclass registers
    # itself right after its definition below.
    PROTOCOL_TO_CLASS: T.Dict[TestProtocol, T.Type['TestRun']] = {}

    def __new__(cls, test: TestSerialisation, *args: T.Any, **kwargs: T.Any) -> T.Any:
        # Dispatch instantiation to the subclass matching the test protocol.
        return super().__new__(TestRun.PROTOCOL_TO_CLASS[test.protocol])

    def __init__(self, test: TestSerialisation, test_env: T.Dict[str, str],
                 name: str, timeout: T.Optional[int], is_parallel: bool):
        """Initialize run state for *test* with the given environment,
        display name, effective timeout and parallelism flag."""
        self.res = TestResult.PENDING
        self.test = test
        self._num = None       # type: T.Optional[int]
        self.name = name
        self.timeout = timeout
        self.results = list()  # type: T.List[TAPParser.Test]
        self.returncode = 0
        self.starttime = None  # type: T.Optional[float]
        self.duration = None   # type: T.Optional[float]
        self.stdo = None       # type: T.Optional[str]
        self.stde = None       # type: T.Optional[str]
        self.cmd = None        # type: T.Optional[T.List[str]]
        self.env = test_env    # type: T.Dict[str, str]
        self.should_fail = test.should_fail
        self.project = test.project_name
        self.junit = None      # type: T.Optional[et.ElementTree]
        self.is_parallel = is_parallel

    def start(self, cmd: T.List[str]) -> None:
        """Mark the test as running with the given command line."""
        self.res = TestResult.RUNNING
        self.starttime = time.time()
        self.cmd = cmd

    @property
    def num(self) -> int:
        """Sequential display number, assigned lazily on first access."""
        if self._num is None:
            TestRun.TEST_NUM += 1
            self._num = TestRun.TEST_NUM
        return self._num

    @property
    def detail(self) -> str:
        """Short human-readable status detail for console/log output."""
        if self.res is TestResult.PENDING:
            return ''
        if self.returncode:
            return returncode_to_status(self.returncode)
        if self.results:
            # running or succeeded
            passed = sum(x.result.is_ok() for x in self.results)
            ran = sum(x.result is not TestResult.SKIP for x in self.results)
            if passed == ran:
                return f'{passed} subtests passed'
            else:
                return f'{passed}/{ran} subtests passed'
        return ''

    def _complete(self, returncode: int, res: TestResult,
                  stdo: T.Optional[str], stde: T.Optional[str]) -> None:
        """Record the final outcome, inverting OK/FAIL for should_fail tests."""
        assert isinstance(res, TestResult)
        if self.should_fail and res in (TestResult.OK, TestResult.FAIL):
            res = TestResult.UNEXPECTEDPASS if res.is_ok() else TestResult.EXPECTEDFAIL

        self.res = res
        self.returncode = returncode
        self.duration = time.time() - self.starttime
        self.stdo = stdo
        self.stde = stde

    @property
    def cmdline(self) -> T.Optional[str]:
        """Reproducible shell command line, or None if never started."""
        if not self.cmd:
            return None
        # Only spell out environment variables that differ from the parent
        # process environment.
        test_only_env = set(self.env.items()) - set(os.environ.items())
        return env_tuple_to_str(test_only_env) + \
            ' '.join(sh_quote(x) for x in self.cmd)

    def complete_skip(self, message: str) -> None:
        """Mark the test as skipped without executing it."""
        self.starttime = time.time()
        self._complete(GNU_SKIP_RETURNCODE, TestResult.SKIP, message, None)

    def complete(self, returncode: int, res: TestResult,
                 stdo: T.Optional[str], stde: T.Optional[str]) -> None:
        """Finalize the run; subclasses refine the result classification."""
        self._complete(returncode, res, stdo, stde)

    def get_log(self, colorize: bool = False, stderr_only: bool = False) -> str:
        """Return captured output, labelling stdout/stderr when both exist."""
        stdo = '' if stderr_only else self.stdo
        if self.stde:
            res = ''
            if stdo:
                res += mlog.cyan('stdout:').get_text(colorize) + '\n'
                res += stdo
                if res[-1:] != '\n':
                    res += '\n'
            res += mlog.cyan('stderr:').get_text(colorize) + '\n'
            res += self.stde
        else:
            res = stdo
        if res and res[-1:] != '\n':
            res += '\n'
        return res

    @property
    def needs_parsing(self) -> bool:
        # Subclasses whose protocol requires line-by-line output parsing
        # (TAP, Rust) override this to return True.
        return False

    async def parse(self, harness: 'TestHarness', lines: T.AsyncIterator[str]) -> T.Tuple[TestResult, str]:
        """Default parser: drain the output without interpreting it."""
        async for l in lines:
            pass
        return TestResult.OK, ''
970
971
class TestRunExitCode(TestRun):

    def complete(self, returncode: int, res: TestResult,
                 stdo: T.Optional[str], stde: T.Optional[str]) -> None:
        """Classify the run purely from its exit code.

        A result that was already determined (e.g. TIMEOUT) is kept;
        otherwise the GNU autotools conventions apply: 77 means skip,
        99 means error, zero is success, anything else is a failure.
        """
        if not res:
            if returncode == GNU_SKIP_RETURNCODE:
                res = TestResult.SKIP
            elif returncode == GNU_ERROR_RETURNCODE:
                res = TestResult.ERROR
            else:
                res = TestResult.OK if returncode == 0 else TestResult.FAIL
        super().complete(returncode, res, stdo, stde)

TestRun.PROTOCOL_TO_CLASS[TestProtocol.EXITCODE] = TestRunExitCode
987
988
class TestRunGTest(TestRunExitCode):
    def complete(self, returncode: int, res: TestResult,
                 stdo: T.Optional[str], stde: T.Optional[str]) -> None:
        """Load the GTest-generated XML report, then apply exit-code rules.

        The parsed report is stored in self.junit so the JUnit logger can
        copy the per-case results verbatim.
        """
        filename = f'{self.test.name}.xml'
        if self.test.workdir:
            filename = os.path.join(self.test.workdir, filename)

        try:
            self.junit = et.parse(filename)
        except FileNotFoundError:
            # This can happen if the test fails to run or complete for some
            # reason, like the rpath for libgtest isn't properly set. ExitCode
            # will handle the failure, don't generate a stacktrace.
            pass
        except et.ParseError as e:
            # A malformed report (e.g. the test was killed while writing it)
            # must not crash the harness; record the problem in stderr and
            # let the exit-code logic classify the run.
            stde = join_lines(stde, f'Failed to parse {filename}: {e}')

        super().complete(returncode, res, stdo, stde)

TestRun.PROTOCOL_TO_CLASS[TestProtocol.GTEST] = TestRunGTest
1007
1008
class TestRunTAP(TestRun):
    @property
    def needs_parsing(self) -> bool:
        # TAP output must be interpreted line by line.
        return True

    def complete(self, returncode: int, res: TestResult,
                 stdo: str, stde: str) -> None:
        """Finalize the run; a nonzero exit that was not caused by a kill
        (timeout/interrupt) is promoted to ERROR."""
        if returncode != 0 and not res.was_killed():
            res = TestResult.ERROR
            stde = (stde or '') + f'\n(test program exited with status code {returncode})'

        super().complete(returncode, res, stdo, stde)

    async def parse(self, harness: 'TestHarness', lines: T.AsyncIterator[str]) -> T.Tuple[TestResult, str]:
        """Feed the output through the TAP parser, logging every subtest
        and accumulating an overall verdict plus any parse-error text."""
        verdict = TestResult.OK
        error_text = ''

        async for event in TAPParser().parse_async(lines):
            if isinstance(event, TAPParser.Bailout):
                verdict = TestResult.ERROR
                harness.log_subtest(self, event.message, verdict)
            elif isinstance(event, TAPParser.Test):
                self.results.append(event)
                if event.result.is_bad():
                    verdict = TestResult.FAIL
                harness.log_subtest(self, event.name or f'subtest {event.number}', event.result)
            elif isinstance(event, TAPParser.Error):
                error_text = '\nTAP parsing error: ' + event.message
                verdict = TestResult.ERROR

        if all(t.result is TestResult.SKIP for t in self.results):
            # Also true when no subtests were reported at all.
            verdict = TestResult.SKIP
        return verdict, error_text

TestRun.PROTOCOL_TO_CLASS[TestProtocol.TAP] = TestRunTAP
1046
1047
class TestRunRust(TestRun):
    @property
    def needs_parsing(self) -> bool:
        # Rust test-harness output must be interpreted line by line.
        return True

    async def parse(self, harness: 'TestHarness', lines: T.AsyncIterator[str]) -> T.Tuple[TestResult, str]:
        """Translate `cargo test`-style output lines into subtest results.

        Each "test <name> ... <outcome>" line becomes one TAPParser.Test;
        the overall verdict is SKIP if everything was ignored, otherwise
        the worst outcome seen (ERROR > FAIL > OK).
        """
        outcome_for = {
            'ok': TestResult.OK,
            'ignored': TestResult.SKIP,
            'FAILED': TestResult.FAIL,
        }

        index = 1
        async for line in lines:
            if line.startswith('test ') and not line.startswith('test result'):
                _, raw_name, _, outcome = line.rstrip().split(' ')
                subtest_name = raw_name.replace('::', '.')
                mapped = outcome_for.get(outcome)
                if mapped is not None:
                    subtest = TAPParser.Test(index, subtest_name, mapped, None)
                else:
                    subtest = TAPParser.Test(
                        index, subtest_name, TestResult.ERROR,
                        f'Unsupported output from rust test: {outcome}')
                self.results.append(subtest)
                harness.log_subtest(self, subtest_name, subtest.result)
                index += 1

        if all(t.result is TestResult.SKIP for t in self.results):
            # This includes the case where self.results is empty
            return TestResult.SKIP, ''
        if any(t.result is TestResult.ERROR for t in self.results):
            return TestResult.ERROR, ''
        if any(t.result is TestResult.FAIL for t in self.results):
            return TestResult.FAIL, ''
        return TestResult.OK, ''

TestRun.PROTOCOL_TO_CLASS[TestProtocol.RUST] = TestRunRust
1084
1085
def decode(stream: T.Union[None, bytes]) -> str:
    """Best-effort decode of raw subprocess output.

    Strict UTF-8 is attempted first; on failure the bytes are decoded as
    ISO-8859-1 with undecodable data dropped, so this never raises.  A
    None stream (nothing captured) yields the empty string.
    """
    if stream is None:
        return ''
    try:
        return stream.decode('utf-8')
    except UnicodeDecodeError:
        pass
    return stream.decode('iso-8859-1', errors='ignore')
1093
async def read_decode(reader: asyncio.StreamReader, console_mode: ConsoleUser) -> str:
    """Drain *reader* completely and return the decoded text.

    Output is consumed line by line so that, when console_mode is STDOUT,
    it can be echoed to the console as it arrives.  On cancellation,
    whatever was collected so far is returned.
    """
    pieces = []  # type: T.List[str]
    try:
        while not reader.at_eof():
            # Prefer splitting by line, as that produces nicer output
            try:
                raw = await reader.readuntil(b'\n')
            except asyncio.IncompleteReadError as err:
                # EOF arrived before a newline: keep the partial line.
                raw = err.partial
            except asyncio.LimitOverrunError as err:
                # Line longer than the stream limit: take what is buffered.
                raw = await reader.readexactly(err.consumed)
            piece = decode(raw)
            pieces.append(piece)
            if console_mode is ConsoleUser.STDOUT:
                print(piece, end='', flush=True)
    except asyncio.CancelledError:
        pass
    return ''.join(pieces)
1112
# Extract lines out of the StreamReader.  Print them
# along the way if requested, and at the end collect
# them all into a future.
async def read_decode_lines(reader: asyncio.StreamReader, q: 'asyncio.Queue[T.Optional[str]]',
                            console_mode: ConsoleUser) -> str:
    """Forward decoded lines from *reader* into *q* and return them joined.

    A None sentinel is always pushed onto *q* at the end — even on
    cancellation — so that queue_iter() consumers terminate.
    """
    collected = []  # type: T.List[str]
    try:
        while not reader.at_eof():
            text = decode(await reader.readline())
            collected.append(text)
            if console_mode is ConsoleUser.STDOUT:
                print(text, end='', flush=True)
            await q.put(text)
    except asyncio.CancelledError:
        pass
    finally:
        await q.put(None)
    return ''.join(collected)
1131
def run_with_mono(fname: str) -> bool:
    """Return True when *fname* must be launched through the mono runtime."""
    # .exe assemblies run natively on Windows/Cygwin; elsewhere they need mono.
    if not fname.endswith('.exe'):
        return False
    return not (is_windows() or is_cygwin())
1134
def check_testdata(objs: T.List[TestSerialisation]) -> T.List[TestSerialisation]:
    """Sanity-check unpickled test data against the running meson version.

    Raises MesonVersionMismatchException when the data is not a list of
    TestSerialisation objects carrying a compatible version; returns the
    list unchanged otherwise.
    """
    if not isinstance(objs, list):
        raise MesonVersionMismatchException('<unknown>', coredata_version)
    for obj in objs:
        # Objects of the wrong type, or from before versioning existed,
        # cannot report a usable version.
        if not isinstance(obj, TestSerialisation) or not hasattr(obj, 'version'):
            raise MesonVersionMismatchException('<unknown>', coredata_version)
        if major_versions_differ(obj.version, coredata_version):
            raise MesonVersionMismatchException(obj.version, coredata_version)
    return objs
1146
1147# Custom waiting primitives for asyncio
1148
async def try_wait_one(*awaitables: T.Any, timeout: T.Optional[T.Union[int, float]]) -> None:
    """Wait for completion of one of the given awaitables, ignoring timeouts."""
    # asyncio.wait() no longer accepts bare coroutines (deprecated since
    # Python 3.8, an error on 3.11+), so wrap everything in a task first.
    # ensure_future() passes already-scheduled tasks/futures through as-is.
    await asyncio.wait([asyncio.ensure_future(a) for a in awaitables],
                       timeout=timeout, return_when=asyncio.FIRST_COMPLETED)
1153
async def queue_iter(q: 'asyncio.Queue[T.Optional[str]]') -> T.AsyncIterator[str]:
    """Yield the lines pushed into *q*, stopping at the None sentinel.

    Every retrieved item, including the sentinel, is acknowledged with
    task_done() so the producer side can rely on q.join().
    """
    item = await q.get()
    while item is not None:
        q.task_done()
        yield item
        item = await q.get()
    q.task_done()
1161
async def complete(future: asyncio.Future) -> None:
    """Wait for completion of the given future, ignoring cancellation."""
    try:
        await future
    except asyncio.CancelledError:
        # Cancellation of the awaited future is acceptable here; callers
        # only care that it has finished one way or the other.
        pass
1168
async def complete_all(futures: T.Iterable[asyncio.Future],
                       timeout: T.Optional[T.Union[int, float]] = None) -> None:
    """Wait for completion of all the given futures, ignoring cancellation.
       If timeout is not None, raise an asyncio.TimeoutError after the given
       time has passed.  asyncio.TimeoutError is only raised if some futures
       have not completed and none have raised exceptions, even if timeout
       is zero."""

    def check_futures(futures: T.Iterable[asyncio.Future]) -> None:
        # Re-raise the exception of any completed (non-cancelled) future,
        # and raise TimeoutError if anything is still pending.
        left = False
        for f in futures:
            if not f.done():
                left = True
            elif not f.cancelled():
                f.result()
        if left:
            raise asyncio.TimeoutError

    # Python is silly and does not have a variant of asyncio.wait with an
    # absolute time as deadline, so recompute the relative timeout on every
    # iteration.  Use get_running_loop(): get_event_loop() inside a running
    # coroutine is deprecated since Python 3.10.
    loop = asyncio.get_running_loop()
    deadline = None if timeout is None else loop.time() + timeout
    while futures and (timeout is None or timeout > 0):
        done, futures = await asyncio.wait(futures, timeout=timeout,
                                           return_when=asyncio.FIRST_EXCEPTION)
        check_futures(done)
        if deadline:
            timeout = deadline - loop.time()

    check_futures(futures)
1199
1200
class TestSubprocess:
    """Async wrapper around one running test process.

    Owns the stdout/stderr draining tasks and implements the
    timeout/interrupt kill sequence for the process (group).
    """

    def __init__(self, p: asyncio.subprocess.Process,
                 stdout: T.Optional[int], stderr: T.Optional[int],
                 postwait_fn: T.Optional[T.Callable[[], None]] = None):
        self._process = p
        # The stdout/stderr constants (PIPE/STDOUT/None) the process was
        # spawned with; communicate() uses them to decide which streams
        # need draining.
        self.stdout = stdout
        self.stderr = stderr
        self.stdo_task = None            # type: T.Optional[asyncio.Future[str]]
        self.stde_task = None            # type: T.Optional[asyncio.Future[str]]
        # Callback run after wait() finishes, e.g. to restore the SIGINT
        # handler once gdb has released the terminal.
        self.postwait_fn = postwait_fn   # type: T.Optional[T.Callable[[], None]]
        self.all_futures = []            # type: T.List[asyncio.Future]

    def stdout_lines(self, console_mode: ConsoleUser) -> T.AsyncIterator[str]:
        """Return an async iterator over decoded stdout lines.

        Starts the background task feeding the queue; the complete stdout
        text remains available from the task returned by communicate().
        """
        q = asyncio.Queue()              # type: asyncio.Queue[T.Optional[str]]
        decode_coro = read_decode_lines(self._process.stdout, q, console_mode)
        self.stdo_task = asyncio.ensure_future(decode_coro)
        return queue_iter(q)

    def communicate(self, console_mode: ConsoleUser) -> T.Tuple[T.Optional[T.Awaitable[str]],
                                                                T.Optional[T.Awaitable[str]]]:
        """Start draining stdout/stderr; return awaitables for their text.

        Either element is None when the corresponding stream is not piped
        (or, for stdout, was already taken over by stdout_lines()).
        """
        # asyncio.ensure_future ensures that printing can
        # run in the background, even before it is awaited
        if self.stdo_task is None and self.stdout is not None:
            decode_coro = read_decode(self._process.stdout, console_mode)
            self.stdo_task = asyncio.ensure_future(decode_coro)
            self.all_futures.append(self.stdo_task)
        if self.stderr is not None and self.stderr != asyncio.subprocess.STDOUT:
            decode_coro = read_decode(self._process.stderr, console_mode)
            self.stde_task = asyncio.ensure_future(decode_coro)
            self.all_futures.append(self.stde_task)

        return self.stdo_task, self.stde_task

    async def _kill(self) -> T.Optional[str]:
        """Kill the process (group), escalating SIGTERM -> SIGKILL -> kill().

        Returns an error message if the process would not die, else None.
        """
        # Python does not provide multiplatform support for
        # killing a process and all its children so we need
        # to roll our own.
        p = self._process
        try:
            if is_windows():
                subprocess.run(['taskkill', '/F', '/T', '/PID', str(p.pid)])
            else:
                # Send a termination signal to the process group that setsid()
                # created - giving it a chance to perform any cleanup.
                os.killpg(p.pid, signal.SIGTERM)

                # Make sure the termination signal actually kills the process
                # group, otherwise retry with a SIGKILL.
                await try_wait_one(p.wait(), timeout=0.5)
                if p.returncode is not None:
                    return None

                os.killpg(p.pid, signal.SIGKILL)

            await try_wait_one(p.wait(), timeout=1)
            if p.returncode is not None:
                return None

            # An earlier kill attempt has not worked for whatever reason.
            # Try to kill it one last time with a direct call.
            # If the process has spawned children, they will remain around.
            p.kill()
            await try_wait_one(p.wait(), timeout=1)
            if p.returncode is not None:
                return None
            return 'Test process could not be killed.'
        except ProcessLookupError:
            # Sometimes (e.g. with Wine) this happens.  There's nothing
            # we can do, probably the process already died so just wait
            # for the event loop to pick that up.
            await p.wait()
            return None
        finally:
            # Stop the output readers; they would otherwise hang on a
            # half-dead process.
            if self.stdo_task:
                self.stdo_task.cancel()
            if self.stde_task:
                self.stde_task.cancel()

    async def wait(self, timeout: T.Optional[int]) -> T.Tuple[int, TestResult, T.Optional[str]]:
        """Wait for process exit and output draining.

        Returns (returncode, result, additional_error) where result is
        TIMEOUT or INTERRUPT when the process had to be killed, and None
        otherwise so the caller can classify the exit code itself.
        """
        p = self._process
        result = None
        additional_error = None

        self.all_futures.append(asyncio.ensure_future(p.wait()))
        try:
            await complete_all(self.all_futures, timeout=timeout)
        except asyncio.TimeoutError:
            additional_error = await self._kill()
            result = TestResult.TIMEOUT
        except asyncio.CancelledError:
            # The main loop must have seen Ctrl-C.
            additional_error = await self._kill()
            result = TestResult.INTERRUPT
        finally:
            if self.postwait_fn:
                self.postwait_fn()

        return p.returncode or 0, result, additional_error
1299
class SingleTestRunner:
    """Prepare and execute a single test.

    Builds the final command line (wrapper + binary + args), fixes up the
    environment (PATH/WINEPATH/MALLOC_PERTURB_), computes the effective
    timeout, and decides who owns the console while the test runs.
    """

    def __init__(self, test: TestSerialisation, env: T.Dict[str, str], name: str,
                 options: argparse.Namespace):
        self.test = test
        self.options = options
        self.cmd = self._get_cmd()  # None when the test cannot be executed

        if self.cmd and self.test.extra_paths:
            # Prepend the extra paths so freshly built tools/libraries win
            # over anything already installed on the system.
            env['PATH'] = os.pathsep.join(self.test.extra_paths + ['']) + env['PATH']
            winecmd = []
            for c in self.cmd:
                winecmd.append(c)
                if os.path.basename(c).startswith('wine'):
                    # Wine wants the paths Z:-prefixed and shortened.
                    env['WINEPATH'] = get_wine_shortpath(
                        winecmd,
                        ['Z:' + p for p in self.test.extra_paths] + env.get('WINEPATH', '').split(';')
                    )
                    break

        # If MALLOC_PERTURB_ is not set, or if it is set to an empty value,
        # (i.e., the test or the environment don't explicitly set it), set
        # it ourselves. We do this unconditionally for regular tests
        # because it is extremely useful to have.
        # Setting MALLOC_PERTURB_="0" will completely disable this feature.
        if ('MALLOC_PERTURB_' not in env or not env['MALLOC_PERTURB_']) and not options.benchmark:
            env['MALLOC_PERTURB_'] = str(random.randint(1, 255))

        # No timeout under gdb or when the test/multiplier disables it.
        if self.options.gdb or self.test.timeout is None or self.test.timeout <= 0:
            timeout = None
        elif self.options.timeout_multiplier is None:
            timeout = self.test.timeout
        elif self.options.timeout_multiplier <= 0:
            timeout = None
        else:
            timeout = self.test.timeout * self.options.timeout_multiplier

        is_parallel = test.is_parallel and self.options.num_processes > 1 and not self.options.gdb
        self.runobj = TestRun(test, env, name, timeout, is_parallel)

        # Console ownership: gdb takes the terminal; verbose serial tests
        # with no parsing stream straight to stdout; all else via logger.
        if self.options.gdb:
            self.console_mode = ConsoleUser.GDB
        elif self.options.verbose and not is_parallel and not self.runobj.needs_parsing:
            self.console_mode = ConsoleUser.STDOUT
        else:
            self.console_mode = ConsoleUser.LOGGER

    def _get_test_cmd(self) -> T.Optional[T.List[str]]:
        """Return the base command for the test binary, or None when a
        cross-built binary cannot be run (no exe wrapper available)."""
        if self.test.fname[0].endswith('.jar'):
            return ['java', '-jar'] + self.test.fname
        elif not self.test.is_cross_built and run_with_mono(self.test.fname[0]):
            return ['mono'] + self.test.fname
        elif self.test.cmd_is_built and self.test.is_cross_built and self.test.needs_exe_wrapper:
            if self.test.exe_runner is None:
                # Can not run test on cross compiled executable
                # because there is no execute wrapper.
                return None
            elif self.test.cmd_is_built:
                # If the command is not built (ie, its a python script),
                # then we don't check for the exe-wrapper
                # NOTE(review): cmd_is_built is already guaranteed true by
                # the enclosing elif, so this branch is always taken here;
                # the check looks redundant — confirm intent.
                if not self.test.exe_runner.found():
                    msg = ('The exe_wrapper defined in the cross file {!r} was not '
                           'found. Please check the command and/or add it to PATH.')
                    raise TestException(msg.format(self.test.exe_runner.name))
                return self.test.exe_runner.get_command() + self.test.fname
        return self.test.fname

    def _get_cmd(self) -> T.Optional[T.List[str]]:
        """Full command including any user wrapper (gdb, valgrind, ...)."""
        test_cmd = self._get_test_cmd()
        if not test_cmd:
            return None
        return TestHarness.get_wrapper(self.options) + test_cmd

    @property
    def is_parallel(self) -> bool:
        # Whether this test may run concurrently with others.
        return self.runobj.is_parallel

    @property
    def visible_name(self) -> str:
        # Name shown in console and log output.
        return self.runobj.name

    @property
    def timeout(self) -> T.Optional[int]:
        # Effective timeout in seconds, None when disabled.
        return self.runobj.timeout

    async def run(self, harness: 'TestHarness') -> TestRun:
        """Execute the test (or record a skip) and return its TestRun."""
        if self.cmd is None:
            skip_stdout = 'Not run because can not execute cross compiled binaries.'
            harness.log_start_test(self.runobj)
            self.runobj.complete_skip(skip_stdout)
        else:
            cmd = self.cmd + self.test.cmd_args + self.options.test_args
            self.runobj.start(cmd)
            harness.log_start_test(self.runobj)
            await self._run_cmd(harness, cmd)
        return self.runobj

    async def _run_subprocess(self, args: T.List[str], *,
                              stdout: int, stderr: int,
                              env: T.Dict[str, str], cwd: T.Optional[str]) -> TestSubprocess:
        """Spawn the test process with gdb-aware signal handling."""
        # Let gdb handle ^C instead of us
        if self.options.gdb:
            previous_sigint_handler = signal.getsignal(signal.SIGINT)
            # Make the meson executable ignore SIGINT while gdb is running.
            signal.signal(signal.SIGINT, signal.SIG_IGN)

        def preexec_fn() -> None:
            # Runs in the child between fork and exec (POSIX only).
            if self.options.gdb:
                # Restore the SIGINT handler for the child process to
                # ensure it can handle it.
                signal.signal(signal.SIGINT, signal.SIG_DFL)
            else:
                # We don't want setsid() in gdb because gdb needs the
                # terminal in order to handle ^C and not show tcsetpgrp()
                # errors avoid not being able to use the terminal.
                os.setsid()

        def postwait_fn() -> None:
            if self.options.gdb:
                # Let us accept ^C again
                signal.signal(signal.SIGINT, previous_sigint_handler)

        p = await asyncio.create_subprocess_exec(*args,
                                                 stdout=stdout,
                                                 stderr=stderr,
                                                 env=env,
                                                 cwd=cwd,
                                                 preexec_fn=preexec_fn if not is_windows() else None)
        return TestSubprocess(p, stdout=stdout, stderr=stderr,
                              postwait_fn=postwait_fn if not is_windows() else None)

    async def _run_cmd(self, harness: 'TestHarness', cmd: T.List[str]) -> None:
        """Run the command, collect and parse output, finalize the TestRun."""
        if self.console_mode is ConsoleUser.GDB:
            # gdb owns the terminal: capture nothing.
            stdout = None
            stderr = None
        else:
            stdout = asyncio.subprocess.PIPE
            # Merge stderr into stdout unless the output must be kept
            # separate for --split or protocol parsing.
            stderr = asyncio.subprocess.STDOUT \
                if not self.options.split and not self.runobj.needs_parsing \
                else asyncio.subprocess.PIPE

        extra_cmd = []  # type: T.List[str]
        if self.test.protocol is TestProtocol.GTEST:
            # Ask GTest to write the XML report that TestRunGTest reads back.
            gtestname = self.test.name
            if self.test.workdir:
                gtestname = os.path.join(self.test.workdir, self.test.name)
            extra_cmd.append(f'--gtest_output=xml:{gtestname}.xml')

        p = await self._run_subprocess(cmd + extra_cmd,
                                       stdout=stdout,
                                       stderr=stderr,
                                       env=self.runobj.env,
                                       cwd=self.test.workdir)

        parse_task = None
        if self.runobj.needs_parsing:
            parse_coro = self.runobj.parse(harness, p.stdout_lines(self.console_mode))
            parse_task = asyncio.ensure_future(parse_coro)

        stdo_task, stde_task = p.communicate(self.console_mode)
        returncode, result, additional_error = await p.wait(self.runobj.timeout)

        if parse_task is not None:
            res, error = await parse_task
            if error:
                additional_error = join_lines(additional_error, error)
            # A kill result (TIMEOUT/INTERRUPT) takes precedence over the
            # result derived from parsing the output.
            result = result or res

        stdo = await stdo_task if stdo_task else ''
        stde = await stde_task if stde_task else ''
        stde = join_lines(stde, additional_error)
        self.runobj.complete(returncode, result, stdo, stde)
1472
1473
class TestHarness:
    """Drives a full 'meson test' run.

    Loads the serialised test definitions from the build directory,
    filters them, wraps each in a SingleTestRunner, executes them on an
    asyncio event loop and funnels every result through the registered
    TestLogger objects.  A harness object can only be used once (doit()).
    """

    def __init__(self, options: argparse.Namespace):
        self.options = options
        self.collected_failures = []  # type: T.List[TestRun]
        # Per-result counters, updated by process_test_result().
        self.fail_count = 0
        self.expectedfail_count = 0
        self.unexpectedpass_count = 0
        self.success_count = 0
        self.skip_count = 0
        self.timeout_count = 0
        self.test_count = 0
        self.name_max_len = 0
        self.is_run = False
        self.loggers = []         # type: T.List[TestLogger]
        self.loggers.append(ConsoleLogger())
        self.need_console = False

        # Base path (without extension) for the log files; stays None to
        # disable file logging, e.g. when running under gdb.
        self.logfile_base = None  # type: T.Optional[str]
        if self.options.logbase and not self.options.gdb:
            namebase = None
            self.logfile_base = os.path.join(self.options.wd, 'meson-logs', self.options.logbase)

            # Qualify the log file name with the wrapper or test setup so
            # runs with different configurations do not clobber each other.
            if self.options.wrapper:
                namebase = os.path.basename(self.get_wrapper(self.options)[0])
            elif self.options.setup:
                namebase = self.options.setup.replace(":", "_")

            if namebase:
                self.logfile_base += '-' + namebase.replace(' ', '_')

        # The serialised test data lives inside the build directory, so
        # temporarily chdir into it while loading; always chdir back.
        startdir = os.getcwd()
        try:
            os.chdir(self.options.wd)
            self.build_data = build.load(os.getcwd())
            if not self.options.setup:
                self.options.setup = self.build_data.test_setup_default_name
            if self.options.benchmark:
                self.tests = self.load_tests('meson_benchmark_setup.dat')
            else:
                self.tests = self.load_tests('meson_test_setup.dat')
        finally:
            os.chdir(startdir)

        # Collect the set of all suite names across the loaded tests.
        ss = set()
        for t in self.tests:
            for s in t.suite:
                ss.add(s)
        self.suites = list(ss)

    def load_tests(self, file_name: str) -> T.List[TestSerialisation]:
        """Unpickle the serialised test list from meson-private.

        Raises TestException when the data file is missing (i.e. the
        working directory is not a Meson build directory).
        """
        datafile = Path('meson-private') / file_name
        if not datafile.is_file():
            raise TestException(f'Directory {self.options.wd!r} does not seem to be a Meson build directory.')
        with datafile.open('rb') as f:
            objs = check_testdata(pickle.load(f))
        return objs

    def __enter__(self) -> 'TestHarness':
        return self

    def __exit__(self, exc_type: T.Any, exc_value: T.Any, traceback: T.Any) -> None:
        # Used as a context manager only to guarantee log files are closed.
        self.close_logfiles()

    def close_logfiles(self) -> None:
        """Close every registered logger."""
        for l in self.loggers:
            l.close()

    def get_test_setup(self, test: T.Optional[TestSerialisation]) -> build.TestSetup:
        """Look up the active test setup, exiting on an unknown name.

        'test' may be None only when the configured setup name is fully
        qualified ("project:setup"); otherwise the test's project name is
        used to qualify it.
        """
        if ':' in self.options.setup:
            if self.options.setup not in self.build_data.test_setups:
                sys.exit(f"Unknown test setup '{self.options.setup}'.")
            return self.build_data.test_setups[self.options.setup]
        else:
            full_name = test.project_name + ":" + self.options.setup
            if full_name not in self.build_data.test_setups:
                sys.exit(f"Test setup '{self.options.setup}' not found from project '{test.project_name}'.")
            return self.build_data.test_setups[full_name]

    def merge_setup_options(self, options: argparse.Namespace, test: TestSerialisation) -> T.Dict[str, str]:
        """Fold the active test setup into 'options' (mutated in place)
        and return the environment dictionary the setup mandates.

        Command-line values win over setup values, except that combining a
        command-line wrapper with a setup wrapper is a hard error.
        """
        current = self.get_test_setup(test)
        if not options.gdb:
            options.gdb = current.gdb
        if options.gdb:
            options.verbose = True
        if options.timeout_multiplier is None:
            options.timeout_multiplier = current.timeout_multiplier
    #    if options.env is None:
    #        options.env = current.env # FIXME, should probably merge options here.
        if options.wrapper is None:
            options.wrapper = current.exe_wrapper
        elif current.exe_wrapper:
            sys.exit('Conflict: both test setup and command line specify an exe wrapper.')
        return current.env.get_env(os.environ.copy())

    def get_test_runner(self, test: TestSerialisation) -> SingleTestRunner:
        """Build a SingleTestRunner for 'test' with merged options/env."""
        name = self.get_pretty_suite(test)
        # Deep-copy so that per-test setup merging cannot leak into the
        # options used for other tests.
        options = deepcopy(self.options)
        if self.options.setup:
            env = self.merge_setup_options(options, test)
        else:
            env = os.environ.copy()
        test_env = test.env.get_env(env)
        env.update(test_env)
        if (test.is_cross_built and test.needs_exe_wrapper and
                test.exe_runner and test.exe_runner.found()):
            env['MESON_EXE_WRAPPER'] = join_args(test.exe_runner.get_command())
        return SingleTestRunner(test, env, name, options)

    def process_test_result(self, result: TestRun) -> None:
        """Update the counters for a finished test and notify loggers."""
        if result.res is TestResult.TIMEOUT:
            self.timeout_count += 1
        elif result.res is TestResult.SKIP:
            self.skip_count += 1
        elif result.res is TestResult.OK:
            self.success_count += 1
        elif result.res in {TestResult.FAIL, TestResult.ERROR, TestResult.INTERRUPT}:
            self.fail_count += 1
        elif result.res is TestResult.EXPECTEDFAIL:
            self.expectedfail_count += 1
        elif result.res is TestResult.UNEXPECTEDPASS:
            self.unexpectedpass_count += 1
        else:
            sys.exit(f'Unknown test result encountered: {result.res}')

        if result.res.is_bad():
            self.collected_failures.append(result)
        for l in self.loggers:
            l.log(self, result)

    @property
    def numlen(self) -> int:
        # Width needed to print the largest test number.
        return len(str(self.test_count))

    @property
    def max_left_width(self) -> int:
        # Width of the default "num/testcount " prefix.
        return 2 * self.numlen + 2

    def format(self, result: TestRun, colorize: bool,
               max_left_width: int = 0,
               prefix: str = '',
               left: T.Optional[str] = None,
               middle: T.Optional[str] = None,
               right: T.Optional[str] = None) -> str:
        """Format one result line as prefix+left+middle+right, padding the
        middle so the right-hand columns line up across tests."""

        if left is None:
            left = '{num:{numlen}}/{testcount} '.format(
                numlen=self.numlen,
                num=result.num,
                testcount=self.test_count)

        # A non-default max_left_width lets the logger print more stuff before the
        # name, while ensuring that the rightmost columns remain aligned.
        max_left_width = max(max_left_width, self.max_left_width)

        if middle is None:
            middle = result.name
        extra_mid_width = max_left_width + self.name_max_len + 1 - uniwidth(middle) - uniwidth(left) - uniwidth(prefix)
        middle += ' ' * max(1, extra_mid_width)

        if right is None:
            right = '{res} {dur:{durlen}.2f}s'.format(
                res=result.res.get_text(colorize),
                dur=result.duration,
                durlen=self.duration_max_len + 3)
            detail = result.detail
            if detail:
                right += '   ' + detail
        return prefix + left + middle + right

    def summary(self) -> str:
        """Return the human-readable end-of-run summary block."""
        return textwrap.dedent('''

            Ok:                 {:<4}
            Expected Fail:      {:<4}
            Fail:               {:<4}
            Unexpected Pass:    {:<4}
            Skipped:            {:<4}
            Timeout:            {:<4}
            ''').format(self.success_count, self.expectedfail_count, self.fail_count,
                        self.unexpectedpass_count, self.skip_count, self.timeout_count)

    def total_failure_count(self) -> int:
        """Number of results that should make the run exit non-zero."""
        return self.fail_count + self.unexpectedpass_count + self.timeout_count

    def doit(self) -> int:
        """Run the whole test session once; return the failure count."""
        if self.is_run:
            raise RuntimeError('Test harness object can only be used once.')
        self.is_run = True
        tests = self.get_tests()
        if not tests:
            return 0
        if not self.options.no_rebuild and not rebuild_deps(self.options.wd, tests):
            # We return 125 here in case the build failed.
            # The reason is that exit code 125 tells `git bisect run` that the current
            # commit should be skipped.  Thus users can directly use `meson test` to
            # bisect without needing to handle the does-not-build case separately in a
            # wrapper script.
            sys.exit(125)

        self.name_max_len = max(uniwidth(self.get_pretty_suite(test)) for test in tests)
        startdir = os.getcwd()
        try:
            os.chdir(self.options.wd)
            runners = []             # type: T.List[SingleTestRunner]
            for i in range(self.options.repeat):
                runners.extend(self.get_test_runner(test) for test in tests)
                if i == 0:
                    self.duration_max_len = max(len(str(int(runner.timeout or 99)))
                                                for runner in runners)
                    # Disable the progress report if it gets in the way
                    self.need_console = any(runner.console_mode is not ConsoleUser.LOGGER
                                            for runner in runners)

            self.test_count = len(runners)
            self.run_tests(runners)
        finally:
            os.chdir(startdir)
        return self.total_failure_count()

    @staticmethod
    def split_suite_string(suite: str) -> T.Tuple[str, str]:
        """Split "project:suite" into its parts; a bare name is treated as
        the project part with an empty suite."""
        if ':' in suite:
            split = suite.split(':', 1)
            assert len(split) == 2
            return split[0], split[1]
        else:
            return suite, ""

    @staticmethod
    def test_in_suites(test: TestSerialisation, suites: T.List[str]) -> bool:
        """Return True if 'test' belongs to any of the requested suites
        (each given as either "suite_name" or "project_name:suite_name")."""
        for suite in suites:
            (prj_match, st_match) = TestHarness.split_suite_string(suite)
            for prjst in test.suite:
                (prj, st) = TestHarness.split_suite_string(prjst)

                # the SUITE can be passed as
                #     suite_name
                # or
                #     project_name:suite_name
                # so we need to select only the test belonging to project_name

                # this if handle the first case (i.e., SUITE == suite_name)

                # in this way we can run tests belonging to different
                # (sub)projects which share the same suite_name
                if not st_match and st == prj_match:
                    return True

                # these two conditions are needed to handle the second option
                # i.e., SUITE == project_name:suite_name

                # in this way we select the only the tests of
                # project_name with suite_name
                if prj_match and prj != prj_match:
                    continue
                if st_match and st != st_match:
                    continue
                return True
        return False

    def test_suitable(self, test: TestSerialisation) -> bool:
        """Apply --suite/--no-suite filters and the active setup's
        exclude list to decide whether 'test' should run."""
        if TestHarness.test_in_suites(test, self.options.exclude_suites):
            return False

        if self.options.include_suites:
            # Both force inclusion (overriding add_test_setup) and exclude
            # everything else
            return TestHarness.test_in_suites(test, self.options.include_suites)

        if self.options.setup:
            setup = self.get_test_setup(test)
            if TestHarness.test_in_suites(test, setup.exclude_suites):
                return False

        return True

    def tests_from_args(self, tests: T.List[TestSerialisation]) -> T.Generator[TestSerialisation, None, None]:
        '''
        Allow specifying test names like "meson test foo1 foo2", where test('foo1', ...)

        Also support specifying the subproject to run tests from like
        "meson test subproj:" (all tests inside subproj) or "meson test subproj:foo1"
        to run foo1 inside subproj. Coincidentally also "meson test :foo1" to
        run all tests with that name across all subprojects, which is
        identical to "meson test foo1"
        '''
        for arg in self.options.args:
            if ':' in arg:
                subproj, name = arg.split(':', maxsplit=1)
            else:
                subproj, name = '', arg
            for t in tests:
                if subproj and t.project_name != subproj:
                    continue
                if name and t.name != name:
                    continue
                yield t

    def get_tests(self) -> T.List[TestSerialisation]:
        """Return the final, filtered list of tests to run (possibly
        empty, in which case a message has been printed)."""
        if not self.tests:
            print('No tests defined.')
            return []

        tests = [t for t in self.tests if self.test_suitable(t)]
        if self.options.args:
            tests = list(self.tests_from_args(tests))

        if not tests:
            print('No suitable tests defined.')
            return []

        return tests

    def flush_logfiles(self) -> None:
        """Flush every registered logger."""
        for l in self.loggers:
            l.flush()

    def open_logfiles(self) -> None:
        """Register the file-based loggers (JUnit, JSON, text) when file
        logging is enabled."""
        if not self.logfile_base:
            return

        self.loggers.append(JunitBuilder(self.logfile_base + '.junit.xml'))
        self.loggers.append(JsonLogfileBuilder(self.logfile_base + '.json'))
        self.loggers.append(TextLogfileBuilder(self.logfile_base + '.txt', errors='surrogateescape'))

    @staticmethod
    def get_wrapper(options: argparse.Namespace) -> T.List[str]:
        """Build the command prefix implied by --gdb and/or --wrapper."""
        wrap = []  # type: T.List[str]
        if options.gdb:
            wrap = [options.gdb_path, '--quiet', '--nh']
            if options.repeat > 1:
                wrap += ['-ex', 'run', '-ex', 'quit']
            # Signal the end of arguments to gdb
            wrap += ['--args']
        if options.wrapper:
            wrap += options.wrapper
        return wrap

    def get_pretty_suite(self, test: TestSerialisation) -> str:
        """Return "project:suite+... / name", or just the test name when
        there is at most one suite overall."""
        if len(self.suites) > 1 and test.suite:
            rv = TestHarness.split_suite_string(test.suite[0])[0]
            s = "+".join(TestHarness.split_suite_string(s)[1] for s in test.suite)
            if s:
                rv += ":"
            return rv + s + " / " + test.name
        else:
            return test.name

    def run_tests(self, runners: T.List[SingleTestRunner]) -> None:
        """Synchronous entry point: open log files and drive the async
        test loop to completion."""
        try:
            self.open_logfiles()
            # Replace with asyncio.run once we can require Python 3.7
            loop = asyncio.get_event_loop()
            loop.run_until_complete(self._run_tests(runners))
        finally:
            self.close_logfiles()

    def log_subtest(self, test: TestRun, s: str, res: TestResult) -> None:
        """Forward a subtest result to every logger."""
        for l in self.loggers:
            l.log_subtest(self, test, s, res)

    def log_start_test(self, test: TestRun) -> None:
        """Notify every logger that a test has started."""
        for l in self.loggers:
            l.start_test(self, test)

    async def _run_tests(self, runners: T.List[SingleTestRunner]) -> None:
        """Run all tests concurrently (bounded by --num-processes),
        handling SIGINT/SIGTERM cancellation and logger lifecycle."""
        semaphore = asyncio.Semaphore(self.options.num_processes)
        futures = deque()  # type: T.Deque[asyncio.Future]
        running_tests = dict() # type: T.Dict[asyncio.Future, str]
        interrupted = False
        ctrlc_times = deque(maxlen=MAX_CTRLC) # type: T.Deque[float]

        async def run_test(test: SingleTestRunner) -> None:
            # One slot of the semaphore per concurrently running test.
            async with semaphore:
                if interrupted or (self.options.repeat > 1 and self.fail_count):
                    return
                res = await test.run(self)
                self.process_test_result(res)

        def test_done(f: asyncio.Future) -> None:
            # Re-raise any exception from the test task, then drop the
            # bookkeeping entries for it.
            if not f.cancelled():
                f.result()
            futures.remove(f)
            try:
                del running_tests[f]
            except KeyError:
                pass

        def cancel_one_test(warn: bool) -> None:
            # Rotate the oldest future to the back so repeated ^C presses
            # cancel different tests.
            future = futures.popleft()
            futures.append(future)
            if warn:
                self.flush_logfiles()
                mlog.warning('CTRL-C detected, interrupting {}'.format(running_tests[future]))
            del running_tests[future]
            future.cancel()

        def cancel_all_tests() -> None:
            nonlocal interrupted
            interrupted = True
            while running_tests:
                cancel_one_test(False)

        def sigterm_handler() -> None:
            if interrupted:
                return
            self.flush_logfiles()
            mlog.warning('Received SIGTERM, exiting')
            cancel_all_tests()

        def sigint_handler() -> None:
            # We always pick the longest-running future that has not been cancelled
            # If all the tests have been CTRL-C'ed, just stop
            nonlocal interrupted
            if interrupted:
                return
            ctrlc_times.append(asyncio.get_event_loop().time())
            # MAX_CTRLC rapid ^C presses (within one second) abort the run.
            if len(ctrlc_times) == MAX_CTRLC and ctrlc_times[-1] - ctrlc_times[0] < 1:
                self.flush_logfiles()
                mlog.warning('CTRL-C detected, exiting')
                cancel_all_tests()
            elif running_tests:
                cancel_one_test(True)
            else:
                self.flush_logfiles()
                mlog.warning('CTRL-C detected, exiting')
                interrupted = True

        for l in self.loggers:
            l.start(self)

        if sys.platform != 'win32':
            # Only treat ^C specially when we are the process group leader;
            # otherwise a parent process manages interruption and we behave
            # as for SIGTERM.
            if os.getpgid(0) == os.getpid():
                asyncio.get_event_loop().add_signal_handler(signal.SIGINT, sigint_handler)
            else:
                asyncio.get_event_loop().add_signal_handler(signal.SIGINT, sigterm_handler)
            asyncio.get_event_loop().add_signal_handler(signal.SIGTERM, sigterm_handler)
        try:
            for runner in runners:
                # Non-parallel tests must run alone: drain everything
                # in-flight before starting, and wait for them afterwards.
                if not runner.is_parallel:
                    await complete_all(futures)
                future = asyncio.ensure_future(run_test(runner))
                futures.append(future)
                running_tests[future] = runner.visible_name
                future.add_done_callback(test_done)
                if not runner.is_parallel:
                    await complete(future)
                if self.options.repeat > 1 and self.fail_count:
                    break

            await complete_all(futures)
        finally:
            if sys.platform != 'win32':
                asyncio.get_event_loop().remove_signal_handler(signal.SIGINT)
                asyncio.get_event_loop().remove_signal_handler(signal.SIGTERM)
            for l in self.loggers:
                await l.finish(self)
1931
def list_tests(th: TestHarness) -> bool:
    """Print the pretty-formatted name of every selected test.

    Returns True when nothing was found (so the caller can signal a
    failing exit status), False otherwise.
    """
    found = th.get_tests()
    for test in found:
        print(th.get_pretty_suite(test))
    return not found
1937
def rebuild_deps(wd: str, tests: T.List[TestSerialisation]) -> bool:
    """Rebuild the targets the given tests depend on, using ninja.

    Returns True if the rebuild succeeded or was skipped (non-ninja
    backend), False if ninja is missing or the build failed.
    """
    def convert_path_to_target(path: str) -> str:
        # Ninja target names are relative to the build dir and use '/'.
        path = os.path.relpath(path, wd)
        if os.sep != '/':
            path = path.replace(os.sep, '/')
        return path

    if not (Path(wd) / 'build.ninja').is_file():
        print('Only ninja backend is supported to rebuild tests before running them.')
        return True

    ninja = environment.detect_ninja()
    if not ninja:
        print("Can't find ninja, can't rebuild test.")
        return False

    depends = set()            # type: T.Set[str]
    targets = set()            # type: T.Set[str]
    intro_targets = dict()     # type: T.Dict[str, T.List[str]]
    # Map introspection target ids to their ninja target names.
    for target in load_info_file(get_infodir(wd), kind='targets'):
        intro_targets[target['id']] = [
            convert_path_to_target(f)
            for f in target['filename']]
    for t in tests:
        for d in t.depends:
            if d in depends:
                continue
            # BUGFIX: 'd' is a single target-id string; the previous
            # depends.update(d) added each *character* of the id to the
            # set, so the duplicate check above could never match.
            depends.add(d)
            targets.update(intro_targets[d])

    ret = subprocess.run(ninja + ['-C', wd] + sorted(targets)).returncode
    if ret != 0:
        print(f'Could not rebuild {wd}')
        return False

    return True
1974
def run(options: argparse.Namespace) -> int:
    """Validate the parsed options, prepare the environment, and execute
    the test harness.  Returns the exit code for 'meson test'."""
    # Benchmarks must run serially, or their timings are meaningless.
    if options.benchmark:
        options.num_processes = 1

    if options.verbose and options.quiet:
        print('Can not be both quiet and verbose at the same time.')
        return 1

    # Name of an external program whose availability must be verified
    # before starting (gdb or the user-supplied wrapper).
    program_to_check = None
    if options.gdb:
        options.verbose = True
        if options.wrapper:
            print('Must not specify both a wrapper and gdb at the same time.')
            return 1
        program_to_check = 'gdb'

    if options.wrapper:
        program_to_check = options.wrapper[0]

    if sys.platform == 'win32':
        # The Proactor event loop is required for asyncio subprocess
        # support on Windows.
        proactor = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(proactor)

    if program_to_check is not None:
        prog = ExternalProgram(program_to_check, silent=True)
        if not prog.found():
            print(f'Could not find requested program: {program_to_check!r}')
            return 1

    build_data = build.load(options.wd)
    setup_vsenv(build_data.need_vsenv)

    with TestHarness(options) as harness:
        try:
            if options.list:
                return list_tests(harness)
            return harness.doit()
        except TestException as err:
            print('Meson test encountered an error:\n')
            if os.environ.get('MESON_FORCE_BACKTRACE'):
                raise err
            else:
                print(err)
            return 1
2019
def run_with_args(args: T.List[str]) -> int:
    """Parse 'meson test' command-line arguments and run the harness."""
    arg_parser = argparse.ArgumentParser(prog='meson test')
    add_arguments(arg_parser)
    return run(arg_parser.parse_args(args))
2025