1# Copyright 2016-2017 The Meson development team
2
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6
7#     http://www.apache.org/licenses/LICENSE-2.0
8
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15# A tool to run tests in many different ways.
16
17from pathlib import Path
18from collections import deque
19from copy import deepcopy
20import argparse
21import asyncio
22import datetime
23import enum
24import json
25import multiprocessing
26import os
27import pickle
28import platform
29import random
30import re
31import signal
32import subprocess
33import shlex
34import sys
35import textwrap
36import time
37import typing as T
38import unicodedata
39import xml.etree.ElementTree as et
40
41from . import build
42from . import environment
43from . import mlog
44from .coredata import major_versions_differ, MesonVersionMismatchException
45from .coredata import version as coredata_version
46from .mesonlib import (MesonException, OrderedSet, RealPathAction,
47                       get_wine_shortpath, join_args, split_args)
48from .mintro import get_infodir, load_info_file
49from .programs import ExternalProgram
50from .backend.backends import TestProtocol, TestSerialisation
51
52# GNU autotools interprets a return code of 77 from tests it executes to
53# mean that the test should be skipped.
54GNU_SKIP_RETURNCODE = 77
55
56# GNU autotools interprets a return code of 99 from tests it executes to
57# mean that the test failed even before testing what it is supposed to test.
58GNU_ERROR_RETURNCODE = 99
59
60# Exit if 3 Ctrl-C's are received within one second
61MAX_CTRLC = 3
62
63def is_windows() -> bool:
64    platname = platform.system().lower()
65    return platname == 'windows'
66
67def is_cygwin() -> bool:
68    return sys.platform == 'cygwin'
69
70UNIWIDTH_MAPPING = {'F': 2, 'H': 1, 'W': 2, 'Na': 1, 'N': 1, 'A': 1}
71def uniwidth(s: str) -> int:
72    result = 0
73    for c in s:
74        w = unicodedata.east_asian_width(c)
75        result += UNIWIDTH_MAPPING[w]
76    return result
77
78def determine_worker_count() -> int:
79    varname = 'MESON_TESTTHREADS'
80    if varname in os.environ:
81        try:
82            num_workers = int(os.environ[varname])
83        except ValueError:
84            print(f'Invalid value in {varname}, using 1 thread.')
85            num_workers = 1
86    else:
87        try:
88            # Fails in some weird environments such as Debian
89            # reproducible build.
90            num_workers = multiprocessing.cpu_count()
91        except Exception:
92            num_workers = 1
93    return num_workers
94
95def add_arguments(parser: argparse.ArgumentParser) -> None:
96    parser.add_argument('--repeat', default=1, dest='repeat', type=int,
97                        help='Number of times to run the tests.')
98    parser.add_argument('--no-rebuild', default=False, action='store_true',
99                        help='Do not rebuild before running tests.')
100    parser.add_argument('--gdb', default=False, dest='gdb', action='store_true',
101                        help='Run test under gdb.')
102    parser.add_argument('--gdb-path', default='gdb', dest='gdb_path',
103                        help='Path to the gdb binary (default: gdb).')
104    parser.add_argument('--list', default=False, dest='list', action='store_true',
105                        help='List available tests.')
106    parser.add_argument('--wrapper', default=None, dest='wrapper', type=split_args,
107                        help='wrapper to run tests with (e.g. Valgrind)')
108    parser.add_argument('-C', dest='wd', action=RealPathAction,
109                        # https://github.com/python/typeshed/issues/3107
110                        # https://github.com/python/mypy/issues/7177
111                        type=os.path.abspath,  # type: ignore
112                        help='directory to cd into before running')
113    parser.add_argument('--suite', default=[], dest='include_suites', action='append', metavar='SUITE',
114                        help='Only run tests belonging to the given suite.')
115    parser.add_argument('--no-suite', default=[], dest='exclude_suites', action='append', metavar='SUITE',
116                        help='Do not run tests belonging to the given suite.')
117    parser.add_argument('--no-stdsplit', default=True, dest='split', action='store_false',
118                        help='Do not split stderr and stdout in test logs.')
119    parser.add_argument('--print-errorlogs', default=False, action='store_true',
120                        help="Whether to print failing tests' logs.")
121    parser.add_argument('--benchmark', default=False, action='store_true',
122                        help="Run benchmarks instead of tests.")
123    parser.add_argument('--logbase', default='testlog',
124                        help="Base name for log file.")
125    parser.add_argument('--num-processes', default=determine_worker_count(), type=int,
126                        help='How many parallel processes to use.')
127    parser.add_argument('-v', '--verbose', default=False, action='store_true',
128                        help='Do not redirect stdout and stderr')
129    parser.add_argument('-q', '--quiet', default=False, action='store_true',
130                        help='Produce less output to the terminal.')
131    parser.add_argument('-t', '--timeout-multiplier', type=float, default=None,
132                        help='Define a multiplier for test timeout, for example '
133                        ' when running tests in particular conditions they might take'
134                        ' more time to execute. (<= 0 to disable timeout)')
135    parser.add_argument('--setup', default=None, dest='setup',
136                        help='Which test setup to use.')
137    parser.add_argument('--test-args', default=[], type=split_args,
138                        help='Arguments to pass to the specified test(s) or all tests')
139    parser.add_argument('args', nargs='*',
140                        help='Optional list of test names to run. "testname" to run all tests with that name, '
141                        '"subprojname:testname" to specifically run "testname" from "subprojname", '
142                        '"subprojname:" to run all tests defined by "subprojname".')
143
144
145def print_safe(s: str) -> None:
146    end = '' if s[-1] == '\n' else '\n'
147    try:
148        print(s, end=end)
149    except UnicodeEncodeError:
150        s = s.encode('ascii', errors='backslashreplace').decode('ascii')
151        print(s, end=end)
152
153def join_lines(a: str, b: str) -> str:
154    if not a:
155        return b
156    if not b:
157        return a
158    return a + '\n' + b
159
160def dashes(s: str, dash: str, cols: int) -> str:
161    if not s:
162        return dash * cols
163    s = ' ' + s + ' '
164    width = uniwidth(s)
165    first = (cols - width) // 2
166    s = dash * first + s
167    return s + dash * (cols - first - width)
168
169def returncode_to_status(retcode: int) -> str:
170    # Note: We can't use `os.WIFSIGNALED(result.returncode)` and the related
171    # functions here because the status returned by subprocess is munged. It
172    # returns a negative value if the process was killed by a signal rather than
173    # the raw status returned by `wait()`. Also, If a shell sits between Meson
174    # the the actual unit test that shell is likely to convert a termination due
175    # to a signal into an exit status of 128 plus the signal number.
176    if retcode < 0:
177        signum = -retcode
178        try:
179            signame = signal.Signals(signum).name
180        except ValueError:
181            signame = 'SIGinvalid'
182        return f'killed by signal {signum} {signame}'
183
184    if retcode <= 128:
185        return f'exit status {retcode}'
186
187    signum = retcode - 128
188    try:
189        signame = signal.Signals(signum).name
190    except ValueError:
191        signame = 'SIGinvalid'
192    return f'(exit status {retcode} or signal {signum} {signame})'
193
194# TODO for Windows
195sh_quote: T.Callable[[str], str] = lambda x: x
196if not is_windows():
197    sh_quote = shlex.quote
198
199def env_tuple_to_str(env: T.Iterable[T.Tuple[str, str]]) -> str:
200    return ''.join(["{}={} ".format(k, sh_quote(v)) for k, v in env])
201
202
203class TestException(MesonException):
204    pass
205
206
207@enum.unique
208class ConsoleUser(enum.Enum):
209
210    # the logger can use the console
211    LOGGER = 0
212
213    # the console is used by gdb
214    GDB = 1
215
216    # the console is used to write stdout/stderr
217    STDOUT = 2
218
219
220@enum.unique
221class TestResult(enum.Enum):
222
223    PENDING = 'PENDING'
224    RUNNING = 'RUNNING'
225    OK = 'OK'
226    TIMEOUT = 'TIMEOUT'
227    INTERRUPT = 'INTERRUPT'
228    SKIP = 'SKIP'
229    FAIL = 'FAIL'
230    EXPECTEDFAIL = 'EXPECTEDFAIL'
231    UNEXPECTEDPASS = 'UNEXPECTEDPASS'
232    ERROR = 'ERROR'
233
234    @staticmethod
235    def maxlen() -> int:
236        return 14 # len(UNEXPECTEDPASS)
237
238    def is_ok(self) -> bool:
239        return self in {TestResult.OK, TestResult.EXPECTEDFAIL}
240
241    def is_bad(self) -> bool:
242        return self in {TestResult.FAIL, TestResult.TIMEOUT, TestResult.INTERRUPT,
243                        TestResult.UNEXPECTEDPASS, TestResult.ERROR}
244
245    def is_finished(self) -> bool:
246        return self not in {TestResult.PENDING, TestResult.RUNNING}
247
248    def was_killed(self) -> bool:
249        return self in (TestResult.TIMEOUT, TestResult.INTERRUPT)
250
251    def colorize(self, s: str) -> mlog.AnsiDecorator:
252        if self.is_bad():
253            decorator = mlog.red
254        elif self in (TestResult.SKIP, TestResult.EXPECTEDFAIL):
255            decorator = mlog.yellow
256        elif self.is_finished():
257            decorator = mlog.green
258        else:
259            decorator = mlog.blue
260        return decorator(s)
261
262    def get_text(self, colorize: bool) -> str:
263        result_str = '{res:{reslen}}'.format(res=self.value, reslen=self.maxlen())
264        return self.colorize(result_str).get_text(colorize)
265
266    def get_command_marker(self) -> str:
267        return str(self.colorize('>>> '))
268
269
270TYPE_TAPResult = T.Union['TAPParser.Test', 'TAPParser.Error', 'TAPParser.Version', 'TAPParser.Plan', 'TAPParser.Bailout']
271
272class TAPParser:
273    class Plan(T.NamedTuple):
274        num_tests: int
275        late: bool
276        skipped: bool
277        explanation: T.Optional[str]
278
279    class Bailout(T.NamedTuple):
280        message: str
281
282    class Test(T.NamedTuple):
283        number: int
284        name: str
285        result: TestResult
286        explanation: T.Optional[str]
287
288        def __str__(self) -> str:
289            return f'{self.number} {self.name}'.strip()
290
291    class Error(T.NamedTuple):
292        message: str
293
294    class Version(T.NamedTuple):
295        version: int
296
297    _MAIN = 1
298    _AFTER_TEST = 2
299    _YAML = 3
300
301    _RE_BAILOUT = re.compile(r'Bail out!\s*(.*)')
302    _RE_DIRECTIVE = re.compile(r'(?:\s*\#\s*([Ss][Kk][Ii][Pp]\S*|[Tt][Oo][Dd][Oo])\b\s*(.*))?')
303    _RE_PLAN = re.compile(r'1\.\.([0-9]+)' + _RE_DIRECTIVE.pattern)
304    _RE_TEST = re.compile(r'((?:not )?ok)\s*(?:([0-9]+)\s*)?([^#]*)' + _RE_DIRECTIVE.pattern)
305    _RE_VERSION = re.compile(r'TAP version ([0-9]+)')
306    _RE_YAML_START = re.compile(r'(\s+)---.*')
307    _RE_YAML_END = re.compile(r'\s+\.\.\.\s*')
308
309    found_late_test = False
310    bailed_out = False
311    plan: T.Optional[Plan] = None
312    lineno = 0
313    num_tests = 0
314    yaml_lineno: T.Optional[int] = None
315    yaml_indent = ''
316    state = _MAIN
317    version = 12
318
319    def parse_test(self, ok: bool, num: int, name: str, directive: T.Optional[str], explanation: T.Optional[str]) -> \
320            T.Generator[T.Union['TAPParser.Test', 'TAPParser.Error'], None, None]:
321        name = name.strip()
322        explanation = explanation.strip() if explanation else None
323        if directive is not None:
324            directive = directive.upper()
325            if directive.startswith('SKIP'):
326                if ok:
327                    yield self.Test(num, name, TestResult.SKIP, explanation)
328                    return
329            elif directive == 'TODO':
330                yield self.Test(num, name, TestResult.UNEXPECTEDPASS if ok else TestResult.EXPECTEDFAIL, explanation)
331                return
332            else:
333                yield self.Error(f'invalid directive "{directive}"')
334
335        yield self.Test(num, name, TestResult.OK if ok else TestResult.FAIL, explanation)
336
337    async def parse_async(self, lines: T.AsyncIterator[str]) -> T.AsyncIterator[TYPE_TAPResult]:
338        async for line in lines:
339            for event in self.parse_line(line):
340                yield event
341        for event in self.parse_line(None):
342            yield event
343
344    def parse(self, io: T.Iterator[str]) -> T.Iterator[TYPE_TAPResult]:
345        for line in io:
346            yield from self.parse_line(line)
347        yield from self.parse_line(None)
348
349    def parse_line(self, line: T.Optional[str]) -> T.Iterator[TYPE_TAPResult]:
350        if line is not None:
351            self.lineno += 1
352            line = line.rstrip()
353
354            # YAML blocks are only accepted after a test
355            if self.state == self._AFTER_TEST:
356                if self.version >= 13:
357                    m = self._RE_YAML_START.match(line)
358                    if m:
359                        self.state = self._YAML
360                        self.yaml_lineno = self.lineno
361                        self.yaml_indent = m.group(1)
362                        return
363                self.state = self._MAIN
364
365            elif self.state == self._YAML:
366                if self._RE_YAML_END.match(line):
367                    self.state = self._MAIN
368                    return
369                if line.startswith(self.yaml_indent):
370                    return
371                yield self.Error(f'YAML block not terminated (started on line {self.yaml_lineno})')
372                self.state = self._MAIN
373
374            assert self.state == self._MAIN
375            if line.startswith('#'):
376                return
377
378            m = self._RE_TEST.match(line)
379            if m:
380                if self.plan and self.plan.late and not self.found_late_test:
381                    yield self.Error('unexpected test after late plan')
382                    self.found_late_test = True
383                self.num_tests += 1
384                num = self.num_tests if m.group(2) is None else int(m.group(2))
385                if num != self.num_tests:
386                    yield self.Error('out of order test numbers')
387                yield from self.parse_test(m.group(1) == 'ok', num,
388                                           m.group(3), m.group(4), m.group(5))
389                self.state = self._AFTER_TEST
390                return
391
392            m = self._RE_PLAN.match(line)
393            if m:
394                if self.plan:
395                    yield self.Error('more than one plan found')
396                else:
397                    num_tests = int(m.group(1))
398                    skipped = (num_tests == 0)
399                    if m.group(2):
400                        if m.group(2).upper().startswith('SKIP'):
401                            if num_tests > 0:
402                                yield self.Error('invalid SKIP directive for plan')
403                            skipped = True
404                        else:
405                            yield self.Error('invalid directive for plan')
406                    self.plan = self.Plan(num_tests=num_tests, late=(self.num_tests > 0),
407                                          skipped=skipped, explanation=m.group(3))
408                    yield self.plan
409                return
410
411            m = self._RE_BAILOUT.match(line)
412            if m:
413                yield self.Bailout(m.group(1))
414                self.bailed_out = True
415                return
416
417            m = self._RE_VERSION.match(line)
418            if m:
419                # The TAP version is only accepted as the first line
420                if self.lineno != 1:
421                    yield self.Error('version number must be on the first line')
422                    return
423                self.version = int(m.group(1))
424                if self.version < 13:
425                    yield self.Error('version number should be at least 13')
426                else:
427                    yield self.Version(version=self.version)
428                return
429
430            if not line:
431                return
432
433            yield self.Error('unexpected input at line {}'.format((self.lineno,)))
434        else:
435            # end of file
436            if self.state == self._YAML:
437                yield self.Error(f'YAML block not terminated (started on line {self.yaml_lineno})')
438
439            if not self.bailed_out and self.plan and self.num_tests != self.plan.num_tests:
440                if self.num_tests < self.plan.num_tests:
441                    yield self.Error(f'Too few tests run (expected {self.plan.num_tests}, got {self.num_tests})')
442                else:
443                    yield self.Error(f'Too many tests run (expected {self.plan.num_tests}, got {self.num_tests})')
444
445class TestLogger:
446    def flush(self) -> None:
447        pass
448
449    def start(self, harness: 'TestHarness') -> None:
450        pass
451
452    def start_test(self, harness: 'TestHarness', test: 'TestRun') -> None:
453        pass
454
455    def log_subtest(self, harness: 'TestHarness', test: 'TestRun', s: str, res: TestResult) -> None:
456        pass
457
458    def log(self, harness: 'TestHarness', result: 'TestRun') -> None:
459        pass
460
461    async def finish(self, harness: 'TestHarness') -> None:
462        pass
463
464    def close(self) -> None:
465        pass
466
467
468class TestFileLogger(TestLogger):
469    def __init__(self, filename: str, errors: str = 'replace') -> None:
470        self.filename = filename
471        self.file = open(filename, 'w', encoding='utf-8', errors=errors)
472
473    def close(self) -> None:
474        if self.file:
475            self.file.close()
476            self.file = None
477
478
479class ConsoleLogger(TestLogger):
480    SPINNER = "\U0001f311\U0001f312\U0001f313\U0001f314" + \
481              "\U0001f315\U0001f316\U0001f317\U0001f318"
482
483    SCISSORS = "\u2700 "
484    HLINE = "\u2015"
485    RTRI = "\u25B6 "
486
487    def __init__(self) -> None:
488        self.update = asyncio.Event()
489        self.running_tests = OrderedSet()  # type: OrderedSet['TestRun']
490        self.progress_test = None          # type: T.Optional['TestRun']
491        self.progress_task = None          # type: T.Optional[asyncio.Future]
492        self.max_left_width = 0            # type: int
493        self.stop = False
494        self.update = asyncio.Event()
495        self.should_erase_line = ''
496        self.test_count = 0
497        self.started_tests = 0
498        self.spinner_index = 0
499        try:
500            self.cols, _ = os.get_terminal_size(1)
501            self.is_tty = True
502        except OSError:
503            self.cols = 80
504            self.is_tty = False
505
506        self.output_start = dashes(self.SCISSORS, self.HLINE, self.cols - 2)
507        self.output_end = dashes('', self.HLINE, self.cols - 2)
508        self.sub = self.RTRI
509        try:
510            self.output_start.encode(sys.stdout.encoding or 'ascii')
511        except UnicodeEncodeError:
512            self.output_start = dashes('8<', '-', self.cols - 2)
513            self.output_end = dashes('', '-', self.cols - 2)
514            self.sub = '| '
515
516    def flush(self) -> None:
517        if self.should_erase_line:
518            print(self.should_erase_line, end='')
519            self.should_erase_line = ''
520
521    def print_progress(self, line: str) -> None:
522        print(self.should_erase_line, line, sep='', end='\r')
523        self.should_erase_line = '\x1b[K'
524
525    def request_update(self) -> None:
526        self.update.set()
527
528    def emit_progress(self, harness: 'TestHarness') -> None:
529        if self.progress_test is None:
530            self.flush()
531            return
532
533        if len(self.running_tests) == 1:
534            count = f'{self.started_tests}/{self.test_count}'
535        else:
536            count = '{}-{}/{}'.format(self.started_tests - len(self.running_tests) + 1,
537                                      self.started_tests, self.test_count)
538
539        left = '[{}] {} '.format(count, self.SPINNER[self.spinner_index])
540        self.spinner_index = (self.spinner_index + 1) % len(self.SPINNER)
541
542        right = '{spaces} {dur:{durlen}}'.format(
543            spaces=' ' * TestResult.maxlen(),
544            dur=int(time.time() - self.progress_test.starttime),
545            durlen=harness.duration_max_len)
546        if self.progress_test.timeout:
547            right += '/{timeout:{durlen}}'.format(
548                timeout=self.progress_test.timeout,
549                durlen=harness.duration_max_len)
550        right += 's'
551        detail = self.progress_test.detail
552        if detail:
553            right += '   ' + detail
554
555        line = harness.format(self.progress_test, colorize=True,
556                              max_left_width=self.max_left_width,
557                              left=left, right=right)
558        self.print_progress(line)
559
560    def start(self, harness: 'TestHarness') -> None:
561        async def report_progress() -> None:
562            loop = asyncio.get_event_loop()
563            next_update = 0.0
564            self.request_update()
565            while not self.stop:
566                await self.update.wait()
567                self.update.clear()
568
569                # We may get here simply because the progress line has been
570                # overwritten, so do not always switch.  Only do so every
571                # second, or if the printed test has finished
572                if loop.time() >= next_update:
573                    self.progress_test = None
574                    next_update = loop.time() + 1
575                    loop.call_at(next_update, self.request_update)
576
577                if (self.progress_test and
578                        self.progress_test.res is not TestResult.RUNNING):
579                    self.progress_test = None
580
581                if not self.progress_test:
582                    if not self.running_tests:
583                        continue
584                    # Pick a test in round robin order
585                    self.progress_test = self.running_tests.pop(last=False)
586                    self.running_tests.add(self.progress_test)
587
588                self.emit_progress(harness)
589            self.flush()
590
591        self.test_count = harness.test_count
592        self.cols = max(self.cols, harness.max_left_width + 30)
593
594        if self.is_tty and not harness.need_console:
595            # Account for "[aa-bb/cc] OO " in the progress report
596            self.max_left_width = 3 * len(str(self.test_count)) + 8
597            self.progress_task = asyncio.ensure_future(report_progress())
598
599    def start_test(self, harness: 'TestHarness', test: 'TestRun') -> None:
600        if harness.options.verbose and test.cmdline:
601            self.flush()
602            print(harness.format(test, mlog.colorize_console(),
603                                 max_left_width=self.max_left_width,
604                                 right=test.res.get_text(mlog.colorize_console())))
605            print(test.res.get_command_marker() + test.cmdline)
606            if test.needs_parsing:
607                pass
608            elif not test.is_parallel:
609                print(self.output_start, flush=True)
610            else:
611                print(flush=True)
612
613        self.started_tests += 1
614        self.running_tests.add(test)
615        self.running_tests.move_to_end(test, last=False)
616        self.request_update()
617
618    def shorten_log(self, harness: 'TestHarness', result: 'TestRun') -> str:
619        if not harness.options.verbose and not harness.options.print_errorlogs:
620            return ''
621
622        log = result.get_log(mlog.colorize_console(),
623                             stderr_only=result.needs_parsing)
624        if harness.options.verbose:
625            return log
626
627        lines = log.splitlines()
628        if len(lines) < 100:
629            return log
630        else:
631            return str(mlog.bold('Listing only the last 100 lines from a long log.\n')) + '\n'.join(lines[-100:])
632
633    def print_log(self, harness: 'TestHarness', result: 'TestRun') -> None:
634        if not harness.options.verbose:
635            cmdline = result.cmdline
636            if not cmdline:
637                print(result.res.get_command_marker() + result.stdo)
638                return
639            print(result.res.get_command_marker() + cmdline)
640
641        log = self.shorten_log(harness, result)
642        if log:
643            print(self.output_start)
644            print_safe(log)
645            print(self.output_end)
646
647    def log_subtest(self, harness: 'TestHarness', test: 'TestRun', s: str, result: TestResult) -> None:
648        if harness.options.verbose or (harness.options.print_errorlogs and result.is_bad()):
649            self.flush()
650            print(harness.format(test, mlog.colorize_console(), max_left_width=self.max_left_width,
651                                 prefix=self.sub,
652                                 middle=s,
653                                 right=result.get_text(mlog.colorize_console())), flush=True)
654
655            self.request_update()
656
657    def log(self, harness: 'TestHarness', result: 'TestRun') -> None:
658        self.running_tests.remove(result)
659        if result.res is TestResult.TIMEOUT and harness.options.verbose:
660            self.flush()
661            print(f'{result.name} time out (After {result.timeout} seconds)')
662
663        if not harness.options.quiet or not result.res.is_ok():
664            self.flush()
665            if harness.options.verbose and not result.is_parallel and result.cmdline:
666                if not result.needs_parsing:
667                    print(self.output_end)
668                print(harness.format(result, mlog.colorize_console(), max_left_width=self.max_left_width))
669            else:
670                print(harness.format(result, mlog.colorize_console(), max_left_width=self.max_left_width),
671                      flush=True)
672                if harness.options.verbose or result.res.is_bad():
673                    self.print_log(harness, result)
674            if harness.options.verbose or result.res.is_bad():
675                print(flush=True)
676
677        self.request_update()
678
679    async def finish(self, harness: 'TestHarness') -> None:
680        self.stop = True
681        self.request_update()
682        if self.progress_task:
683            await self.progress_task
684
685        if harness.collected_failures and \
686                (harness.options.print_errorlogs or harness.options.verbose):
687            print("\nSummary of Failures:\n")
688            for i, result in enumerate(harness.collected_failures, 1):
689                print(harness.format(result, mlog.colorize_console()))
690
691        print(harness.summary())
692
693
694class TextLogfileBuilder(TestFileLogger):
695    def start(self, harness: 'TestHarness') -> None:
696        self.file.write(f'Log of Meson test suite run on {datetime.datetime.now().isoformat()}\n\n')
697        inherit_env = env_tuple_to_str(os.environ.items())
698        self.file.write(f'Inherited environment: {inherit_env}\n\n')
699
700    def log(self, harness: 'TestHarness', result: 'TestRun') -> None:
701        self.file.write(harness.format(result, False) + '\n')
702        cmdline = result.cmdline
703        if cmdline:
704            starttime_str = time.strftime("%H:%M:%S", time.gmtime(result.starttime))
705            self.file.write(starttime_str + ' ' + cmdline + '\n')
706            self.file.write(dashes('output', '-', 78) + '\n')
707            self.file.write(result.get_log())
708            self.file.write(dashes('', '-', 78) + '\n\n')
709
710    async def finish(self, harness: 'TestHarness') -> None:
711        if harness.collected_failures:
712            self.file.write("\nSummary of Failures:\n\n")
713            for i, result in enumerate(harness.collected_failures, 1):
714                self.file.write(harness.format(result, False) + '\n')
715        self.file.write(harness.summary())
716
717        print(f'Full log written to {self.filename}')
718
719
720class JsonLogfileBuilder(TestFileLogger):
721    def log(self, harness: 'TestHarness', result: 'TestRun') -> None:
722        jresult = {'name': result.name,
723                   'stdout': result.stdo,
724                   'result': result.res.value,
725                   'starttime': result.starttime,
726                   'duration': result.duration,
727                   'returncode': result.returncode,
728                   'env': result.env,
729                   'command': result.cmd}  # type: T.Dict[str, T.Any]
730        if result.stde:
731            jresult['stderr'] = result.stde
732        self.file.write(json.dumps(jresult) + '\n')
733
734
735class JunitBuilder(TestLogger):
736
737    """Builder for Junit test results.
738
739    Junit is impossible to stream out, it requires attributes counting the
740    total number of tests, failures, skips, and errors in the root element
741    and in each test suite. As such, we use a builder class to track each
742    test case, and calculate all metadata before writing it out.
743
744    For tests with multiple results (like from a TAP test), we record the
745    test as a suite with the project_name.test_name. This allows us to track
746    each result separately. For tests with only one result (such as exit-code
747    tests) we record each one into a suite with the name project_name. The use
748    of the project_name allows us to sort subproject tests separately from
749    the root project.
750    """
751
752    def __init__(self, filename: str) -> None:
753        self.filename = filename
754        self.root = et.Element(
755            'testsuites', tests='0', errors='0', failures='0')
756        self.suites = {}  # type: T.Dict[str, et.Element]
757
758    def log(self, harness: 'TestHarness', test: 'TestRun') -> None:
759        """Log a single test case."""
760        if test.junit is not None:
761            for suite in test.junit.findall('.//testsuite'):
762                # Assume that we don't need to merge anything here...
763                suite.attrib['name'] = '{}.{}.{}'.format(test.project, test.name, suite.attrib['name'])
764
765                # GTest can inject invalid attributes
766                for case in suite.findall('.//testcase[@result]'):
767                    del case.attrib['result']
768                for case in suite.findall('.//testcase[@timestamp]'):
769                    del case.attrib['timestamp']
770                self.root.append(suite)
771            return
772
773        # In this case we have a test binary with multiple results.
774        # We want to record this so that each result is recorded
775        # separately
776        if test.results:
777            suitename = f'{test.project}.{test.name}'
778            assert suitename not in self.suites or harness.options.repeat > 1, 'duplicate suite'
779
780            suite = self.suites[suitename] = et.Element(
781                'testsuite',
782                name=suitename,
783                tests=str(len(test.results)),
784                errors=str(sum(1 for r in test.results if r.result in
785                               {TestResult.INTERRUPT, TestResult.ERROR})),
786                failures=str(sum(1 for r in test.results if r.result in
787                                 {TestResult.FAIL, TestResult.UNEXPECTEDPASS, TestResult.TIMEOUT})),
788                skipped=str(sum(1 for r in test.results if r.result is TestResult.SKIP)),
789                time=str(test.duration),
790            )
791
792            for subtest in test.results:
793                # Both name and classname are required. Use the suite name as
794                # the class name, so that e.g. GitLab groups testcases correctly.
795                testcase = et.SubElement(suite, 'testcase', name=str(subtest), classname=suitename)
796                if subtest.result is TestResult.SKIP:
797                    et.SubElement(testcase, 'skipped')
798                elif subtest.result is TestResult.ERROR:
799                    et.SubElement(testcase, 'error')
800                elif subtest.result is TestResult.FAIL:
801                    et.SubElement(testcase, 'failure')
802                elif subtest.result is TestResult.UNEXPECTEDPASS:
803                    fail = et.SubElement(testcase, 'failure')
804                    fail.text = 'Test unexpected passed.'
805                elif subtest.result is TestResult.INTERRUPT:
806                    fail = et.SubElement(testcase, 'error')
807                    fail.text = 'Test was interrupted by user.'
808                elif subtest.result is TestResult.TIMEOUT:
809                    fail = et.SubElement(testcase, 'error')
810                    fail.text = 'Test did not finish before configured timeout.'
811                if subtest.explanation:
812                    et.SubElement(testcase, 'system-out').text = subtest.explanation
813            if test.stdo:
814                out = et.SubElement(suite, 'system-out')
815                out.text = test.stdo.rstrip()
816            if test.stde:
817                err = et.SubElement(suite, 'system-err')
818                err.text = test.stde.rstrip()
819        else:
820            if test.project not in self.suites:
821                suite = self.suites[test.project] = et.Element(
822                    'testsuite', name=test.project, tests='1', errors='0',
823                    failures='0', skipped='0', time=str(test.duration))
824            else:
825                suite = self.suites[test.project]
826                suite.attrib['tests'] = str(int(suite.attrib['tests']) + 1)
827
828            testcase = et.SubElement(suite, 'testcase', name=test.name,
829                                     classname=test.project, time=str(test.duration))
830            if test.res is TestResult.SKIP:
831                et.SubElement(testcase, 'skipped')
832                suite.attrib['skipped'] = str(int(suite.attrib['skipped']) + 1)
833            elif test.res is TestResult.ERROR:
834                et.SubElement(testcase, 'error')
835                suite.attrib['errors'] = str(int(suite.attrib['errors']) + 1)
836            elif test.res is TestResult.FAIL:
837                et.SubElement(testcase, 'failure')
838                suite.attrib['failures'] = str(int(suite.attrib['failures']) + 1)
839            if test.stdo:
840                out = et.SubElement(testcase, 'system-out')
841                out.text = test.stdo.rstrip()
842            if test.stde:
843                err = et.SubElement(testcase, 'system-err')
844                err.text = test.stde.rstrip()
845
846    async def finish(self, harness: 'TestHarness') -> None:
847        """Calculate total test counts and write out the xml result."""
848        for suite in self.suites.values():
849            self.root.append(suite)
850            # Skipped is really not allowed in the "testsuits" element
851            for attr in ['tests', 'errors', 'failures']:
852                self.root.attrib[attr] = str(int(self.root.attrib[attr]) + int(suite.attrib[attr]))
853
854        tree = et.ElementTree(self.root)
855        with open(self.filename, 'wb') as f:
856            tree.write(f, encoding='utf-8', xml_declaration=True)
857
858
859class TestRun:
860    TEST_NUM = 0
861    PROTOCOL_TO_CLASS: T.Dict[TestProtocol, T.Type['TestRun']] = {}
862
863    def __new__(cls, test: TestSerialisation, *args: T.Any, **kwargs: T.Any) -> T.Any:
864        return super().__new__(TestRun.PROTOCOL_TO_CLASS[test.protocol])
865
866    def __init__(self, test: TestSerialisation, test_env: T.Dict[str, str],
867                 name: str, timeout: T.Optional[int], is_parallel: bool):
868        self.res = TestResult.PENDING
869        self.test = test
870        self._num = None       # type: T.Optional[int]
871        self.name = name
872        self.timeout = timeout
873        self.results = list()  # type: T.List[TAPParser.Test]
874        self.returncode = 0
875        self.starttime = None  # type: T.Optional[float]
876        self.duration = None   # type: T.Optional[float]
877        self.stdo = None       # type: T.Optional[str]
878        self.stde = None       # type: T.Optional[str]
879        self.cmd = None        # type: T.Optional[T.List[str]]
880        self.env = test_env    # type: T.Dict[str, str]
881        self.should_fail = test.should_fail
882        self.project = test.project_name
883        self.junit = None      # type: T.Optional[et.ElementTree]
884        self.is_parallel = is_parallel
885
886    def start(self, cmd: T.List[str]) -> None:
887        self.res = TestResult.RUNNING
888        self.starttime = time.time()
889        self.cmd = cmd
890
891    @property
892    def num(self) -> int:
893        if self._num is None:
894            TestRun.TEST_NUM += 1
895            self._num = TestRun.TEST_NUM
896        return self._num
897
898    @property
899    def detail(self) -> str:
900        if self.res is TestResult.PENDING:
901            return ''
902        if self.returncode:
903            return returncode_to_status(self.returncode)
904        if self.results:
905            # running or succeeded
906            passed = sum(x.result.is_ok() for x in self.results)
907            ran = sum(x.result is not TestResult.SKIP for x in self.results)
908            if passed == ran:
909                return f'{passed} subtests passed'
910            else:
911                return f'{passed}/{ran} subtests passed'
912        return ''
913
914    def _complete(self, returncode: int, res: TestResult,
915                  stdo: T.Optional[str], stde: T.Optional[str]) -> None:
916        assert isinstance(res, TestResult)
917        if self.should_fail and res in (TestResult.OK, TestResult.FAIL):
918            res = TestResult.UNEXPECTEDPASS if res.is_ok() else TestResult.EXPECTEDFAIL
919
920        self.res = res
921        self.returncode = returncode
922        self.duration = time.time() - self.starttime
923        self.stdo = stdo
924        self.stde = stde
925
926    @property
927    def cmdline(self) -> T.Optional[str]:
928        if not self.cmd:
929            return None
930        test_only_env = set(self.env.items()) - set(os.environ.items())
931        return env_tuple_to_str(test_only_env) + \
932            ' '.join(sh_quote(x) for x in self.cmd)
933
934    def complete_skip(self, message: str) -> None:
935        self.starttime = time.time()
936        self._complete(GNU_SKIP_RETURNCODE, TestResult.SKIP, message, None)
937
938    def complete(self, returncode: int, res: TestResult,
939                 stdo: T.Optional[str], stde: T.Optional[str]) -> None:
940        self._complete(returncode, res, stdo, stde)
941
942    def get_log(self, colorize: bool = False, stderr_only: bool = False) -> str:
943        stdo = '' if stderr_only else self.stdo
944        if self.stde:
945            res = ''
946            if stdo:
947                res += mlog.cyan('stdout:').get_text(colorize) + '\n'
948                res += stdo
949                if res[-1:] != '\n':
950                    res += '\n'
951            res += mlog.cyan('stderr:').get_text(colorize) + '\n'
952            res += self.stde
953        else:
954            res = stdo
955        if res and res[-1:] != '\n':
956            res += '\n'
957        return res
958
959    @property
960    def needs_parsing(self) -> bool:
961        return False
962
963    async def parse(self, harness: 'TestHarness', lines: T.AsyncIterator[str]) -> T.Tuple[TestResult, str]:
964        async for l in lines:
965            pass
966        return TestResult.OK, ''
967
968
969class TestRunExitCode(TestRun):
970
971    def complete(self, returncode: int, res: TestResult,
972                 stdo: T.Optional[str], stde: T.Optional[str]) -> None:
973        if res:
974            pass
975        elif returncode == GNU_SKIP_RETURNCODE:
976            res = TestResult.SKIP
977        elif returncode == GNU_ERROR_RETURNCODE:
978            res = TestResult.ERROR
979        else:
980            res = TestResult.FAIL if bool(returncode) else TestResult.OK
981        super().complete(returncode, res, stdo, stde)
982
983TestRun.PROTOCOL_TO_CLASS[TestProtocol.EXITCODE] = TestRunExitCode
984
985
986class TestRunGTest(TestRunExitCode):
987    def complete(self, returncode: int, res: TestResult,
988                 stdo: T.Optional[str], stde: T.Optional[str]) -> None:
989        filename = f'{self.test.name}.xml'
990        if self.test.workdir:
991            filename = os.path.join(self.test.workdir, filename)
992
993        try:
994            self.junit = et.parse(filename)
995        except FileNotFoundError:
996            # This can happen if the test fails to run or complete for some
997            # reason, like the rpath for libgtest isn't properly set. ExitCode
998            # will handle the failure, don't generate a stacktrace.
999            pass
1000
1001        super().complete(returncode, res, stdo, stde)
1002
1003TestRun.PROTOCOL_TO_CLASS[TestProtocol.GTEST] = TestRunGTest
1004
1005
1006class TestRunTAP(TestRun):
1007    @property
1008    def needs_parsing(self) -> bool:
1009        return True
1010
1011    def complete(self, returncode: int, res: TestResult,
1012                 stdo: str, stde: str) -> None:
1013        if returncode != 0 and not res.was_killed():
1014            res = TestResult.ERROR
1015            stde = stde or ''
1016            stde += f'\n(test program exited with status code {returncode})'
1017
1018        super().complete(returncode, res, stdo, stde)
1019
1020    async def parse(self, harness: 'TestHarness', lines: T.AsyncIterator[str]) -> T.Tuple[TestResult, str]:
1021        res = TestResult.OK
1022        error = ''
1023
1024        async for i in TAPParser().parse_async(lines):
1025            if isinstance(i, TAPParser.Bailout):
1026                res = TestResult.ERROR
1027                harness.log_subtest(self, i.message, res)
1028            elif isinstance(i, TAPParser.Test):
1029                self.results.append(i)
1030                if i.result.is_bad():
1031                    res = TestResult.FAIL
1032                harness.log_subtest(self, i.name or f'subtest {i.number}', i.result)
1033            elif isinstance(i, TAPParser.Error):
1034                error = '\nTAP parsing error: ' + i.message
1035                res = TestResult.ERROR
1036
1037        if all(t.result is TestResult.SKIP for t in self.results):
1038            # This includes the case where self.results is empty
1039            res = TestResult.SKIP
1040        return res, error
1041
1042TestRun.PROTOCOL_TO_CLASS[TestProtocol.TAP] = TestRunTAP
1043
1044
1045class TestRunRust(TestRun):
1046    @property
1047    def needs_parsing(self) -> bool:
1048        return True
1049
1050    async def parse(self, harness: 'TestHarness', lines: T.AsyncIterator[str]) -> T.Tuple[TestResult, str]:
1051        def parse_res(n: int, name: str, result: str) -> TAPParser.Test:
1052            if result == 'ok':
1053                return TAPParser.Test(n, name, TestResult.OK, None)
1054            elif result == 'ignored':
1055                return TAPParser.Test(n, name, TestResult.SKIP, None)
1056            elif result == 'FAILED':
1057                return TAPParser.Test(n, name, TestResult.FAIL, None)
1058            return TAPParser.Test(n, name, TestResult.ERROR,
1059                                  f'Unsupported output from rust test: {result}')
1060
1061        n = 1
1062        async for line in lines:
1063            if line.startswith('test ') and not line.startswith('test result'):
1064                _, name, _, result = line.rstrip().split(' ')
1065                name = name.replace('::', '.')
1066                t = parse_res(n, name, result)
1067                self.results.append(t)
1068                harness.log_subtest(self, name, t.result)
1069                n += 1
1070
1071        if all(t.result is TestResult.SKIP for t in self.results):
1072            # This includes the case where self.results is empty
1073            return TestResult.SKIP, ''
1074        elif any(t.result is TestResult.ERROR for t in self.results):
1075            return TestResult.ERROR, ''
1076        elif any(t.result is TestResult.FAIL for t in self.results):
1077            return TestResult.FAIL, ''
1078        return TestResult.OK, ''
1079
1080TestRun.PROTOCOL_TO_CLASS[TestProtocol.RUST] = TestRunRust
1081
1082
1083def decode(stream: T.Union[None, bytes]) -> str:
1084    if stream is None:
1085        return ''
1086    try:
1087        return stream.decode('utf-8')
1088    except UnicodeDecodeError:
1089        return stream.decode('iso-8859-1', errors='ignore')
1090
1091async def read_decode(reader: asyncio.StreamReader, console_mode: ConsoleUser) -> str:
1092    stdo_lines = []
1093    try:
1094        while not reader.at_eof():
1095            line = decode(await reader.readline())
1096            stdo_lines.append(line)
1097            if console_mode is ConsoleUser.STDOUT:
1098                print(line, end='', flush=True)
1099        return ''.join(stdo_lines)
1100    except asyncio.CancelledError:
1101        return ''.join(stdo_lines)
1102
1103# Extract lines out of the StreamReader.  Print them
1104# along the way if requested, and at the end collect
1105# them all into a future.
1106async def read_decode_lines(reader: asyncio.StreamReader, q: 'asyncio.Queue[T.Optional[str]]',
1107                            console_mode: ConsoleUser) -> str:
1108    stdo_lines = []
1109    try:
1110        while not reader.at_eof():
1111            line = decode(await reader.readline())
1112            stdo_lines.append(line)
1113            if console_mode is ConsoleUser.STDOUT:
1114                print(line, end='', flush=True)
1115            await q.put(line)
1116        return ''.join(stdo_lines)
1117    except asyncio.CancelledError:
1118        return ''.join(stdo_lines)
1119    finally:
1120        await q.put(None)
1121
1122def run_with_mono(fname: str) -> bool:
1123    return fname.endswith('.exe') and not (is_windows() or is_cygwin())
1124
1125def check_testdata(objs: T.List[TestSerialisation]) -> T.List[TestSerialisation]:
1126    if not isinstance(objs, list):
1127        raise MesonVersionMismatchException('<unknown>', coredata_version)
1128    for obj in objs:
1129        if not isinstance(obj, TestSerialisation):
1130            raise MesonVersionMismatchException('<unknown>', coredata_version)
1131        if not hasattr(obj, 'version'):
1132            raise MesonVersionMismatchException('<unknown>', coredata_version)
1133        if major_versions_differ(obj.version, coredata_version):
1134            raise MesonVersionMismatchException(obj.version, coredata_version)
1135    return objs
1136
1137# Custom waiting primitives for asyncio
1138
1139async def try_wait_one(*awaitables: T.Any, timeout: T.Optional[T.Union[int, float]]) -> None:
1140    """Wait for completion of one of the given futures, ignoring timeouts."""
1141    await asyncio.wait(awaitables,
1142                       timeout=timeout, return_when=asyncio.FIRST_COMPLETED)
1143
1144async def queue_iter(q: 'asyncio.Queue[T.Optional[str]]') -> T.AsyncIterator[str]:
1145    while True:
1146        item = await q.get()
1147        q.task_done()
1148        if item is None:
1149            break
1150        yield item
1151
1152async def complete(future: asyncio.Future) -> None:
1153    """Wait for completion of the given future, ignoring cancellation."""
1154    try:
1155        await future
1156    except asyncio.CancelledError:
1157        pass
1158
1159async def complete_all(futures: T.Iterable[asyncio.Future],
1160                       timeout: T.Optional[T.Union[int, float]] = None) -> None:
1161    """Wait for completion of all the given futures, ignoring cancellation.
1162       If timeout is not None, raise an asyncio.TimeoutError after the given
1163       time has passed.  asyncio.TimeoutError is only raised if some futures
1164       have not completed and none have raised exceptions, even if timeout
1165       is zero."""
1166
1167    def check_futures(futures: T.Iterable[asyncio.Future]) -> None:
1168        # Raise exceptions if needed
1169        left = False
1170        for f in futures:
1171            if not f.done():
1172                left = True
1173            elif not f.cancelled():
1174                f.result()
1175        if left:
1176            raise asyncio.TimeoutError
1177
1178    # Python is silly and does not have a variant of asyncio.wait with an
1179    # absolute time as deadline.
1180    deadline = None if timeout is None else asyncio.get_event_loop().time() + timeout
1181    while futures and (timeout is None or timeout > 0):
1182        done, futures = await asyncio.wait(futures, timeout=timeout,
1183                                           return_when=asyncio.FIRST_EXCEPTION)
1184        check_futures(done)
1185        if deadline:
1186            timeout = deadline - asyncio.get_event_loop().time()
1187
1188    check_futures(futures)
1189
1190
1191class TestSubprocess:
1192    def __init__(self, p: asyncio.subprocess.Process,
1193                 stdout: T.Optional[int], stderr: T.Optional[int],
1194                 postwait_fn: T.Callable[[], None] = None):
1195        self._process = p
1196        self.stdout = stdout
1197        self.stderr = stderr
1198        self.stdo_task = None            # type: T.Optional[asyncio.Future[str]]
1199        self.stde_task = None            # type: T.Optional[asyncio.Future[str]]
1200        self.postwait_fn = postwait_fn   # type: T.Callable[[], None]
1201        self.all_futures = []            # type: T.List[asyncio.Future]
1202
1203    def stdout_lines(self, console_mode: ConsoleUser) -> T.AsyncIterator[str]:
1204        q = asyncio.Queue()              # type: asyncio.Queue[T.Optional[str]]
1205        decode_coro = read_decode_lines(self._process.stdout, q, console_mode)
1206        self.stdo_task = asyncio.ensure_future(decode_coro)
1207        return queue_iter(q)
1208
1209    def communicate(self, console_mode: ConsoleUser) -> T.Tuple[T.Optional[T.Awaitable[str]],
1210                                                                T.Optional[T.Awaitable[str]]]:
1211        # asyncio.ensure_future ensures that printing can
1212        # run in the background, even before it is awaited
1213        if self.stdo_task is None and self.stdout is not None:
1214            decode_coro = read_decode(self._process.stdout, console_mode)
1215            self.stdo_task = asyncio.ensure_future(decode_coro)
1216            self.all_futures.append(self.stdo_task)
1217        if self.stderr is not None and self.stderr != asyncio.subprocess.STDOUT:
1218            decode_coro = read_decode(self._process.stderr, console_mode)
1219            self.stde_task = asyncio.ensure_future(decode_coro)
1220            self.all_futures.append(self.stde_task)
1221
1222        return self.stdo_task, self.stde_task
1223
1224    async def _kill(self) -> T.Optional[str]:
1225        # Python does not provide multiplatform support for
1226        # killing a process and all its children so we need
1227        # to roll our own.
1228        p = self._process
1229        try:
1230            if is_windows():
1231                subprocess.run(['taskkill', '/F', '/T', '/PID', str(p.pid)])
1232            else:
1233                # Send a termination signal to the process group that setsid()
1234                # created - giving it a chance to perform any cleanup.
1235                os.killpg(p.pid, signal.SIGTERM)
1236
1237                # Make sure the termination signal actually kills the process
1238                # group, otherwise retry with a SIGKILL.
1239                await try_wait_one(p.wait(), timeout=0.5)
1240                if p.returncode is not None:
1241                    return None
1242
1243                os.killpg(p.pid, signal.SIGKILL)
1244
1245            await try_wait_one(p.wait(), timeout=1)
1246            if p.returncode is not None:
1247                return None
1248
1249            # An earlier kill attempt has not worked for whatever reason.
1250            # Try to kill it one last time with a direct call.
1251            # If the process has spawned children, they will remain around.
1252            p.kill()
1253            await try_wait_one(p.wait(), timeout=1)
1254            if p.returncode is not None:
1255                return None
1256            return 'Test process could not be killed.'
1257        except ProcessLookupError:
1258            # Sometimes (e.g. with Wine) this happens.  There's nothing
1259            # we can do, probably the process already died so just wait
1260            # for the event loop to pick that up.
1261            await p.wait()
1262            return None
1263        finally:
1264            if self.stdo_task:
1265                self.stdo_task.cancel()
1266            if self.stde_task:
1267                self.stde_task.cancel()
1268
1269    async def wait(self, timeout: T.Optional[int]) -> T.Tuple[int, TestResult, T.Optional[str]]:
1270        p = self._process
1271        result = None
1272        additional_error = None
1273
1274        self.all_futures.append(asyncio.ensure_future(p.wait()))
1275        try:
1276            await complete_all(self.all_futures, timeout=timeout)
1277        except asyncio.TimeoutError:
1278            additional_error = await self._kill()
1279            result = TestResult.TIMEOUT
1280        except asyncio.CancelledError:
1281            # The main loop must have seen Ctrl-C.
1282            additional_error = await self._kill()
1283            result = TestResult.INTERRUPT
1284        finally:
1285            if self.postwait_fn:
1286                self.postwait_fn()
1287
1288        return p.returncode or 0, result, additional_error
1289
1290class SingleTestRunner:
1291
1292    def __init__(self, test: TestSerialisation, env: T.Dict[str, str], name: str,
1293                 options: argparse.Namespace):
1294        self.test = test
1295        self.options = options
1296        self.cmd = self._get_cmd()
1297
1298        if self.cmd and self.test.extra_paths:
1299            env['PATH'] = os.pathsep.join(self.test.extra_paths + ['']) + env['PATH']
1300            winecmd = []
1301            for c in self.cmd:
1302                winecmd.append(c)
1303                if os.path.basename(c).startswith('wine'):
1304                    env['WINEPATH'] = get_wine_shortpath(
1305                        winecmd,
1306                        ['Z:' + p for p in self.test.extra_paths] + env.get('WINEPATH', '').split(';')
1307                    )
1308                    break
1309
1310        # If MALLOC_PERTURB_ is not set, or if it is set to an empty value,
1311        # (i.e., the test or the environment don't explicitly set it), set
1312        # it ourselves. We do this unconditionally for regular tests
1313        # because it is extremely useful to have.
1314        # Setting MALLOC_PERTURB_="0" will completely disable this feature.
1315        if ('MALLOC_PERTURB_' not in env or not env['MALLOC_PERTURB_']) and not options.benchmark:
1316            env['MALLOC_PERTURB_'] = str(random.randint(1, 255))
1317
1318        if self.options.gdb or self.test.timeout is None or self.test.timeout <= 0:
1319            timeout = None
1320        elif self.options.timeout_multiplier is None:
1321            timeout = self.test.timeout
1322        elif self.options.timeout_multiplier <= 0:
1323            timeout = None
1324        else:
1325            timeout = self.test.timeout * self.options.timeout_multiplier
1326
1327        is_parallel = test.is_parallel and self.options.num_processes > 1 and not self.options.gdb
1328        self.runobj = TestRun(test, env, name, timeout, is_parallel)
1329
1330        if self.options.gdb:
1331            self.console_mode = ConsoleUser.GDB
1332        elif self.options.verbose and not is_parallel and not self.runobj.needs_parsing:
1333            self.console_mode = ConsoleUser.STDOUT
1334        else:
1335            self.console_mode = ConsoleUser.LOGGER
1336
1337    def _get_test_cmd(self) -> T.Optional[T.List[str]]:
1338        if self.test.fname[0].endswith('.jar'):
1339            return ['java', '-jar'] + self.test.fname
1340        elif not self.test.is_cross_built and run_with_mono(self.test.fname[0]):
1341            return ['mono'] + self.test.fname
1342        elif self.test.cmd_is_built and self.test.is_cross_built and self.test.needs_exe_wrapper:
1343            if self.test.exe_runner is None:
1344                # Can not run test on cross compiled executable
1345                # because there is no execute wrapper.
1346                return None
1347            elif self.test.cmd_is_built:
1348                # If the command is not built (ie, its a python script),
1349                # then we don't check for the exe-wrapper
1350                if not self.test.exe_runner.found():
1351                    msg = ('The exe_wrapper defined in the cross file {!r} was not '
1352                           'found. Please check the command and/or add it to PATH.')
1353                    raise TestException(msg.format(self.test.exe_runner.name))
1354                return self.test.exe_runner.get_command() + self.test.fname
1355        return self.test.fname
1356
1357    def _get_cmd(self) -> T.Optional[T.List[str]]:
1358        test_cmd = self._get_test_cmd()
1359        if not test_cmd:
1360            return None
1361        return TestHarness.get_wrapper(self.options) + test_cmd
1362
1363    @property
1364    def is_parallel(self) -> bool:
1365        return self.runobj.is_parallel
1366
1367    @property
1368    def visible_name(self) -> str:
1369        return self.runobj.name
1370
1371    @property
1372    def timeout(self) -> T.Optional[int]:
1373        return self.runobj.timeout
1374
1375    async def run(self, harness: 'TestHarness') -> TestRun:
1376        if self.cmd is None:
1377            skip_stdout = 'Not run because can not execute cross compiled binaries.'
1378            harness.log_start_test(self.runobj)
1379            self.runobj.complete_skip(skip_stdout)
1380        else:
1381            cmd = self.cmd + self.test.cmd_args + self.options.test_args
1382            self.runobj.start(cmd)
1383            harness.log_start_test(self.runobj)
1384            await self._run_cmd(harness, cmd)
1385        return self.runobj
1386
1387    async def _run_subprocess(self, args: T.List[str], *,
1388                              stdout: int, stderr: int,
1389                              env: T.Dict[str, str], cwd: T.Optional[str]) -> TestSubprocess:
1390        # Let gdb handle ^C instead of us
1391        if self.options.gdb:
1392            previous_sigint_handler = signal.getsignal(signal.SIGINT)
1393            # Make the meson executable ignore SIGINT while gdb is running.
1394            signal.signal(signal.SIGINT, signal.SIG_IGN)
1395
1396        def preexec_fn() -> None:
1397            if self.options.gdb:
1398                # Restore the SIGINT handler for the child process to
1399                # ensure it can handle it.
1400                signal.signal(signal.SIGINT, signal.SIG_DFL)
1401            else:
1402                # We don't want setsid() in gdb because gdb needs the
1403                # terminal in order to handle ^C and not show tcsetpgrp()
1404                # errors avoid not being able to use the terminal.
1405                os.setsid()
1406
1407        def postwait_fn() -> None:
1408            if self.options.gdb:
1409                # Let us accept ^C again
1410                signal.signal(signal.SIGINT, previous_sigint_handler)
1411
1412        p = await asyncio.create_subprocess_exec(*args,
1413                                                 stdout=stdout,
1414                                                 stderr=stderr,
1415                                                 env=env,
1416                                                 cwd=cwd,
1417                                                 preexec_fn=preexec_fn if not is_windows() else None)
1418        return TestSubprocess(p, stdout=stdout, stderr=stderr,
1419                              postwait_fn=postwait_fn if not is_windows() else None)
1420
1421    async def _run_cmd(self, harness: 'TestHarness', cmd: T.List[str]) -> None:
1422        if self.console_mode is ConsoleUser.GDB:
1423            stdout = None
1424            stderr = None
1425        else:
1426            stdout = asyncio.subprocess.PIPE
1427            stderr = asyncio.subprocess.STDOUT \
1428                if not self.options.split and not self.runobj.needs_parsing \
1429                else asyncio.subprocess.PIPE
1430
1431        extra_cmd = []  # type: T.List[str]
1432        if self.test.protocol is TestProtocol.GTEST:
1433            gtestname = self.test.name
1434            if self.test.workdir:
1435                gtestname = os.path.join(self.test.workdir, self.test.name)
1436            extra_cmd.append(f'--gtest_output=xml:{gtestname}.xml')
1437
1438        p = await self._run_subprocess(cmd + extra_cmd,
1439                                       stdout=stdout,
1440                                       stderr=stderr,
1441                                       env=self.runobj.env,
1442                                       cwd=self.test.workdir)
1443
1444        parse_task = None
1445        if self.runobj.needs_parsing:
1446            parse_coro = self.runobj.parse(harness, p.stdout_lines(self.console_mode))
1447            parse_task = asyncio.ensure_future(parse_coro)
1448
1449        stdo_task, stde_task = p.communicate(self.console_mode)
1450        returncode, result, additional_error = await p.wait(self.runobj.timeout)
1451
1452        if parse_task is not None:
1453            res, error = await parse_task
1454            if error:
1455                additional_error = join_lines(additional_error, error)
1456            result = result or res
1457
1458        stdo = await stdo_task if stdo_task else ''
1459        stde = await stde_task if stde_task else ''
1460        stde = join_lines(stde, additional_error)
1461        self.runobj.complete(returncode, result, stdo, stde)
1462
1463
1464class TestHarness:
1465    def __init__(self, options: argparse.Namespace):
1466        self.options = options
1467        self.collected_failures = []  # type: T.List[TestRun]
1468        self.fail_count = 0
1469        self.expectedfail_count = 0
1470        self.unexpectedpass_count = 0
1471        self.success_count = 0
1472        self.skip_count = 0
1473        self.timeout_count = 0
1474        self.test_count = 0
1475        self.name_max_len = 0
1476        self.is_run = False
1477        self.loggers = []         # type: T.List[TestLogger]
1478        self.loggers.append(ConsoleLogger())
1479        self.need_console = False
1480
1481        self.logfile_base = None  # type: T.Optional[str]
1482        if self.options.logbase and not self.options.gdb:
1483            namebase = None
1484            self.logfile_base = os.path.join(self.options.wd, 'meson-logs', self.options.logbase)
1485
1486            if self.options.wrapper:
1487                namebase = os.path.basename(self.get_wrapper(self.options)[0])
1488            elif self.options.setup:
1489                namebase = self.options.setup.replace(":", "_")
1490
1491            if namebase:
1492                self.logfile_base += '-' + namebase.replace(' ', '_')
1493
1494        startdir = os.getcwd()
1495        try:
1496            os.chdir(self.options.wd)
1497            self.build_data = build.load(os.getcwd())
1498            if not self.options.setup:
1499                self.options.setup = self.build_data.test_setup_default_name
1500            if self.options.benchmark:
1501                self.tests = self.load_tests('meson_benchmark_setup.dat')
1502            else:
1503                self.tests = self.load_tests('meson_test_setup.dat')
1504        finally:
1505            os.chdir(startdir)
1506
1507        ss = set()
1508        for t in self.tests:
1509            for s in t.suite:
1510                ss.add(s)
1511        self.suites = list(ss)
1512
1513    def load_tests(self, file_name: str) -> T.List[TestSerialisation]:
1514        datafile = Path('meson-private') / file_name
1515        if not datafile.is_file():
1516            raise TestException(f'Directory {self.options.wd!r} does not seem to be a Meson build directory.')
1517        with datafile.open('rb') as f:
1518            objs = check_testdata(pickle.load(f))
1519        return objs
1520
1521    def __enter__(self) -> 'TestHarness':
1522        return self
1523
1524    def __exit__(self, exc_type: T.Any, exc_value: T.Any, traceback: T.Any) -> None:
1525        self.close_logfiles()
1526
1527    def close_logfiles(self) -> None:
1528        for l in self.loggers:
1529            l.close()
1530
1531    def get_test_setup(self, test: T.Optional[TestSerialisation]) -> build.TestSetup:
1532        if ':' in self.options.setup:
1533            if self.options.setup not in self.build_data.test_setups:
1534                sys.exit(f"Unknown test setup '{self.options.setup}'.")
1535            return self.build_data.test_setups[self.options.setup]
1536        else:
1537            full_name = test.project_name + ":" + self.options.setup
1538            if full_name not in self.build_data.test_setups:
1539                sys.exit(f"Test setup '{self.options.setup}' not found from project '{test.project_name}'.")
1540            return self.build_data.test_setups[full_name]
1541
1542    def merge_setup_options(self, options: argparse.Namespace, test: TestSerialisation) -> T.Dict[str, str]:
1543        current = self.get_test_setup(test)
1544        if not options.gdb:
1545            options.gdb = current.gdb
1546        if options.gdb:
1547            options.verbose = True
1548        if options.timeout_multiplier is None:
1549            options.timeout_multiplier = current.timeout_multiplier
1550    #    if options.env is None:
1551    #        options.env = current.env # FIXME, should probably merge options here.
1552        if options.wrapper is None:
1553            options.wrapper = current.exe_wrapper
1554        elif current.exe_wrapper:
1555            sys.exit('Conflict: both test setup and command line specify an exe wrapper.')
1556        return current.env.get_env(os.environ.copy())
1557
1558    def get_test_runner(self, test: TestSerialisation) -> SingleTestRunner:
1559        name = self.get_pretty_suite(test)
1560        options = deepcopy(self.options)
1561        if self.options.setup:
1562            env = self.merge_setup_options(options, test)
1563        else:
1564            env = os.environ.copy()
1565        test_env = test.env.get_env(env)
1566        env.update(test_env)
1567        if (test.is_cross_built and test.needs_exe_wrapper and
1568                test.exe_runner and test.exe_runner.found()):
1569            env['MESON_EXE_WRAPPER'] = join_args(test.exe_runner.get_command())
1570        return SingleTestRunner(test, env, name, options)
1571
1572    def process_test_result(self, result: TestRun) -> None:
1573        if result.res is TestResult.TIMEOUT:
1574            self.timeout_count += 1
1575        elif result.res is TestResult.SKIP:
1576            self.skip_count += 1
1577        elif result.res is TestResult.OK:
1578            self.success_count += 1
1579        elif result.res in {TestResult.FAIL, TestResult.ERROR, TestResult.INTERRUPT}:
1580            self.fail_count += 1
1581        elif result.res is TestResult.EXPECTEDFAIL:
1582            self.expectedfail_count += 1
1583        elif result.res is TestResult.UNEXPECTEDPASS:
1584            self.unexpectedpass_count += 1
1585        else:
1586            sys.exit(f'Unknown test result encountered: {result.res}')
1587
1588        if result.res.is_bad():
1589            self.collected_failures.append(result)
1590        for l in self.loggers:
1591            l.log(self, result)
1592
1593    @property
1594    def numlen(self) -> int:
1595        return len(str(self.test_count))
1596
1597    @property
1598    def max_left_width(self) -> int:
1599        return 2 * self.numlen + 2
1600
1601    def format(self, result: TestRun, colorize: bool,
1602               max_left_width: int = 0,
1603               prefix: str = '',
1604               left: T.Optional[str] = None,
1605               middle: T.Optional[str] = None,
1606               right: T.Optional[str] = None) -> str:
1607
1608        if left is None:
1609            left = '{num:{numlen}}/{testcount} '.format(
1610                numlen=self.numlen,
1611                num=result.num,
1612                testcount=self.test_count)
1613
1614        # A non-default max_left_width lets the logger print more stuff before the
1615        # name, while ensuring that the rightmost columns remain aligned.
1616        max_left_width = max(max_left_width, self.max_left_width)
1617
1618        if middle is None:
1619            middle = result.name
1620        extra_mid_width = max_left_width + self.name_max_len + 1 - uniwidth(middle) - uniwidth(left) - uniwidth(prefix)
1621        middle += ' ' * max(1, extra_mid_width)
1622
1623        if right is None:
1624            right = '{res} {dur:{durlen}.2f}s'.format(
1625                res=result.res.get_text(colorize),
1626                dur=result.duration,
1627                durlen=self.duration_max_len + 3)
1628            detail = result.detail
1629            if detail:
1630                right += '   ' + detail
1631        return prefix + left + middle + right
1632
1633    def summary(self) -> str:
1634        return textwrap.dedent('''
1635
1636            Ok:                 {:<4}
1637            Expected Fail:      {:<4}
1638            Fail:               {:<4}
1639            Unexpected Pass:    {:<4}
1640            Skipped:            {:<4}
1641            Timeout:            {:<4}
1642            ''').format(self.success_count, self.expectedfail_count, self.fail_count,
1643                        self.unexpectedpass_count, self.skip_count, self.timeout_count)
1644
1645    def total_failure_count(self) -> int:
1646        return self.fail_count + self.unexpectedpass_count + self.timeout_count
1647
1648    def doit(self) -> int:
1649        if self.is_run:
1650            raise RuntimeError('Test harness object can only be used once.')
1651        self.is_run = True
1652        tests = self.get_tests()
1653        if not tests:
1654            return 0
1655        if not self.options.no_rebuild and not rebuild_deps(self.options.wd, tests):
1656            # We return 125 here in case the build failed.
1657            # The reason is that exit code 125 tells `git bisect run` that the current
1658            # commit should be skipped.  Thus users can directly use `meson test` to
1659            # bisect without needing to handle the does-not-build case separately in a
1660            # wrapper script.
1661            sys.exit(125)
1662
1663        self.name_max_len = max([uniwidth(self.get_pretty_suite(test)) for test in tests])
1664        startdir = os.getcwd()
1665        try:
1666            os.chdir(self.options.wd)
1667            runners = []             # type: T.List[SingleTestRunner]
1668            for i in range(self.options.repeat):
1669                runners.extend(self.get_test_runner(test) for test in tests)
1670                if i == 0:
1671                    self.duration_max_len = max([len(str(int(runner.timeout or 99)))
1672                                                 for runner in runners])
1673                    # Disable the progress report if it gets in the way
1674                    self.need_console = any(runner.console_mode is not ConsoleUser.LOGGER
1675                                             for runner in runners)
1676
1677            self.test_count = len(runners)
1678            self.run_tests(runners)
1679        finally:
1680            os.chdir(startdir)
1681        return self.total_failure_count()
1682
1683    @staticmethod
1684    def split_suite_string(suite: str) -> T.Tuple[str, str]:
1685        if ':' in suite:
1686            split = suite.split(':', 1)
1687            assert len(split) == 2
1688            return split[0], split[1]
1689        else:
1690            return suite, ""
1691
1692    @staticmethod
1693    def test_in_suites(test: TestSerialisation, suites: T.List[str]) -> bool:
1694        for suite in suites:
1695            (prj_match, st_match) = TestHarness.split_suite_string(suite)
1696            for prjst in test.suite:
1697                (prj, st) = TestHarness.split_suite_string(prjst)
1698
1699                # the SUITE can be passed as
1700                #     suite_name
1701                # or
1702                #     project_name:suite_name
1703                # so we need to select only the test belonging to project_name
1704
1705                # this if handle the first case (i.e., SUITE == suite_name)
1706
1707                # in this way we can run tests belonging to different
1708                # (sub)projects which share the same suite_name
1709                if not st_match and st == prj_match:
1710                    return True
1711
1712                # these two conditions are needed to handle the second option
1713                # i.e., SUITE == project_name:suite_name
1714
1715                # in this way we select the only the tests of
1716                # project_name with suite_name
1717                if prj_match and prj != prj_match:
1718                    continue
1719                if st_match and st != st_match:
1720                    continue
1721                return True
1722        return False
1723
1724    def test_suitable(self, test: TestSerialisation) -> bool:
1725        if TestHarness.test_in_suites(test, self.options.exclude_suites):
1726            return False
1727
1728        if self.options.include_suites:
1729            # Both force inclusion (overriding add_test_setup) and exclude
1730            # everything else
1731            return TestHarness.test_in_suites(test, self.options.include_suites)
1732
1733        if self.options.setup:
1734            setup = self.get_test_setup(test)
1735            if TestHarness.test_in_suites(test, setup.exclude_suites):
1736                return False
1737
1738        return True
1739
1740    def tests_from_args(self, tests: T.List[TestSerialisation]) -> T.Generator[TestSerialisation, None, None]:
1741        '''
1742        Allow specifying test names like "meson test foo1 foo2", where test('foo1', ...)
1743
1744        Also support specifying the subproject to run tests from like
1745        "meson test subproj:" (all tests inside subproj) or "meson test subproj:foo1"
1746        to run foo1 inside subproj. Coincidentally also "meson test :foo1" to
1747        run all tests with that name across all subprojects, which is
1748        identical to "meson test foo1"
1749        '''
1750        for arg in self.options.args:
1751            if ':' in arg:
1752                subproj, name = arg.split(':', maxsplit=1)
1753            else:
1754                subproj, name = '', arg
1755            for t in tests:
1756                if subproj and t.project_name != subproj:
1757                    continue
1758                if name and t.name != name:
1759                    continue
1760                yield t
1761
1762    def get_tests(self) -> T.List[TestSerialisation]:
1763        if not self.tests:
1764            print('No tests defined.')
1765            return []
1766
1767        tests = [t for t in self.tests if self.test_suitable(t)]
1768        if self.options.args:
1769            tests = list(self.tests_from_args(tests))
1770
1771        if not tests:
1772            print('No suitable tests defined.')
1773            return []
1774
1775        return tests
1776
1777    def flush_logfiles(self) -> None:
1778        for l in self.loggers:
1779            l.flush()
1780
1781    def open_logfiles(self) -> None:
1782        if not self.logfile_base:
1783            return
1784
1785        self.loggers.append(JunitBuilder(self.logfile_base + '.junit.xml'))
1786        self.loggers.append(JsonLogfileBuilder(self.logfile_base + '.json'))
1787        self.loggers.append(TextLogfileBuilder(self.logfile_base + '.txt', errors='surrogateescape'))
1788
1789    @staticmethod
1790    def get_wrapper(options: argparse.Namespace) -> T.List[str]:
1791        wrap = []  # type: T.List[str]
1792        if options.gdb:
1793            wrap = [options.gdb_path, '--quiet', '--nh']
1794            if options.repeat > 1:
1795                wrap += ['-ex', 'run', '-ex', 'quit']
1796            # Signal the end of arguments to gdb
1797            wrap += ['--args']
1798        if options.wrapper:
1799            wrap += options.wrapper
1800        return wrap
1801
1802    def get_pretty_suite(self, test: TestSerialisation) -> str:
1803        if len(self.suites) > 1 and test.suite:
1804            rv = TestHarness.split_suite_string(test.suite[0])[0]
1805            s = "+".join(TestHarness.split_suite_string(s)[1] for s in test.suite)
1806            if s:
1807                rv += ":"
1808            return rv + s + " / " + test.name
1809        else:
1810            return test.name
1811
1812    def run_tests(self, runners: T.List[SingleTestRunner]) -> None:
1813        try:
1814            self.open_logfiles()
1815            # Replace with asyncio.run once we can require Python 3.7
1816            loop = asyncio.get_event_loop()
1817            loop.run_until_complete(self._run_tests(runners))
1818        finally:
1819            self.close_logfiles()
1820
1821    def log_subtest(self, test: TestRun, s: str, res: TestResult) -> None:
1822        for l in self.loggers:
1823            l.log_subtest(self, test, s, res)
1824
1825    def log_start_test(self, test: TestRun) -> None:
1826        for l in self.loggers:
1827            l.start_test(self, test)
1828
1829    async def _run_tests(self, runners: T.List[SingleTestRunner]) -> None:
1830        semaphore = asyncio.Semaphore(self.options.num_processes)
1831        futures = deque()  # type: T.Deque[asyncio.Future]
1832        running_tests = dict() # type: T.Dict[asyncio.Future, str]
1833        interrupted = False
1834        ctrlc_times = deque(maxlen=MAX_CTRLC) # type: T.Deque[float]
1835
1836        async def run_test(test: SingleTestRunner) -> None:
1837            async with semaphore:
1838                if interrupted or (self.options.repeat > 1 and self.fail_count):
1839                    return
1840                res = await test.run(self)
1841                self.process_test_result(res)
1842
1843        def test_done(f: asyncio.Future) -> None:
1844            if not f.cancelled():
1845                f.result()
1846            futures.remove(f)
1847            try:
1848                del running_tests[f]
1849            except KeyError:
1850                pass
1851
1852        def cancel_one_test(warn: bool) -> None:
1853            future = futures.popleft()
1854            futures.append(future)
1855            if warn:
1856                self.flush_logfiles()
1857                mlog.warning('CTRL-C detected, interrupting {}'.format(running_tests[future]))
1858            del running_tests[future]
1859            future.cancel()
1860
1861        def cancel_all_tests() -> None:
1862            nonlocal interrupted
1863            interrupted = True
1864            while running_tests:
1865                cancel_one_test(False)
1866
1867        def sigterm_handler() -> None:
1868            if interrupted:
1869                return
1870            self.flush_logfiles()
1871            mlog.warning('Received SIGTERM, exiting')
1872            cancel_all_tests()
1873
1874        def sigint_handler() -> None:
1875            # We always pick the longest-running future that has not been cancelled
1876            # If all the tests have been CTRL-C'ed, just stop
1877            nonlocal interrupted
1878            if interrupted:
1879                return
1880            ctrlc_times.append(asyncio.get_event_loop().time())
1881            if len(ctrlc_times) == MAX_CTRLC and ctrlc_times[-1] - ctrlc_times[0] < 1:
1882                self.flush_logfiles()
1883                mlog.warning('CTRL-C detected, exiting')
1884                cancel_all_tests()
1885            elif running_tests:
1886                cancel_one_test(True)
1887            else:
1888                self.flush_logfiles()
1889                mlog.warning('CTRL-C detected, exiting')
1890                interrupted = True
1891
1892        for l in self.loggers:
1893            l.start(self)
1894
1895        if sys.platform != 'win32':
1896            if os.getpgid(0) == os.getpid():
1897                asyncio.get_event_loop().add_signal_handler(signal.SIGINT, sigint_handler)
1898            else:
1899                asyncio.get_event_loop().add_signal_handler(signal.SIGINT, sigterm_handler)
1900            asyncio.get_event_loop().add_signal_handler(signal.SIGTERM, sigterm_handler)
1901        try:
1902            for runner in runners:
1903                if not runner.is_parallel:
1904                    await complete_all(futures)
1905                future = asyncio.ensure_future(run_test(runner))
1906                futures.append(future)
1907                running_tests[future] = runner.visible_name
1908                future.add_done_callback(test_done)
1909                if not runner.is_parallel:
1910                    await complete(future)
1911                if self.options.repeat > 1 and self.fail_count:
1912                    break
1913
1914            await complete_all(futures)
1915        finally:
1916            if sys.platform != 'win32':
1917                asyncio.get_event_loop().remove_signal_handler(signal.SIGINT)
1918                asyncio.get_event_loop().remove_signal_handler(signal.SIGTERM)
1919            for l in self.loggers:
1920                await l.finish(self)
1921
1922def list_tests(th: TestHarness) -> bool:
1923    tests = th.get_tests()
1924    for t in tests:
1925        print(th.get_pretty_suite(t))
1926    return not tests
1927
1928def rebuild_deps(wd: str, tests: T.List[TestSerialisation]) -> bool:
1929    def convert_path_to_target(path: str) -> str:
1930        path = os.path.relpath(path, wd)
1931        if os.sep != '/':
1932            path = path.replace(os.sep, '/')
1933        return path
1934
1935    if not (Path(wd) / 'build.ninja').is_file():
1936        print('Only ninja backend is supported to rebuild tests before running them.')
1937        return True
1938
1939    ninja = environment.detect_ninja()
1940    if not ninja:
1941        print("Can't find ninja, can't rebuild test.")
1942        return False
1943
1944    depends = set()            # type: T.Set[str]
1945    targets = set()            # type: T.Set[str]
1946    intro_targets = dict()     # type: T.Dict[str, T.List[str]]
1947    for target in load_info_file(get_infodir(wd), kind='targets'):
1948        intro_targets[target['id']] = [
1949            convert_path_to_target(f)
1950            for f in target['filename']]
1951    for t in tests:
1952        for d in t.depends:
1953            if d in depends:
1954                continue
1955            depends.update(d)
1956            targets.update(intro_targets[d])
1957
1958    ret = subprocess.run(ninja + ['-C', wd] + sorted(targets)).returncode
1959    if ret != 0:
1960        print(f'Could not rebuild {wd}')
1961        return False
1962
1963    return True
1964
1965def run(options: argparse.Namespace) -> int:
1966    if options.benchmark:
1967        options.num_processes = 1
1968
1969    if options.verbose and options.quiet:
1970        print('Can not be both quiet and verbose at the same time.')
1971        return 1
1972
1973    check_bin = None
1974    if options.gdb:
1975        options.verbose = True
1976        if options.wrapper:
1977            print('Must not specify both a wrapper and gdb at the same time.')
1978            return 1
1979        check_bin = 'gdb'
1980
1981    if options.wrapper:
1982        check_bin = options.wrapper[0]
1983
1984    if sys.platform == 'win32':
1985        loop = asyncio.ProactorEventLoop()
1986        asyncio.set_event_loop(loop)
1987
1988    if check_bin is not None:
1989        exe = ExternalProgram(check_bin, silent=True)
1990        if not exe.found():
1991            print(f'Could not find requested program: {check_bin!r}')
1992            return 1
1993
1994    with TestHarness(options) as th:
1995        try:
1996            if options.list:
1997                return list_tests(th)
1998            return th.doit()
1999        except TestException as e:
2000            print('Meson test encountered an error:\n')
2001            if os.environ.get('MESON_FORCE_BACKTRACE'):
2002                raise e
2003            else:
2004                print(e)
2005            return 1
2006
2007def run_with_args(args: T.List[str]) -> int:
2008    parser = argparse.ArgumentParser(prog='meson test')
2009    add_arguments(parser)
2010    options = parser.parse_args(args)
2011    return run(options)
2012