1#! /usr/bin/env python3
2#
3# Main test driver.
4#
5# pylint: disable=line-too-long,too-many-lines,invalid-name,missing-function-docstring,missing-class-docstring
6
7from __future__ import print_function
8
9import atexit
10import copy
11import fnmatch
12import glob
13import io
14import json
15import locale
16import multiprocessing
17import multiprocessing.managers
18import multiprocessing.sharedctypes
19import optparse
20import os
21import os.path
22import platform as pform
23import re
24import shutil
25import signal
26import socket
27import subprocess
28import sys
29import tempfile
30import threading
31import time
32import uuid
33import xml.dom.minidom
34
35from datetime import datetime
36
37try:
38    import ConfigParser as configparser
39except ImportError:
40    import configparser
41
VERSION = "0.71"  # Automatically filled in.

# True when running under Python 3 (legacy Python 2 compatibility flag).
using_py3 = (sys.version_info[0] == 3)

# Program name used in messages and file names.
Name = "btest"
# Global ConfigParser instance; expected to be assigned during startup
# before getOption() is used.
Config = None

# Default configuration file; the BTEST_CFG environment variable overrides it.
try:
    ConfigDefault = os.environ["BTEST_CFG"]
except KeyError:
    ConfigDefault = "btest.cfg"
53
54
def output(msg, nl=True, file=None):
    """Write *msg* to *file* (stderr when not given).

    With nl=True the message is followed by a newline; otherwise a
    single space is emitted instead.
    """
    dest = file if file else sys.stderr
    print(msg, end=("\n" if nl else " "), file=dest)
63
64
def warning(msg):
    """Emit a non-fatal warning on stderr."""
    sys.stderr.write("warning: %s\n" % msg)
67
68
def error(msg):
    """Print *msg* to stderr and terminate the process with exit status 1."""
    print(msg, file=sys.stderr)
    raise SystemExit(1)
72
73
def mkdir(folder):
    """Ensure *folder* exists as a directory, aborting via error() on failure."""
    if os.path.exists(folder):
        if not os.path.isdir(folder):
            error("path %s exists but is not a directory" % folder)
        return

    try:
        os.makedirs(folder)
    except OSError as exc:
        error("cannot create directory %s: %s" % (folder, exc))
84
85
def which(cmd):
    """Locate *cmd* like the shell's `which`, returning its path or None.

    Adapted from http://stackoverflow.com/a/377028. A command containing a
    directory component is checked as-is; otherwise PATH is searched.
    """
    def runnable(path):
        return os.path.isfile(path) and os.access(path, os.X_OK)

    directory = os.path.split(cmd)[0]

    if directory:
        return cmd if runnable(cmd) else None

    for entry in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(entry.strip('"'), cmd)
        if runnable(candidate):
            return candidate

    return None
105
106
def platform():
    """Return the OS name as reported by platform.system() (e.g., "Linux")."""
    return pform.system()
109
110
def getDefaultBtestEncoding():
    """Return the encoding btest uses for test content.

    Falls back to UTF-8 when the default locale reports no encoding;
    otherwise defers to the locale's preferred encoding.
    """
    # NOTE: locale.getdefaultlocale() is deprecated in newer Pythons but
    # still functional; kept for behavioral parity.
    encoding = locale.getdefaultlocale()[1]

    if encoding is None:
        return 'utf-8'

    return locale.getpreferredencoding()
116
117
def validate_version_requirement(required: str, present: str):
    '''Validate that the `present` version is semantically newer than or equal
    to the `required` version, aborting via error() otherwise.

    Versions are sequences of numeric identifiers separated by `.` or `-`.
    '''
    def extract_version(v: str):
        '''Extract the numeric version components from a string.'''
        try:
            xyz = [int(x) for x in re.split(r'\.|-', v)]
        except ValueError:
            error("invalid version %s: versions must contain only numeric identifiers" % v)

        return xyz

    v_present = extract_version(present)
    v_required = extract_version(required)

    if v_present < v_required:
        # Bug fix: this previously referenced an undefined name
        # ('min_version'), raising NameError when the check failed.
        error("%s requires at least BTest %s, this is %s. Please upgrade." %
              (Options.config, required, present))
135
136
# Get the value of the specified option in the specified section (or
# section "btest" if not specified), or return the specified default value
# if the option or section is not found.  The returned value has macros and
# backticks from the config file expanded, but if the default value is returned
# it will not be modified in any way.
def getOption(key, default, section="btest"):
    try:
        raw = Config.get(section, key)
    except (configparser.NoSectionError, configparser.NoOptionError):
        return default

    return ExpandBackticks(raw)
149
150
# Matches a `command` substitution span inside a config option value;
# group 1 is the text between the backticks (used by ExpandBackticks()).
reBackticks = re.compile(r"`(([^`]|\`)*)`")
152
153
def readStateFile():
    """Read the list of previously failed tests from StateFile.

    Returns (True, tests) with the resolved Test objects, or (False, [])
    when the state file cannot be read.
    """
    try:
        names = []

        # Bug fix: close the state file deterministically (the handle was
        # previously left open).
        with open(StateFile) as state:
            for line in state:
                line = line.strip()
                # Skip blanks and comment lines.
                if not line or line.startswith("#"):
                    continue

                names += [line]

        tests = findTests(names)

    except IOError:
        return (False, [])

    return (True, tests)
172
173
# Expand backticks in a config option value and return the result.
def ExpandBackticks(origvalue):
    """Replace each `command` span in *origvalue* with the command's stdout."""
    def _run(match):
        command = match.group(1)
        if not command:
            return ""

        try:
            proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
        except OSError as e:
            error("cannot execute '%s': %s" % (command, e))

        stdout = proc.communicate()[0]
        return stdout.decode().strip()

    return reBackticks.sub(_run, origvalue)
194
195
# We monkey-patch the config parser to provide an alternative method that
# expands backticks in option values and does not include defaults in
# returned section items.
def cpItemsNoDefaults(self, section):
    """Like ConfigParser.items(), but without DEFAULT entries and with backticks expanded."""
    # The raw per-section mapping yields the keys without anything inherited
    # from the default section; values are fetched via self.get() below.
    try:
        raw_keys = list(self._sections[section])
    except KeyError:
        raise configparser.NoSectionError(section)

    expanded = {}

    for key in raw_keys:
        # Python 2 includes a key of "__name__" that we don't want (Python 3
        # doesn't include this).
        if key.startswith("__"):
            continue

        # self.get() expands macros such as %(testbase)s; then expand
        # backticks (if any) on top of that.
        expanded[key] = ExpandBackticks(self.get(section, key))

    return expanded.items()
220
221
# Replace environment variables in string.
def replaceEnvs(s):
    """Substitute each RE_ENV match in *s* with the named environment variable.

    Unknown variables expand to the empty string.
    """
    def _lookup(match):
        return os.environ.get(match.group(1), "")

    return RE_ENV.sub(_lookup, s)
231
232
# Execute one of test's command line *cmdline*. *measure_time* indicates if
# timing measurement is desired. *kwargs* are further keyword arguments
# interpreted the same way as with subprocess.check_call().
# Returns a 3-tuple (success, rc, time) where the former two likewise
# have the same meaning as with runSubprocess(), and 'time' is an integer
# value corresponding to the command's execution time measured in some
# appropriate integer measure. If 'time' is negative, that's an indicator
# that time measurement wasn't possible and the value is to be ignored.
def runTestCommandLine(cmdline, measure_time, **kwargs):
    if not (measure_time and Timer):
        (success, rc) = runSubprocess(cmdline, **kwargs)
        return (success, rc, -1)

    return Timer.timeSubprocess(cmdline, **kwargs)
246
247
# Runs a subprocess. Takes same arguments as subprocess.check_call()
# and returns a 2-tuple (success, rc) where *success* is a boolean
# indicating if the command executed, and *rc* is its exit code if it did.
def runSubprocess(*args, **kwargs):
    # The command runs inside a separate process; its outcome is sent back
    # over a multiprocessing queue.
    def child(q):
        try:
            subprocess.check_call(*args, **kwargs)
            success = True
            rc = 0

        except subprocess.CalledProcessError as e:
            success = False
            rc = e.returncode

        except KeyboardInterrupt:
            # An interrupt counts as "did not succeed" without a real exit code.
            success = False
            rc = 0

        q.put([success, rc])

    try:
        q = multiprocessing.Queue()
        # NOTE(review): passing the nested 'child' closure as a Process
        # target relies on the "fork" start method; a closure is not
        # picklable under "spawn" -- confirm supported platforms.
        p = multiprocessing.Process(target=child, args=(q, ))
        p.start()
        # Read the result before join() so the child can't block on a full queue.
        result = q.get()
        p.join()

    except KeyboardInterrupt:
        # Bail out here directly as otherwise we'd get a bunch of errors.
        # from all the childs.
        os._exit(1)

    return result
281
282
def getcfgparser(defaults):
    """Build a ConfigParser with *defaults*, monkey-patched with itemsNoDefaults()."""
    configparser.ConfigParser.itemsNoDefaults = cpItemsNoDefaults
    return configparser.ConfigParser(defaults)
287
288
# Description of an alternative configuration.
class Alternative:
    """Holds the filters, substitutions, and environment overrides that
    make up one alternative test configuration."""

    def __init__(self, name):
        self.name = name
        # All three mappings start out empty and are filled in by the caller.
        self.filters = {}
        self.substitutions = {}
        self.envs = {}
296
297
# Exception class thrown to signal manager to abort processing.
# The message passed to the constructor will be printed to the console.
class Abort(Exception):
    """Raised to make the test manager abort processing (see TestManager.run)."""
    pass
302
303
# Main class distributing the work across threads.
class TestManager(multiprocessing.managers.SyncManager):
    """Distributes test execution across worker processes.

    Shared counters live in multiprocessing RawValues and manager-backed
    lists so that the workers started in run() can update them; all
    mutation of shared state happens under self._lock.
    """

    def __init__(self, *args, **kwargs):
        super(TestManager, self).__init__(*args, **kwargs)

        # Real initialization happens in run(): the manager must be started
        # before proxies (RLock, list) can be created.
        self._output_handler = None
        self._lock = None
        self._succeeded = None
        self._failed = None
        self._failed_expected = None
        self._unstable = None
        self._skipped = None
        self._tests = None
        self._failed_tests = None
        self._num_tests = None
        self._timing = None
        self._ports = None

    def run(self, tests, output_handler):
        """Run all *tests*, reporting progress through *output_handler*.

        Returns (succeeded, failed, skipped, unstable, failed_expected).
        Raises Abort when Options.abort_on_failure is set and an
        unexpected failure occurred.
        """
        self.start()

        output_handler.prepare(self)
        self._output_handler = output_handler
        self._lock = self.RLock()
        self._succeeded = multiprocessing.sharedctypes.RawValue('i', 0)
        self._failed = multiprocessing.sharedctypes.RawValue('i', 0)
        self._failed_expected = multiprocessing.sharedctypes.RawValue('i', 0)
        self._unstable = multiprocessing.sharedctypes.RawValue('i', 0)
        self._skipped = multiprocessing.sharedctypes.RawValue('i', 0)
        self._tests = self.list(tests)
        self._failed_tests = self.list([])
        self._num_tests = len(self._tests)
        self._timing = self.loadTiming()

        port_range = getOption("PortRange", "1024-65535")
        port_range_lo = int(port_range.split("-")[0])
        port_range_hi = int(port_range.split("-")[1])

        if port_range_lo > port_range_hi:
            error("invalid PortRange value: {0}".format(port_range))

        # Sanity-check that the range can cover the most port-hungry test.
        max_test_ports = 0
        test_with_most_ports = None

        for t in self._tests:
            if len(t.ports) > max_test_ports:
                max_test_ports = len(t.ports)
                test_with_most_ports = t

        if max_test_ports > port_range_hi - port_range_lo + 1:
            error("PortRange {0} cannot satisfy requirement of {1} ports in test {2}".format(
                port_range, max_test_ports, test_with_most_ports.name))

        self._ports = self.list([p for p in range(port_range_lo, port_range_hi + 1)])

        threads = []

        # With interactive input possibly required, we run tests
        # directly. This avoids noisy output appearing from detached
        # processes post-btest-exit when using CTRL-C during the input
        # stage.
        if Options.mode == "UPDATE_INTERACTIVE":
            self.threadRun(0)
        else:
            try:
                for i in range(Options.threads):
                    # Bug fix: pass the worker index via args= instead of a
                    # lambda closing over the loop variable. The closure
                    # captured 'i' late and is not picklable under the
                    # "spawn" start method.
                    t = multiprocessing.Process(name="#%d" % (i + 1),
                                                target=self.threadRun, args=(i, ))
                    t.start()
                    threads += [t]

                for t in threads:
                    t.join()

            except KeyboardInterrupt:
                for t in threads:
                    t.terminate()
                    t.join()

        if Options.abort_on_failure and self._failed.value > 0 and self._failed.value > self._failed_expected.value:
            # Signal abort. The child processes will already have
            # finished because the join() above still ran.
            raise Abort("Aborted after first failure.")

        # Record failed tests if not updating.
        if Options.mode != "UPDATE" and Options.mode != "UPDATE_INTERACTIVE":
            try:
                state = open(StateFile, "w")
            except IOError:
                error("cannot open state file %s" % StateFile)

            # Close the handle even if a write fails (was left open before).
            with state:
                for t in sorted(self._failed_tests):
                    print(t, file=state)

        return (self._succeeded.value, self._failed.value, self._skipped.value,
                self._unstable.value, self._failed_expected.value)

    def percentage(self):
        """Return the share of finished tests (succeeded/failed/skipped) in percent."""
        if not self._num_tests:
            return 0

        count = self._succeeded.value + self._failed.value + self._skipped.value
        return 100.0 * count / self._num_tests

    def threadRun(self, thread_num):
        """Worker loop: keep pulling and running batches of tests until done."""
        # The parent handles CTRL-C and terminates the workers explicitly.
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        all_tests = []

        while True:
            tests = self.nextTests(thread_num)
            if tests is None:
                # No more work for us.
                return

            all_tests += tests

            for t in tests:
                t.run(self)
                self.testReplayOutput(t)

            if Options.update_times:
                self.saveTiming(all_tests)

    def rerun(self, test):
        """Queue *test* for another attempt after a failure."""
        test.reruns += 1
        self._tests += [test.clone(increment=False)]

    def nextTests(self, thread_num):
        """Hand out the next batch of tests for worker *thread_num*, or None when done."""
        with self._lock:
            if Options.abort_on_failure and self._failed.value > 0 and self._failed.value > self._failed_expected.value:
                # Don't hand out any more tests if we are to abort after
                # first failure. Doing so will let all the processes terminate.
                return None

            for i in range(len(self._tests)):
                t = self._tests[i]

                if not t:
                    continue

                # NOTE(review): t.serialize appears to be a list here (see
                # Test.parse); hash() of a list would raise TypeError --
                # confirm the type actually handed in.
                if t.serialize and hash(t.serialize) % Options.threads != thread_num:
                    # Not ours.
                    continue

                # We'll execute it, delete from queue.
                del self._tests[i]

                if Options.alternatives:
                    tests = []

                    for alternative in Options.alternatives:

                        if alternative in t.ignore_alternatives:
                            continue

                        if t.include_alternatives and alternative not in t.include_alternatives:
                            continue

                        alternative_test = copy.deepcopy(t)

                        if alternative == "-":
                            alternative = ""

                        alternative_test.setAlternative(alternative)
                        tests += [alternative_test]

                else:
                    if t.include_alternatives and "default" not in t.include_alternatives:
                        tests = []

                    elif "default" in t.ignore_alternatives:
                        tests = []

                    else:
                        tests = [t]

                return tests

        # No more tests for us.
        return None

    def returnPorts(self, ports):
        """Give a collection of port numbers back to the shared pool."""
        with self._lock:
            for p in ports:
                self._ports.append(p)

    def getAvailablePorts(self, count):
        """Bind and return *count* sockets on free ports from the pool.

        Returns a list of bound sockets (callers read the port via
        getsockname() and close them), or [] when the request cannot be
        satisfied right now.
        """
        with self._lock:

            if count > len(self._ports):
                return []

            first_port = -1
            rval = []

            def release(sockets):
                # Return the already-bound sockets' ports to the pool.
                # Bug fix: read the port *before* closing; getsockname()
                # raises OSError on a closed socket.
                for s in sockets:
                    port = s.getsockname()[1]
                    s.close()
                    self._ports.append(port)

            for _ in range(count):
                while True:
                    if len(self._ports) == 0:
                        release(rval)
                        return []

                    next_port = self._ports[0]

                    if next_port == first_port:
                        # Looped over port pool once, bail out.
                        release(rval)
                        return []

                    if first_port == -1:
                        first_port = next_port

                    del self._ports[0]

                    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

                    # Setting REUSEADDR would allow ports to be recycled
                    # more quickly, but on macOS, seems to also have the
                    # effect of allowing multiple sockets to bind to the
                    # same port, even if REUSEPORT is off, so just try to
                    # ensure both are off.
                    if hasattr(socket, 'SO_REUSEADDR'):
                        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 0)
                    if hasattr(socket, 'SO_REUSEPORT'):
                        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 0)

                    try:
                        sock.bind(('', next_port))
                    except OSError:
                        # Bug fix: narrowed from a bare except. The port is
                        # taken by someone else; recycle it to the end of
                        # the pool and try the next candidate.
                        self._ports.append(next_port)
                        continue
                    else:
                        break

                rval.append(sock)

            return rval

    def lock(self):
        """Expose the shared lock for callers needing larger critical sections."""
        return self._lock

    def testStart(self, test):
        """Report that *test* has started."""
        with self._lock:
            self._output_handler.testStart(test)

    def testCommand(self, test, cmdline):
        """Report that *test* is about to execute *cmdline*."""
        with self._lock:
            self._output_handler.testCommand(test, cmdline)

    def testProgress(self, test, msg):
        """Forward a progress message from *test* to the output handler."""
        with self._lock:
            self._output_handler.testProgress(test, msg)

    def testSucceeded(self, test):
        """Account for a successful test; success on a retry counts as unstable."""
        test.parseProgress()

        msg = "ok"

        if test.known_failure:
            msg += " (but expected to fail)"

        msg += test.timePostfix()

        with self._lock:
            if test.reruns == 0:
                self._succeeded.value += 1
                self._output_handler.testSucceeded(test, msg)
            else:
                # Undo the earlier failure accounting and record the test
                # as unstable instead.
                self._failed.value -= 1
                if test.known_failure:
                    self._failed_expected.value -= 1

                self._unstable.value += 1
                msg += " on retry #{0}, unstable".format(test.reruns)
                self._output_handler.testUnstable(test, msg)

            self._output_handler.testFinished(test, msg)

    def testFailed(self, test):
        """Account for a failed test and schedule a rerun if retries remain."""
        test.parseProgress()

        msg = "failed"

        if test.reruns > 0:
            msg += " on retry #{0}".format(test.reruns)

        if test.known_failure:
            msg += " (expected)"

        msg += test.timePostfix()

        with self._lock:
            self._output_handler.testFailed(test, msg)
            self._output_handler.testFinished(test, msg)

            if test.reruns == 0:
                self._failed.value += 1

                if test.known_failure:
                    self._failed_expected.value += 1
                else:
                    self._failed_tests += [test.name]

            if test.reruns < Options.retries and not test.known_failure:
                self.rerun(test)

    def testSkipped(self, test):
        """Account for a test skipped because its requirements weren't met."""
        msg = "not available, skipped"

        with self._lock:
            self._output_handler.testSkipped(test, msg)
            self._skipped.value += 1

    def testReplayOutput(self, test):
        """Replay a finished test's buffered output through the handler."""
        with self._lock:
            self._output_handler.replayOutput(test)

    def testTimingBaseline(self, test):
        """Return the recorded timing baseline for *test*, or -1 if none."""
        return self._timing.get(test.name, -1)

    # Returns the name of the file to store the timing baseline in for this host.
    def timingPath(self):
        # Renamed local to avoid shadowing the 'id' builtin.
        host_id = uuid.uuid3(uuid.NAMESPACE_DNS, str(uuid.getnode()))
        return os.path.abspath(os.path.join(BaselineTimingDir, host_id.hex))

    # Loads baseline timing information for this host if available. Returns
    # empty dictionary if not.
    def loadTiming(self):
        timing = {}

        with self._lock:
            path = self.timingPath()

            if not os.path.exists(path):
                return {}

            # Bug fix: close the file deterministically (handle was leaked).
            with open(path) as baseline:
                for line in baseline:
                    (k, v) = line.split()
                    timing[k] = float(v)

        return timing

    # Updates the timing baseline for the given tests on this host.
    def saveTiming(self, tests):
        with self._lock:
            changed = False
            timing = self.loadTiming()

            for t in tests:
                if t and t.measure_time and t.utime >= 0:
                    changed = True
                    timing[t.name] = t.utime

            if not changed:
                return

            path = self.timingPath()
            # Renamed from 'dir' to avoid shadowing the builtin.
            (folder, _) = os.path.split(path)
            mkdir(folder)

            # Bug fix: close the file deterministically.
            with open(path, "w") as out:
                for (k, v) in timing.items():
                    print("%s %u" % (k, v), file=out)
677
678
class CmdLine:
    """A single command to invoke.

    These commands can be provided by @TEST-{EXEC,REQUIRES} instructions, an
    Initializer, Finalizer, or Teardown, or their part-specific equivalents.
    """

    def __init__(self, cmdline, expect_success, part, file):
        self.file = file                      # originating file (for messages)
        self.part = part                      # test part the command belongs to
        self.cmdline = cmdline                # the shell command line to run
        self.expect_success = expect_success  # False for @TEST-EXEC-FAIL commands
690
691
class CmdSeq:
    """A sequence of commands, with potential subsequent teardown.

    Tracking the teardown separately allows us to skip to it when commands
    fail. Commands can be individual CmdLines or CmdSeq instances. Test.run()
    processes the latter recursively.
    """

    def __init__(self):
        self.teardown = None  # optional CmdLine executed even after failures
        self.cmds = []        # CmdLine or CmdSeq instances
702
703
704# One test.
705class Test(object):
706    def __init__(self, file=None, directory=None):  # Allow dir to be directly defined
707
708        if file is not None: self.dir = os.path.abspath(os.path.dirname(file))
709        else: self.dir = directory
710
711        self.alternative = None
712        self.baselines = []
713        self.basename = None
714        self.bound_ports = []
715        self.cloned = False
716        self.cmdseqs = []
717        self.contents = []
718        self.copy_files = []
719        self.diag = None
720        self.diagmsgs = []
721        self.doc = []
722        self.files = []
723        self.groups = set()
724        self.ignore_alternatives = []
725        self.include_alternatives = []
726        self.known_failure = False
727        self.log = None
728        self.measure_time = False
729        self.mgr = None
730        self.monitor = None
731        self.monitor_quit = None
732        self.name = None
733        self.number = 1
734        self.part = -1
735        self.ports = set()
736        self.progress_lock = None
737        self.requires = []
738        self.reruns = 0
739        self.serialize = []
740        self.start = None
741        self.stdout = None
742        self.stderr = None
743        self.tmpdir = None
744        self.utime = -1
745        self.utime_base = -1
746        self.utime_exceeded = False
747        self.utime_perc = 0.0
748        self.verbose = None
749
    def __lt__(self, value):
        # Order tests by name for sorting. Note: returns a falsy non-bool
        # (None or "") rather than False when either name is unset.
        return self.name and value.name and self.name < value.name
752
753    def displayName(self):
754        name = self.name
755
756        if self.alternative:
757            name = "%s [%s]" % (name, self.alternative)
758
759        return name
760
    def setAlternative(self, alternative):
        """Record the alternative configuration this instance will run under."""
        self.alternative = alternative
763
    # Parse the test's content.
    def parse(self, content, file):
        """Parse *content* (the lines of *file*) for @TEST-* commands.

        Populates this test's name, command sequences, ports, groups, and
        related control state. Returns False when the file carries the
        ignore marker (RE_IGNORE), True otherwise. Aborts via error() on
        malformed input.
        """
        # Collected keyword values: tag -> value, or tag -> list of values
        # for keywords that may appear multiple times.
        cmds = {}
        for line in content:

            m = RE_IGNORE.search(line)
            if m:
                # Ignore this file.
                return False

            for (tag, regexp, multiple, optional, group1, group2) in Commands:
                m = regexp.search(line)

                if m:
                    value = None

                    if group1 >= 0:
                        value = m.group(group1)

                    if group2 >= 0:
                        value = (value, m.group(group2))

                    if not multiple:
                        if tag in cmds:
                            error("%s: %s defined multiple times." % (file, tag))

                        cmds[tag] = value

                    else:
                        try:
                            cmds[tag] += [value]
                        except KeyError:
                            cmds[tag] = [value]

        # Make sure all non-optional commands are there.
        for (tag, regexp, multiple, optional, group1, group2) in Commands:
            if not optional and tag not in cmds:
                if tag == "exec":
                    error("%s: mandatory keyword '@TEST-EXEC' or '@TEST-EXEC-FAIL' is missing." %
                          file)
                else:
                    error("%s: mandatory %s command not found." % (file, tag))

        basename = file

        # Part files (matching RE_PART) contribute to a shared base test.
        part = 1
        m = RE_PART.match(file)

        if m:
            basename = m.group(1)
            part = int(m.group(2))

        # Derive the dotted test name from the path relative to TestBase.
        name = os.path.relpath(basename, TestBase)
        (name, ext) = os.path.splitext(name)

        name = name.replace("/", ".")
        while name.startswith("."):
            name = name[1:]

        self.name = name
        self.part = part
        self.basename = name
        self.contents += [(file, content)]

        # Build this part's command sequence: optional part initializer,
        # the @TEST-EXEC lines, optional part finalizer, and teardown.
        seq = CmdSeq()

        if PartInitializer:
            seq.cmds.append(
                CmdLine("%s %s" % (PartInitializer, self.name), True, part, "<PartInitializer>"))

        for (cmd, success) in cmds["exec"]:
            # The "-FAIL" suffix of @TEST-EXEC-FAIL inverts the expectation.
            seq.cmds.append(CmdLine(cmd.strip(), success != "-FAIL", part, file))

        if PartFinalizer:
            seq.cmds.append(
                CmdLine("%s %s" % (PartFinalizer, self.name), True, part, "<PartFinalizer>"))

        if PartTeardown:
            seq.teardown = CmdLine("%s %s" % (PartTeardown, self.name), True, part,
                                   "<PartTeardown>")

        self.cmdseqs.append(seq)

        # Transfer the remaining optional keywords into attributes.
        if "serialize" in cmds:
            self.serialize = cmds["serialize"]

        if "port" in cmds:
            self.ports |= set(cmd.strip() for cmd in cmds['port'])

        if "group" in cmds:
            self.groups |= set(cmd.strip() for cmd in cmds["group"])

        if "requires" in cmds:
            for cmd in cmds["requires"]:
                self.requires.append(CmdLine(cmd.strip(), True, part, file))

        if "copy-file" in cmds:
            self.copy_files += [cmd.strip() for cmd in cmds["copy-file"]]

        if "alternative" in cmds:
            self.include_alternatives = [cmd.strip() for cmd in cmds["alternative"]]

        if "not-alternative" in cmds:
            self.ignore_alternatives = [cmd.strip() for cmd in cmds["not-alternative"]]

        if "known-failure" in cmds:
            self.known_failure = True

        if "measure-time" in cmds:
            self.measure_time = True

        if "doc" in cmds:
            self.doc = cmds["doc"]

        return True
879
880    # Copies all control information over to a new Test but replacing the test's
881    # content with a new one.
882    def clone(self, content=None, increment=True):
883        clone = Test("")
884        clone.number = self.number
885        clone.basename = self.basename
886        clone.name = self.basename
887
888        if increment:
889            clone.number = self.number + 1
890            clone.name = "%s-%d" % (self.basename, clone.number)
891
892        clone.requires = self.requires
893        clone.reruns = self.reruns
894        clone.serialize = self.serialize
895        clone.ports = self.ports
896        clone.groups = self.groups
897        clone.cmdseqs = self.cmdseqs
898        clone.known_failure = self.known_failure
899        clone.measure_time = self.measure_time
900        clone.doc = self.doc
901
902        if content:
903            assert len(self.contents) == 1
904            clone.contents = [(self.contents[0][0], content)]
905        else:
906            clone.contents = self.contents
907
908        clone.files = self.files
909        clone.dir = self.dir
910        self.cloned = True
911
912        return clone
913
914    def mergePart(self, part):
915
916        if self.cloned or part.cloned:
917            error("cannot use @TEST-START-NEXT with tests split across parts (%s)" % self.basename)
918
919        self.serialize += part.serialize
920        self.ports |= part.ports
921        self.groups |= part.groups
922        self.cmdseqs += part.cmdseqs
923        self.ignore_alternatives += part.ignore_alternatives
924        self.include_alternatives += part.include_alternatives
925        self.files += part.files
926        self.requires += part.requires
927        self.copy_files += part.copy_files
928        self.contents += part.contents
929        self.doc += part.doc
930        self.known_failure |= part.known_failure
931        self.measure_time |= part.measure_time
932
933    def getPorts(self, mgr, count):
934        if not count:
935            return []
936
937        attempts = 5
938
939        while True:
940            rval = mgr.getAvailablePorts(count)
941
942            if rval:
943                return rval
944
945            attempts -= 1
946
947            if attempts == 0:
948                error("failed to obtain {0} ports for test {1}".format(count, self.name))
949
950            warning("failed to obtain {0} ports for test {1}, will try {2} more times".format(
951                count, self.name, attempts))
952
953            time.sleep(15)
954
    def run(self, mgr):
        """Runs this test's full life-cycle: sandbox setup, precondition
        checks, command execution (with initializer/finalizer/teardown
        hooks), timing verification, and cleanup. All state changes are
        reported to the given test manager 'mgr'."""
        # Reserve the ports the test declared; we only keep the port
        # numbers and close the temporarily bound sockets right away.
        # NOTE(review): another process could grab a port between the
        # close and the test using it — confirm this race is acceptable.
        bound_sockets = self.getPorts(mgr, len(self.ports))
        self.bound_ports = [s.getsockname()[1] for s in bound_sockets]

        for bs in bound_sockets:
            bs.close()

        self.progress_lock = threading.Lock()
        self.start = time.time()
        self.mgr = mgr
        mgr.testStart(self)

        # Per-test sandbox layout and result bookkeeping.
        self.tmpdir = os.path.abspath(os.path.join(TmpDir, self.name))
        self.diag = os.path.join(self.tmpdir, ".diag")
        self.verbose = os.path.join(self.tmpdir, ".verbose")
        self.baselines = [os.path.abspath(os.path.join(d, self.name)) for d in BaselineDirs]
        self.diagmsgs = []
        self.utime = -1  # -1 means "no time measured yet"; execute() accumulates into this.
        self.utime_base = self.mgr.testTimingBaseline(self)
        self.utime_perc = 0.0
        self.utime_exceeded = False

        # Start from a pristine sandbox directory.
        self.rmTmp()
        mkdir(self.tmpdir)

        for d in self.baselines:
            mkdir(d)

        # Materialize the additional (fname, lines) files registered for
        # this test inside the sandbox, creating subdirectories as needed.
        for (fname, lines) in self.files:
            fname = os.path.join(self.tmpdir, fname)

            subdir = os.path.dirname(fname)

            if subdir != "":
                mkdir(subdir)
            try:
                ffile = open(fname, "w")
            except IOError as e:
                error("cannot write test's additional file '%s'" % fname)

            for line in lines:
                ffile.write(line)

            ffile.close()

        # Copy externally referenced files (after environment-variable
        # expansion of their paths) into the sandbox.
        for file in self.copy_files:
            src = replaceEnvs(file)
            try:
                shutil.copy2(src, self.tmpdir)
            except IOError as e:
                error("cannot copy %s: %s" % (src, e))

        # Write inline file contents captured from the test into the sandbox.
        for (file, content) in self.contents:
            localfile = os.path.join(self.tmpdir, os.path.basename(file))
            out = io.open(localfile, "w", encoding=getDefaultBtestEncoding())

            try:
                for line in content:
                    out.write(line)
            except UnicodeEncodeError as e:
                error("unicode encode error in file %s: %s" % (localfile, e))

            out.close()

        # Capture files for command logging and output; closed in finish().
        self.log = open(os.path.join(self.tmpdir, ".log"), "w")
        self.stdout = open(os.path.join(self.tmpdir, ".stdout"), "w")
        self.stderr = open(os.path.join(self.tmpdir, ".stderr"), "w")

        # Any failing precondition command marks the test as skipped.
        for cmd in self.requires:
            (success, rc) = self.execute(cmd, apply_alternative=self.alternative)

            if not success:
                self.mgr.testSkipped(self)
                if not Options.tmps:
                    self.rmTmp()
                self.finish()
                return

        # Spawn thread that monitors for progress updates.
        # Note: We do indeed spawn a thread here, not a process, so
        # that the callback can modify the test object to maintain
        # state.
        def monitor_cb():
            while not self.monitor_quit.is_set():
                self.parseProgress()
                time.sleep(0.1)

        self.monitor = threading.Thread(target=monitor_cb)
        self.monitor_quit = threading.Event()
        self.monitor.start()

        # Run test's commands. First, construct a series of command sequences:
        # each sequence consists of test commands with an optional teardown that
        # always runs, regardless of prior test failures.

        seq = CmdSeq()

        if Initializer:
            seq.cmds.append(CmdLine("%s %s" % (Initializer, self.name), True, 1, "<Initializer>"))

        seq.cmds += self.cmdseqs

        if Finalizer:
            seq.cmds.append(CmdLine("%s %s" % (Finalizer, self.name), True, 1, "<Finalizer>"))

        if Teardown:
            seq.teardown = CmdLine("%s %s" % (Teardown, self.name), True, 1, "<Teardown>")

        # rc always tracks the return code of the most recent command.
        failures = 0
        rc = 0

        # Executes the provided Cmdseq command sequence. Helper function, so we
        # can recurse when a Cmdseq's command list includes other sequences.
        # Returns True iff at least one command was actually executed (which
        # is what makes running the sequence's teardown necessary).
        def run_cmdseq(seq):
            nonlocal failures, rc
            need_teardown = False

            # Run commands only when successful so far, if the most recent
            # command asked to continue despite error (code 100), or in Sphinx
            # mode.
            if failures == 0 or rc == 100 or Options.sphinx:
                skip_part = -1

                for cmd in seq.cmds:
                    # If the next command is a CmdSeq, process it recursively
                    # first. This processes teardowns for those sequences as
                    # needed, and skips them when nothing was actually run in a
                    # CmdSeq.
                    if isinstance(cmd, CmdSeq):
                        need_teardown |= run_cmdseq(cmd)
                        continue

                    # In Sphinx mode, once a command of a part failed, the
                    # rest of that part is skipped (skip_part holds it).
                    if skip_part >= 0 and skip_part == cmd.part:
                        continue

                    (success, rc) = self.execute(cmd, apply_alternative=self.alternative)
                    need_teardown = True

                    if not success:
                        failures += 1

                        if Options.sphinx:
                            # We still execute the remaining commands and
                            # raise a failure for each one that fails.
                            self.mgr.testFailed(self)
                            skip_part = cmd.part
                            continue

                        # Report the test as failed only once.
                        if failures == 1:
                            self.mgr.testFailed(self)

                        if rc != 100:
                            break

            if need_teardown and seq.teardown:
                (success, teardown_rc) = self.execute(seq.teardown,
                                                      apply_alternative=self.alternative,
                                                      addl_envs={
                                                          'TEST_FAILED': int(failures > 0),
                                                          'TEST_LAST_RETCODE': rc
                                                      })

                # A teardown can fail an otherwise successful test run, with the
                # same special-casing of return codes 100 and 200. When failing
                # on top of an already failing run, the return code will
                # override the previous one. If a failing teardown wants to
                # preserve the run's existing failing error code, it has access
                # to it via the TEST_LAST_RETCODE environment variable.
                if not success:
                    rc = teardown_rc
                    failures += 1

                    if Options.sphinx or failures == 1:
                        self.mgr.testFailed(self)

            return need_teardown

        run_cmdseq(seq)

        # Return code 200 aborts further processing, now that any teardowns have
        # run. btest-diff uses this code when we run with --update-interactive
        # and the user aborts the run.
        if rc == 200:
            # Abort all tests.
            self.monitor_quit.set()
            # Flush remaining command output prior to exit:
            mgr.testReplayOutput(self)
            sys.exit(1)

        self.utime_perc = 0.0
        self.utime_exceeded = False

        if failures == 0:
            # If we don't have a timing baseline, we silently ignore that so that
            # on systems that can't measure execution time, the test will just pass.
            if self.utime_base >= 0 and self.utime >= 0:
                delta = getOption("TimingDeltaPerc", "1.0")
                self.utime_perc = (100.0 * (self.utime - self.utime_base) / self.utime_base)
                self.utime_exceeded = (abs(self.utime_perc) > float(delta))

            # A timing deviation fails an otherwise successful test, unless
            # we are currently recording new timing baselines.
            if self.utime_exceeded and not Options.update_times:
                self.diagmsgs += [
                    "'%s' exceeded permitted execution time deviation%s" %
                    (self.name, self.timePostfix())
                ]
                self.mgr.testFailed(self)

            else:
                self.mgr.testSucceeded(self)

            # Keep the sandbox around for failed tests, when -t was given,
            # or when the test may be rerun.
            if not Options.tmps and self.reruns == 0:
                self.rmTmp()

        self.finish()
1169
1170    def finish(self):
1171        if self.bound_ports:
1172            self.mgr.returnPorts([p for p in self.bound_ports])
1173
1174        self.bound_ports = []
1175
1176        for d in self.baselines:
1177            try:
1178                # Try removing the baseline directory. If it works, it's empty, i.e., no baseline was created.
1179                os.rmdir(d)
1180            except OSError:
1181                pass
1182
1183        self.log.close()
1184        self.stdout.close()
1185        self.stderr.close()
1186
1187        if self.monitor:
1188            self.monitor_quit.set()
1189            self.monitor.join()
1190
1191    def execute(self, cmd, apply_alternative=None, addl_envs=None):
1192        filter_cmd = None
1193        cmdline = cmd.cmdline
1194        env = {}
1195
1196        # Apply alternative if requested.
1197        if apply_alternative:
1198
1199            alt = Alternatives[apply_alternative]
1200
1201            try:
1202                (path, executable) = os.path.split(cmdline.split()[0])
1203                filter_cmd = alt.filters[executable]
1204            except LookupError:
1205                pass
1206
1207            for (key, val) in alt.substitutions.items():
1208                cmdline = re.sub("\\b" + re.escape(key) + "\\b", val, cmdline)
1209
1210            env = alt.envs
1211
1212        localfile = os.path.join(self.tmpdir, os.path.basename(cmd.file))
1213
1214        if filter_cmd and cmd.expect_success:  # Do not apply filter if we expect failure.
1215            # This is not quite correct as it does not necessarily need to be
1216            # the %INPUT file which we are filtering ...
1217            filtered = os.path.join(self.tmpdir, "filtered-%s" % os.path.basename(localfile))
1218
1219            filter = CmdLine("%s %s %s" % (filter_cmd, localfile, filtered), True, 1, "<Filter>")
1220
1221            (success, rc) = self.execute(filter, apply_alternative=None)
1222            if not success:
1223                return (False, rc)
1224
1225            mv = CmdLine("mv %s %s" % (filtered, localfile), True, 1, "<Filter-Move>")
1226            (success, rc) = self.execute(mv, apply_alternative=None)
1227
1228            if not success:
1229                return (False, rc)
1230
1231        self.mgr.testCommand(self, cmd)
1232
1233        # Replace special names.
1234
1235        if localfile:
1236            cmdline = RE_INPUT.sub(localfile, cmdline)
1237
1238        cmdline = RE_DIR.sub(self.dir, cmdline)
1239
1240        print("%s (expect %s)" % (cmdline, ("failure", "success")[cmd.expect_success]),
1241              file=self.log)
1242
1243        # Additional environment variables provided by the caller override any
1244        # existing ones, but are generally not assumed to collide:
1245        if addl_envs:
1246            env.update(addl_envs)
1247
1248        env = self.prepareEnv(cmd, env)
1249        measure_time = self.measure_time and (Options.update_times or self.utime_base >= 0)
1250
1251        (success, rc, utime) = runTestCommandLine(cmdline,
1252                                                  measure_time,
1253                                                  cwd=self.tmpdir,
1254                                                  shell=True,
1255                                                  env=env,
1256                                                  stderr=self.stderr,
1257                                                  stdout=self.stdout)
1258
1259        if utime > 0:
1260            self.utime += utime
1261
1262        if success:
1263            if cmd.expect_success:
1264                return (True, rc)
1265
1266            self.diagmsgs += ["'%s' succeeded unexpectedly (exit code 0)" % cmdline]
1267            return (False, 0)
1268
1269        else:
1270            if not cmd.expect_success:
1271                return (True, rc)
1272
1273            self.diagmsgs += ["'%s' failed unexpectedly (exit code %s)" % (cmdline, rc)]
1274            return (False, rc)
1275
1276    def rmTmp(self):
1277        try:
1278            if os.path.isfile(self.tmpdir):
1279                os.remove(self.tmpdir)
1280
1281            if os.path.isdir(self.tmpdir):
1282                subprocess.call("rm -rf %s 2>/dev/null" % self.tmpdir, shell=True)
1283
1284        except OSError as e:
1285            error("cannot remove tmp directory %s: %s" % (self.tmpdir, e))
1286
1287    # Prepares the environment for the child processes.
1288    def prepareEnv(self, cmd, addl={}):
1289        env = copy.deepcopy(os.environ)
1290
1291        env["TEST_BASELINE"] = ":".join(self.baselines)
1292        env["TEST_DIAGNOSTICS"] = self.diag
1293        env["TEST_MODE"] = Options.mode.upper()
1294        env["TEST_NAME"] = self.name
1295        env["TEST_VERBOSE"] = self.verbose
1296        env["TEST_PART"] = str(cmd.part)
1297        env["TEST_BASE"] = TestBase
1298
1299        for (key, val) in addl.items():
1300            # Convert val to string since otherwise os.environ (and our clone)
1301            # trigger a TypeError upon insertion, and the caller may be unaware.
1302            env[key.upper()] = str(val)
1303
1304        for idx, key in enumerate(sorted(self.ports)):
1305            env[key] = str(self.bound_ports[idx]) + "/tcp"
1306
1307        return env
1308
1309    def addFiles(self, files):
1310        # files is a list of tuple (fname, lines).
1311        self.files = files
1312
1313    # If timing information is requested and available returns a
1314    # string that summarizes the time spent for the test.
1315    # Otherwise, returns an empty string.
1316    def timePostfix(self):
1317        if self.utime_base >= 0 and self.utime >= 0:
1318            return " (%+.1f%%)" % self.utime_perc
1319        else:
1320            return ""
1321
1322    # Picks up any progress output that has a test has written out.
1323    def parseProgress(self):
1324        with self.progress_lock:
1325            path = os.path.join(self.tmpdir, ".progress.*")
1326            for file in sorted(glob.glob(path)):
1327                try:
1328                    for line in open(file):
1329                        msg = line.strip()
1330                        self.mgr.testProgress(self, msg)
1331
1332                    os.unlink(file)
1333                except (IOError, OSError):
1334                    pass
1335
1336
1337### Output handlers.
1338
1339
class OutputHandler:
    def __init__(self, options):
        """Base class for reporting progress and results to the user.

        Several subclasses derive from this, and which one is used
        depends on the output format the user selected.

        A handler's methods are invoked by the TestMgr and may be
        interleaved across different tests; the TestMgr serializes the
        calls, so no two of them ever run concurrently.

        options: An optparser with the global options.
        """
        self._buffered_output = {}
        self._options = options

    def prepare(self, mgr):
        """The TestManager calls this with itself as an argument just before
        it starts running tests."""
        pass

    def options(self):
        """Returns the current optparser instance."""
        return self._options

    def threadPrefix(self):
        """Returns a prefix naming the current worker process, suitable
        for prefixing output, when running with multiple threads; the
        empty string otherwise."""
        if self.options().threads <= 1:
            return ""

        return "[%s]" % multiprocessing.current_process().name

    def _output(self, msg, nl=True, file=None):
        out = file if file else sys.stderr

        if nl:
            print(msg, file=out)
        elif msg:
            print(msg, end=" ", file=out)

    def output(self, test, msg, nl=True, file=None):
        """Emits one line of output for the user. With more than one
        worker thread, the line is buffered per test and later flushed
        as one block via replayOutput().

        Only this class and its subclasses should call this, never the
        tests themselves.
        """
        if self.options().threads < 2:
            self._output(msg, nl, file)
        else:
            self._buffered_output.setdefault(test.name, []).append((msg, nl, file))

    def replayOutput(self, test):
        """Prints out all output buffered in threaded mode by output()."""
        buffered = self._buffered_output.get(test.name)

        if buffered is None:
            return

        for (msg, nl, file) in buffered:
            self._output(msg, nl, file)

        self._buffered_output[test.name] = []

    # Hooks for subclasses to override.
    def testStart(self, test):
        """Invoked just before a test begins."""

    def testCommand(self, test, cmdline):
        """Invoked just before a command line is executed for a trace."""

    def testProgress(self, test, msg):
        """Invoked when a test signals having made progress."""

    def testSucceeded(self, test, msg):
        """Invoked when a test passed."""

    def testFailed(self, test, msg):
        """Invoked when a test failed."""

    def testSkipped(self, test, msg):
        """Invoked when a test is skipped because its dependencies aren't met."""

    def testFinished(self, test, msg):
        """
        Invoked just after a test has finished being processed, independent
        of success or failure. Not invoked for skipped tests.
        """

    def testUnstable(self, test, msg):
        """Invoked when a test failed initially but succeeded in a retry."""

    def finished(self):
        """Invoked once all tests have been executed."""
1441
1442
class Forwarder(OutputHandler):
    """
    Dispatches every event to a set of wrapped output handlers.

    options: An optparser with the global options.

    handlers: List of output handlers to forward to.
    """
    def __init__(self, options, handlers):
        OutputHandler.__init__(self, options)
        self._handlers = handlers

    def _fanout(self, method, *args):
        # Invoke the named hook on each wrapped handler, in order.
        for handler in self._handlers:
            getattr(handler, method)(*args)

    def prepare(self, mgr):
        """Called just before test manager starts running tests."""
        self._fanout("prepare", mgr)

    def testStart(self, test):
        """Called just before a test begins."""
        self._fanout("testStart", test)

    def testCommand(self, test, cmdline):
        """Called just before a command line is executed for a trace."""
        self._fanout("testCommand", test, cmdline)

    def testProgress(self, test, msg):
        """Called when a test signals having made progress."""
        self._fanout("testProgress", test, msg)

    def testSucceeded(self, test, msg):
        """Called when a test was successful."""
        self._fanout("testSucceeded", test, msg)

    def testFailed(self, test, msg):
        """Called when a test failed."""
        self._fanout("testFailed", test, msg)

    def testSkipped(self, test, msg):
        """Called when a test is skipped."""
        self._fanout("testSkipped", test, msg)

    def testFinished(self, test, msg):
        """Called once a test is done, whether it passed or failed."""
        self._fanout("testFinished", test, msg)

    def testUnstable(self, test, msg):
        """Called when a test failed initially but succeeded in a retry."""
        self._fanout("testUnstable", test, msg)

    def replayOutput(self, test):
        """Flushes the buffered output of all wrapped handlers."""
        self._fanout("replayOutput", test)

    def finished(self):
        """Called when all tests have been executed."""
        self._fanout("finished")
1505
1506
class Standard(OutputHandler):
    """Output handler implementing the default one-line-per-test format."""
    def testStart(self, test):
        self._announce(test)
        # Tracks whether progress lines pushed the name off this line.
        test._std_nl = False

    def testCommand(self, test, cmdline):
        pass

    def testProgress(self, test, msg):
        """Called when a test signals having made progress."""
        if not test._std_nl:
            self.output(test, "")

        self.output(test, "  - " + msg)
        test._std_nl = True

    def testSucceeded(self, test, msg):
        sys.stdout.flush()
        self.finalMsg(test, msg)

    def testFailed(self, test, msg):
        self.finalMsg(test, msg)

    def testSkipped(self, test, msg):
        self.finalMsg(test, msg)

    def testUnstable(self, test, msg):
        self.finalMsg(test, msg)

    def finalMsg(self, test, msg):
        # Progress output moved us to a fresh line; repeat the test name
        # so the verdict appears next to it.
        if test._std_nl:
            self._announce(test)

        self.output(test, msg)

    def _announce(self, test):
        # Print "<thread prefix> <name> ..." without a trailing newline.
        self.output(test, self.threadPrefix(), nl=False)
        self.output(test, "%s ..." % test.displayName(), nl=False)
1543
1544
class Console(OutputHandler):
    """
    Output handler writing a colorful progress report to the console.

    Suitable for settings that support coloring but not cursor placement
    commands (where, e.g., moving to the start of the line would clobber
    surrounding output); this is what ``--show-all`` uses. The
    *CompactConsole* subclass additionally uses cursor placement for a
    more space-efficient display.
    """
    Green = "\033[32m"
    Red = "\033[31m"
    Yellow = "\033[33m"
    Gray = "\033[37m"
    DarkGray = "\033[1;30m"
    Normal = "\033[0m"

    def __init__(self, options):
        OutputHandler.__init__(self, options)
        # Console prints a line for every test; CompactConsole flips this.
        self.show_all = True

    def _colorize(self, msg, color):
        # Wrap the message in the given ANSI color, resetting afterwards.
        return color + msg + self.Normal

    def testStart(self, test):
        banner = "[%3d%%] %s ..." % (test.mgr.percentage(), test.displayName())
        self._consoleOutput(test, banner, False)

    def testProgress(self, test, msg):
        """Called when a test signals having made progress."""
        self._consoleOutput(test, self._colorize("(%s)" % msg, self.DarkGray), True)

    def testSucceeded(self, test, msg):
        # Known failures are highlighted in yellow whatever the outcome.
        color = self.Yellow if test.known_failure else self.Green
        self._consoleOutput(test, self._colorize(msg, color), self.show_all)

    def testFailed(self, test, msg):
        color = self.Yellow if test.known_failure else self.Red
        self._consoleOutput(test, self._colorize(msg, color), True)

    def testUnstable(self, test, msg):
        self._consoleOutput(test, self._colorize(msg, self.Yellow), True)

    def testSkipped(self, test, msg):
        self._consoleOutput(test, self._colorize(msg, self.Gray), self.show_all)

    def finished(self):
        sys.stdout.flush()

    def _consoleOutput(self, test, msg, sticky):
        # Indirection point so CompactConsole can override the rendering.
        self._consoleWrite(test, msg, sticky)

    def _consoleWrite(self, test, msg, sticky):
        sys.stdout.write(msg.strip() + " ")

        if sticky:
            sys.stdout.write("\n")

        sys.stdout.flush()
1612
1613
class CompactConsole(Console):
    """
    Output handler that writes compact, colorful progress report to
    the console while also keeping the output compact by keeping
    output only for failing tests.

    This handler adds cursor mods and navigation to the coloring provided by
    the Console class and hence needs settings that can handle both.
    """
    CursorOff = "\033[?25l"
    CursorOn = "\033[?25h"
    EraseToEndOfLine = "\033[2K"

    def __init__(self, options):
        Console.__init__(self, options)
        self.show_all = False

        def cleanup():
            # Make sure the cursor is visible again even when we exit
            # mid-run (error, interrupt).
            sys.stdout.write(self.CursorOn)

        atexit.register(cleanup)

    def testStart(self, test):
        # The last line rendered for this test; used by _consoleAugment()
        # to redraw after appending a progress note.
        test.console_last_line = None
        self._consoleOutput(test, "", False)
        sys.stdout.write(self.CursorOff)

    def testProgress(self, test, msg):
        """Called when a test signals having made progress."""
        msg = " " + self.DarkGray + "(%s)" % msg + self.Normal
        self._consoleAugment(test, msg)

    def testFinished(self, test, msg):
        test.console_last_line = None

    def finished(self):
        sys.stdout.write(self.EraseToEndOfLine)
        sys.stdout.write("\r")
        sys.stdout.write(self.CursorOn)
        sys.stdout.flush()

    def _consoleOutput(self, test, msg, sticky):
        line = "[%3d%%] %s ..." % (test.mgr.percentage(), test.displayName())

        if msg:
            line += " " + msg

        test.console_last_line = line
        self._consoleWrite(test, line, sticky)

    def _consoleAugment(self, test, msg):
        # NOTE(review): test.console_last_line may be None here if progress
        # arrives outside a testStart/testFinished window; that would print
        # a literal "None" — confirm the monitor thread cannot race us.
        sys.stdout.write(self.EraseToEndOfLine)
        sys.stdout.write(" %s" % msg.strip())
        sys.stdout.write("\r%s" % test.console_last_line)
        sys.stdout.flush()

    def _consoleWrite(self, test, msg, sticky):
        # Same escape sequence as the hand-built chr(27) + '[2K' used
        # before; use the class constant for consistency.
        sys.stdout.write(self.EraseToEndOfLine)
        sys.stdout.write("\r%s" % msg.strip())

        if sticky:
            sys.stdout.write("\n")
            test.console_last_line = None

        sys.stdout.flush()
1679
1680
class Brief(OutputHandler):
    """Output handler for producing the brief output format."""
    def testStart(self, test):
        pass

    def testCommand(self, test, cmdline):
        pass

    def testProgress(self, test, msg):
        """Called when a test signals having made progress."""
        pass

    def testSucceeded(self, test, msg):
        pass

    def testSkipped(self, test, msg):
        pass

    def testFailed(self, test, msg):
        self._report(test, msg)

    def testUnstable(self, test, msg):
        self._report(test, msg)

    def _report(self, test, msg):
        # In brief mode only failures and unstable results get a line.
        self.output(test, self.threadPrefix(), nl=False)
        self.output(test, "%s ... %s" % (test.displayName(), msg))
1706
1707
class Verbose(OutputHandler):
    """Output handler for producing the verbose output format."""
    def testStart(self, test):
        self.output(test, self.threadPrefix(), nl=False)
        self.output(test, "%s ..." % test.displayName())

    def testCommand(self, test, cmdline):
        part = ""

        if cmdline.part > 1:
            part = " [part #%d]" % cmdline.part

        self.output(test, self.threadPrefix(), nl=False)
        self.output(test, "  > %s%s" % (cmdline.cmdline, part))

    def testProgress(self, test, msg):
        """Called when a test signals having made progress."""
        self.output(test, "  - " + msg)

    def testSucceeded(self, test, msg):
        self._finalMsg(test, msg)

    def testFailed(self, test, msg):
        self._finalMsg(test, msg)

    def testUnstable(self, test, msg):
        self._finalMsg(test, msg)

    def testSkipped(self, test, msg):
        self._finalMsg(test, msg)

    def _finalMsg(self, test, msg):
        # All four outcomes render identically in verbose mode.
        self.output(test, self.threadPrefix(), nl=False)
        self.showTestVerbose(test)
        self.output(test, "... %s %s" % (test.displayName(), msg))

    def showTestVerbose(self, test):
        """Echoes any output the test wrote to its .verbose side channel."""
        if not os.path.exists(test.verbose):
            return

        # Context manager closes the handle deterministically; the
        # previous bare open() leaked it until garbage collection.
        with open(test.verbose) as verbose_file:
            for line in verbose_file:
                self.output(test, "  > [test-verbose] %s" % line.strip())
1753
1754
class Diag(OutputHandler):
    def __init__(self, options, all=False, file=None):
        """Output handler for producing the diagnostic output format.

        options: An optparser with the global options.

        all: Print diagnostics also for succeeding tests.

        file: Output into given file rather than console.
        """
        OutputHandler.__init__(self, options)
        self._all = all
        self._file = file

    def showDiag(self, test):
        """Generates diagnostics for a test."""
        for line in test.diagmsgs:
            self.output(test, "  % " + line, True, self._file)

        # Dump the test's diagnostics file and its captured stderr.
        for f in (test.diag, os.path.join(test.tmpdir, ".stderr")):
            if not f:
                continue

            if os.path.isfile(f):
                self.output(test, "  % cat " + os.path.basename(f), True, self._file)
                # Context manager closes the handle deterministically; the
                # previous bare open() leaked it until garbage collection.
                with open(f) as diag_file:
                    for line in diag_file:
                        self.output(test, "  " + line.rstrip(), True, self._file)
                self.output(test, "", True, self._file)

        # In interactive wait mode, pause until the user hits Enter.
        if self.options().wait and not self._file:
            self.output(test, "<Enter> ...")
            try:
                sys.stdin.readline()
            except KeyboardInterrupt:
                sys.exit(1)

    def testCommand(self, test, cmdline):
        pass

    def testSucceeded(self, test, msg):
        if self._all:
            if self._file:
                self.output(test, "%s ... %s" % (test.displayName(), msg), True, self._file)

            self.showDiag(test)

    def testFailed(self, test, msg):
        if self._file:
            self.output(test, "%s ... %s" % (test.displayName(), msg), True, self._file)

        # Known failures only get diagnostics when explicitly requested.
        if (not test.known_failure) or self._all:
            self.showDiag(test)

    def testUnstable(self, test, msg):
        if self._file:
            self.output(test, "%s ... %s" % (test.displayName(), msg), True, self._file)

    def testSkipped(self, test, msg):
        if self._file:
            self.output(test, "%s ... %s" % (test.displayName(), msg), True, self._file)
1815
1816
class SphinxOutput(OutputHandler):
    def __init__(self, options, all=False, file=None):
        """Output handler for producing output when running from
        Sphinx. The main point here is that we save all diagnostic output to
        $BTEST_RST_OUTPUT.

        options: An optparser with the global options.
        """
        OutputHandler.__init__(self, options)

        # NOTE(review): this instance attribute shadows the inherited
        # _output() method; the inherited output() would fail if it were
        # ever invoked on this handler in single-threaded mode. Confirm
        # SphinxOutput never routes through it.
        self._output = None

        try:
            self._rst_output = os.environ["BTEST_RST_OUTPUT"]
        except KeyError:
            print("warning: environment variable BTEST_RST_OUTPUT not set, will not produce output",
                  file=sys.stderr)
            self._rst_output = None

    def testStart(self, test):
        self._output = None

    def testCommand(self, test, cmdline):
        if not self._rst_output:
            return

        # One output file per command part, keyed by the part number.
        self._output = "%s#%s" % (self._rst_output, cmdline.part)
        self._part = cmdline.part

    def testSucceeded(self, test, msg):
        pass

    def testFailed(self, test, msg):
        """Appends a reST code block describing the failure to the
        per-part output file."""
        if not self._output:
            return

        # Context managers close the handles deterministically; the
        # previous bare open() calls leaked them until GC.
        with open(self._output, "a") as out:
            print("\n.. code-block:: none ", file=out)
            print("\n  ERROR executing test '%s' (part %s)\n" % (test.displayName(), self._part),
                  file=out)

            for line in test.diagmsgs:
                print("  % " + line, file=out)

            test.diagmsgs = []

            for f in (test.diag, os.path.join(test.tmpdir, ".stderr")):
                if not f:
                    continue

                if os.path.isfile(f):
                    print("  % cat " + os.path.basename(f), file=out)
                    with open(f) as diag_file:
                        for line in diag_file:
                            print("   %s" % line.strip(), file=out)
                    print(file=out)

    def testUnstable(self, test, msg):
        pass

    def testSkipped(self, test, msg):
        pass
1879
1880
class XMLReport(OutputHandler):
    """Output handler producing a JUnit-style XML report of test results."""

    RESULT_PASS = "pass"
    RESULT_FAIL = "failure"
    RESULT_SKIP = "skipped"
    RESULT_UNSTABLE = "unstable"

    def __init__(self, options, xmlfile):
        """Output handler for producing an XML report of test results.

        options: An optparser with the global options.

        xmlfile: Output into given file
        """
        OutputHandler.__init__(self, options)
        self._file = xmlfile
        self._start = time.time()
        self._timestamp = datetime.now().isoformat()

    def prepare(self, mgr):
        # Results are collected through a manager-backed list so that
        # parallel worker processes can append to it.
        self._results = mgr.list([])

    def testStart(self, test):
        pass

    def testCommand(self, test, cmdline):
        pass

    def makeTestCaseElement(self, doc, testsuite, name, duration):
        # Map a dotted test name onto JUnit's classname/name attribute pair;
        # a name without dots serves as both.
        parts = name.split('.')
        if len(parts) > 1:
            classname = ".".join(parts[:-1])
            name = parts[-1]
        else:
            classname = parts[0]
            name = parts[0]

        e = doc.createElement("testcase")
        e.setAttribute("classname", classname)
        e.setAttribute("name", name)
        e.setAttribute("time", str(duration))
        testsuite.appendChild(e)

        return e

    def getContext(self, test, context_file):
        # Assemble the diagnostic context recorded for non-passing tests:
        # accumulated messages plus the contents of the diag files.
        context = ""
        for line in test.diagmsgs:
            context += "  % " + line + "\n"

        for f in (test.diag, os.path.join(test.tmpdir, context_file)):
            if not f:
                continue

            if os.path.isfile(f):
                context += "  % cat " + os.path.basename(f) + "\n"
                # Close the handle deterministically (it was previously left
                # to the garbage collector).
                with open(f) as diag_file:
                    for line in diag_file:
                        context += "  " + line.strip() + "\n"

        return context

    def addTestResult(self, test, status):
        # Store plain dicts: entries must be picklable for the manager list.
        context = ""

        if status != self.RESULT_PASS:
            context = self.getContext(test, ".stderr")

        res = {
            "name": test.displayName(),
            "status": status,
            "context": context,
            "duration": time.time() - test.start,
        }

        self._results.append(res)

    def testSucceeded(self, test, msg):
        self.addTestResult(test, self.RESULT_PASS)

    def testFailed(self, test, msg):
        self.addTestResult(test, self.RESULT_FAIL)

    def testUnstable(self, test, msg):
        self.addTestResult(test, self.RESULT_UNSTABLE)

    def testSkipped(self, test, msg):
        self.addTestResult(test, self.RESULT_SKIP)

    def finished(self):
        # Render the collected results as one JUnit-style <testsuite>.
        num_tests = 0
        num_failures = 0
        doc = xml.dom.minidom.Document()
        testsuite = doc.createElement("testsuite")
        doc.appendChild(testsuite)

        for res in self._results:
            test_case = self.makeTestCaseElement(doc, testsuite, res["name"], res["duration"])

            if res["status"] != self.RESULT_PASS:
                e = doc.createElement(res["status"])
                e.setAttribute("type", res["status"])
                text_node = doc.createTextNode(res["context"])
                e.appendChild(text_node)
                test_case.appendChild(e)

                if res["status"] == self.RESULT_FAIL:
                    num_failures += 1

            num_tests += 1

        # These attributes are set in sorted order so that resulting XML output
        # is stable across Python versions.  Before Python 3.8, attributes
        # appear in sorted order.  After Python 3.8, attributes appear in
        # order specified by the user.  Would be best to use an XML canonifier
        # method here and Python 3.8+ does provide one, except earlier versions
        # would need to rely on a third-party lib to do the same. References:
        #   https://bugs.python.org/issue34160
        #   https://mail.python.org/pipermail/python-dev/2019-March/156709.html
        testsuite.setAttribute("errors", str(0))
        testsuite.setAttribute("failures", str(num_failures))
        testsuite.setAttribute("hostname", socket.gethostname())
        testsuite.setAttribute("tests", str(num_tests))
        testsuite.setAttribute("time", str(time.time() - self._start))
        testsuite.setAttribute("timestamp", self._timestamp)

        print(doc.toprettyxml(indent="    "), file=self._file)
        self._file.close()
2008
2009
class ChromeTracing(OutputHandler):
    """Output in Chrome tracing format.

    Output files can be loaded into Chrome browser under about:tracing, or
    converted to standalone HTML files with `trace2html`.
    """
    def __init__(self, options, tracefile):
        OutputHandler.__init__(self, options)
        self._file = tracefile

    def prepare(self, mgr):
        # Manager-backed list so parallel workers can record events.
        self._results = mgr.list([])

    def testFinished(self, test, _):
        # One complete ("X") event per test, timestamps in microseconds.
        event = {
            "name": test.name,
            "ts": test.start * 1e6,
            "tid": multiprocessing.current_process().pid,
            "pid": 1,
            "ph": "X",
            "cat": "test",
            "dur": (time.time() - test.start) * 1e6,
        }
        self._results.append(event)

    def finished(self):
        payload = json.dumps(list(self._results))
        print(payload, file=self._file)
        self._file.close()
2037
2038
2039### Timing measurements.
2040
2041
# Base class for all timers.
class TimerBase:
    """Interface for platform-specific execution-time measurement."""

    def available(self):
        """Return True if time measurements are supported by this class on
        the current platform. Must be overridden by derived classes."""
        raise NotImplementedError("Timer.available not implemented")

    def timeSubprocess(self, *args, **kwargs):
        """Run a subprocess and measure its execution time.

        Arguments are as with runSubprocess; the return value is the same as
        with runTestCommandLine(). This method must only be called if
        available() returns True. Must be overridden by derived classes.
        """
        raise NotImplementedError("Timer.timeSubprocess not implemented")
2055
2056
# Linux version of time measurements. Uses "perf".
class LinuxTimer(TimerBase):
    def __init__(self):
        # The config may override the location of the perf binary.
        self.perf = getOption("PerfPath", which("perf"))

    def available(self):
        """Return True if "perf" exists and works on this Linux host."""
        if not platform() == "Linux":
            return False

        if not self.perf or not os.path.exists(self.perf):
            return False

        # Make sure it works.
        (success, rc) = runSubprocess("%s stat -o /dev/null true 2>/dev/null" % self.perf,
                                      shell=True)
        return success and rc == 0

    def timeSubprocess(self, *args, **kwargs):
        """Run the given command under "perf stat", counting instructions.

        Returns (success, rc, utime), where utime is the measured instruction
        count, or -1 if it could not be determined.
        """
        assert self.perf

        # Wrap the original command line in a "perf stat" invocation that
        # writes its counters to ".timing" in the working directory.
        targs = [self.perf, "stat", "-o", ".timing", "-x", " ", "-e", "instructions", "sh", "-c"]
        targs += [" ".join(args)]

        # Copy kwargs so the caller's dict is not mutated, and drop any
        # shell request: we run through "sh -c" ourselves. Using pop() with
        # a default avoids a KeyError if "shell" was never passed (the
        # previous unconditional del would have raised).
        ckwargs = dict(kwargs)
        ckwargs.pop("shell", None)

        (success, rc) = runSubprocess(targs, **ckwargs)

        utime = -1

        try:
            cwd = kwargs.get("cwd", ".")
            with open(os.path.join(cwd, ".timing")) as timing:
                for line in timing:
                    if "instructions" in line and "not supported" not in line:
                        try:
                            utime = int(line.split()[0])
                        except ValueError:
                            pass

        except IOError:
            pass

        return (success, rc, utime)
2103
2104
# Walk the given directory and return all test files.
def findTests(paths, expand_globs=False):
    """Collect Test objects for the given paths.

    paths: iterable of file/directory names, resolved relative to TestBase.
    A path may also name an already-configured test directly.

    expand_globs: if True, each path is treated as a glob pattern and
    expanded to the matching directories.
    """
    tests = []

    ignore_files = getOption("IgnoreFiles", "").split()
    ignore_dirs = getOption("IgnoreDirs", "").split()

    expanded = []

    for p in paths:
        p = os.path.join(TestBase, p)

        if expand_globs:
            expanded += [d for d in glob.glob(p) if os.path.isdir(d)]
        else:
            expanded.append(p)

    for path in expanded:
        rpath = os.path.relpath(path, TestBase)

        if os.path.isdir(path) and os.path.basename(path) in ignore_dirs:
            continue

        # Absolute paths of ignored subdirectories below this path.
        ignores = [os.path.join(path, d) for d in ignore_dirs]

        if RE_PART.match(rpath):
            error("Do not specify files with part numbers directly, use the base test name (%s)" %
                  rpath)

        if os.path.isfile(path):
            tests += readTestFile(path)

            # See if there are more parts.
            for part in glob.glob("%s#*" % rpath):
                tests += readTestFile(part)

        elif os.path.isdir(path):
            for (dirpath, dirnames, filenames) in os.walk(path):

                # A ".btest-ignore" marker excludes a directory and
                # everything below it.
                ign = os.path.join(dirpath, ".btest-ignore")

                if os.path.isfile(ign):
                    del dirnames[0:len(dirnames)]
                    continue

                for fname in filenames:
                    for pattern in ignore_files:
                        if fnmatch.fnmatch(fname, pattern):
                            break
                    else:
                        tests += readTestFile(os.path.join(dirpath, fname))

                # Don't recurse into ignored directories. Iterate over a
                # snapshot so removing entries from dirnames is safe.
                for (subdir, full) in [(d, os.path.join(dirpath, d)) for d in dirnames]:
                    if full in ignores:
                        dirnames.remove(subdir)

        else:
            # See if we have test(s) named like this in our configured set.
            found = False
            for t in Config.configured_tests:
                if t and rpath == t.name:
                    tests += [t]
                    found = True

            if not found:
                # See if there are parts.
                for part in glob.glob("%s#*" % rpath):
                    tests += readTestFile(part)
                    found = True

                if not found:
                    error("cannot read %s" % path)

    return tests
2182
2183
# Merge parts belonging to the same test into one.
def mergeTestParts(tests):
    """Collapse multi-part tests into a single Test per name.

    Parts are visited in (basename, number, part) order and folded into the
    first part seen for each name via mergePart(). Returns the merged tests
    sorted by the same key.
    """
    def key(t):
        return (t.basename, t.number, t.part)

    merged = {}

    for part in sorted(tests, key=key):
        existing = merged.get(part.name)

        if existing is None:
            merged[part.name] = part
        else:
            assert part.part != existing.part
            existing.mergePart(part)

    return sorted(merged.values(), key=key)
2202
2203
# Read the given test file and instantiate one or more tests from it.
def readTestFile(filename):
    """Parse a test file and return the list of Test objects defined in it.

    A file yields one test per START-NEXT separator; inline START-FILE /
    END-FILE sections are collected and attached to every resulting test.
    Returns an empty list if the file is a ".btest-ignore" marker or fails
    to parse.
    """
    def newTest(content, previous):
        # Build a test from the accumulated lines. The first test parses its
        # content from scratch; subsequent ones (after START-NEXT) clone the
        # previous test with the new content.
        if not previous:
            t = Test(filename)
            if t.parse(content, filename):
                return t
            else:
                return None
        else:
            return previous.clone(content)

    if os.path.basename(filename) == ".btest-ignore":
        return []

    try:
        input = io.open(filename, encoding=getDefaultBtestEncoding(), newline='')
    except IOError as e:
        error("cannot read test file: %s" % e)

    tests = []
    files = []

    content = []  # Lines of the test currently being accumulated.
    previous = None  # Most recently created test, for cloning on START-NEXT.
    file = (None, [])  # (name, lines) of the inline file being collected.

    # Simple two-state parser: "test" accumulates test content, "file"
    # accumulates an inline START-FILE section.
    state = "test"

    try:
        lines = [line for line in input]
    except UnicodeDecodeError as e:
        # This error is caused by either a test file with an invalid UTF-8 byte
        # sequence, or if python makes the wrong assumption about the encoding
        # of a test file (this can happen if a test file has valid UTF-8 but
        # none of the locale environment variables LANG, LC_CTYPE, or LC_ALL,
        # were defined prior to running btest).  However, if all test files
        # are ASCII, then this error should never occur.
        error("unicode decode error in file %s: %s" % (filename, e))

    for line in lines:

        if state == "test":
            m = RE_START_FILE.search(line)
            if m:
                state = "file"
                file = (m.group(1), [])
                continue

            m = RE_END_FILE.search(line)
            if m:
                error("%s: unexpected %sEND-FILE" % (filename, CommandPrefix))

            m = RE_START_NEXT_TEST.search(line)
            if not m:
                content += [line]
                continue

            # START-NEXT: finish the current test and begin a new one.
            t = newTest(content, previous)
            if not t:
                return []

            tests += [t]

            previous = t
            content = []

        elif state == "file":
            m = RE_END_FILE.search(line)
            if m:
                state = "test"
                files += [file]
                file = (None, [])
                continue

            file = (file[0], file[1] + [line])

        else:
            error("internal: unknown state %s" % state)

    # An unterminated START-FILE section still counts as a file.
    if state == "file":
        files += [file]

    input.close()

    # Finish the final (or only) test.
    t = newTest(content, previous)
    if t:
        tests.append(t)

    # Every test in the file sees all inline files.
    for t in tests:
        if t:
            t.addFiles(files)

    return tests
2298
2299
def jOption(option, _, __, parser):
    """optparse callback for -j (number of parallel jobs).

    An optional non-negative integer may follow the flag; without one (or
    with a non-numeric value) all CPUs are used. A value of 0 is turned
    into 1 for backward compatibility.
    """
    threads = multiprocessing.cpu_count()

    rargs = parser.rargs
    if rargs and not rargs[0].startswith('-'):
        try:
            threads = max(1, int(rargs[0]))
        except ValueError:
            # Default to using all CPUs. Flagging this as error risks
            # confusing subsequent non-option arguments with arguments
            # intended for -j.
            pass
        else:
            # Only consume the argument if it really was a number.
            rargs.pop(0)

    setattr(parser.values, option.dest, threads)
2316
2317
# Output markup language documenting tests.
def outputDocumentation(tests, fmt):
    """Print documentation for the given tests in "rst" or "md" format.

    Tests are grouped into sections by the dotted prefix of their names;
    sections are printed in the order in which they are first encountered
    in the "tests" list.
    """
    def indent(level):
        return "    " * level

    def doc(t):
        return t.doc if t.doc else ["No documentation."]

    # Group tests by section while remembering first-seen order.
    sectionlist = []
    sections = {}

    for t in tests:
        section = ".".join(t.name.split(".")[:-1])
        if section not in sections:
            sectionlist.append(section)
            sections[section] = []
        sections[section].append(t)

    for section in sectionlist:
        members = sections[section]

        if fmt == "rst":
            print("%s" % section)
            print("-" * len(section))
            print()

            for t in members:
                print("%s``%s``:" % (indent(1), t.name))
                for d in doc(t):
                    print("%s%s" % (indent(2), d))
                print()

        if fmt == "md":
            print("# %s" % section)
            print()

            for t in members:
                print("* `%s`:" % t.name)
                for d in doc(t):
                    print("%s%s" % (indent(1), d))

            print()
2363
2364
2365### Main
2366
if __name__ == '__main__':
    # Python 3.8+ on macOS no longer uses "fork" as the default start-method
    # See https://github.com/zeek/btest/issues/26
    pyver_maj = sys.version_info[0]
    pyver_min = sys.version_info[1]

    # Force "fork" so worker processes inherit the module-level state
    # (Options, Config, output handlers) set up below.
    if (pyver_maj == 3 and pyver_min >= 8) or pyver_maj > 3:
        multiprocessing.set_start_method('fork')
2375
# Define and parse the command-line interface. The default for --config
# comes from $BTEST_CFG (see ConfigDefault above).
optparser = optparse.OptionParser(usage="%prog [options] <directories>", version=VERSION)
optparser.add_option("-U",
                     "--update-baseline",
                     action="store_const",
                     dest="mode",
                     const="UPDATE",
                     help="create a new baseline from the tests' output")
optparser.add_option("-u",
                     "--update-interactive",
                     action="store_const",
                     dest="mode",
                     const="UPDATE_INTERACTIVE",
                     help="interactively asks whether to update baseline for a failed test")
optparser.add_option("-d",
                     "--diagnostics",
                     action="store_true",
                     dest="diag",
                     default=False,
                     help="show diagnostic output for failed tests")
optparser.add_option("-D",
                     "--diagnostics-all",
                     action="store_true",
                     dest="diagall",
                     default=False,
                     help="show diagnostic output for ALL tests")
optparser.add_option(
    "-f",
    "--file-diagnostics",
    action="store",
    type="string",
    dest="diagfile",
    default="",
    help="write diagnostic output for failed tests into file; if file exists, it is overwritten")
optparser.add_option("-v",
                     "--verbose",
                     action="store_true",
                     dest="verbose",
                     default=False,
                     help="show commands as they are executed")
optparser.add_option("-w",
                     "--wait",
                     action="store_true",
                     dest="wait",
                     default=False,
                     help="wait for <enter> after each failed (with -d) or all (with -D) tests")
optparser.add_option("-b",
                     "--brief",
                     action="store_true",
                     dest="brief",
                     default=False,
                     help="outputs only failed tests")
optparser.add_option("-c",
                     "--config",
                     action="store",
                     type="string",
                     dest="config",
                     default=ConfigDefault,
                     help="configuration file")
optparser.add_option("-t",
                     "--tmp-keep",
                     action="store_true",
                     dest="tmps",
                     default=False,
                     help="do not delete tmp files created for running tests")
# -j takes an *optional* argument, which optparse cannot express directly;
# the jOption callback consumes the following argument when it is numeric.
optparser.add_option(
    "-j",
    "--jobs",
    action="callback",
    callback=jOption,
    dest="threads",
    default=1,
    help="number of threads running tests in parallel; with no argument will use all CPUs")
optparser.add_option("-g",
                     "--groups",
                     action="store",
                     type="string",
                     dest="groups",
                     default="",
                     help="execute only tests of given comma-separated list of groups")
optparser.add_option("-r",
                     "--rerun",
                     action="store_true",
                     dest="rerun",
                     default=False,
                     help="execute commands for tests that failed last time")
optparser.add_option("-q",
                     "--quiet",
                     action="store_true",
                     dest="quiet",
                     default=False,
                     help="suppress information output other than about failed tests")
optparser.add_option(
    "-x",
    "--xml",
    action="store",
    type="string",
    dest="xmlfile",
    default="",
    help=
    "write a report of test results in JUnit XML format to file; if file exists, it is overwritten")
optparser.add_option("-a",
                     "--alternative",
                     action="store",
                     type="string",
                     dest="alternatives",
                     default=None,
                     help="activate given alternative")
optparser.add_option("-S",
                     "--sphinx",
                     action="store_true",
                     dest="sphinx",
                     default=False,
                     help="indicates that we're running from inside Sphinx; for internal purposes")
optparser.add_option("-T",
                     "--update-times",
                     action="store_true",
                     dest="update_times",
                     default=False,
                     help="create a new timing baseline for tests being measured")
optparser.add_option("-R",
                     "--documentation",
                     action="store",
                     type="choice",
                     dest="doc",
                     choices=("rst", "md"),
                     metavar="format",
                     default=None,
                     help="Output documentation for tests, supported formats: rst, md")
optparser.add_option(
    "-A",
    "--show-all",
    action="store_true",
    default=False,
    help=
    "For console output, show one-liners for passing/skipped tests in addition to any failing ones")
optparser.add_option("-z",
                     "--retries",
                     action="store",
                     dest="retries",
                     type="int",
                     default=0,
                     help="Retry failed tests this many times to determine if they are unstable")
optparser.add_option("--trace-file",
                     action="store",
                     dest="tracefile",
                     default="",
                     help="write Chrome tracing file to file; if file exists, it is overwritten")
optparser.add_option("-F",
                     "--abort-on-failure",
                     action="store_true",
                     dest="abort_on_failure",
                     help="terminate after first test failure")
optparser.add_option("-l",
                     "--list",
                     action="store_true",
                     dest="list",
                     default=False,
                     help="list available tests instead of executing them")

optparser.set_defaults(mode="TEST")
(Options, args) = optparser.parse_args()
2537
# Update-interactive mode implies single-threaded operation
if Options.mode == "UPDATE_INTERACTIVE" and Options.threads > 1:
    warning("ignoring requested parallelism in interactive-update mode")
    Options.threads = 1

if not os.path.exists(Options.config):
    error("configuration file '%s' not found" % Options.config)

# The defaults come from environment variables, plus a few additional items.
defaults = {}
# Changes to defaults should not change os.environ
defaults.update(os.environ)
defaults["default_path"] = os.environ["PATH"]

# Relative paths are interpreted relative to the config file's directory.
dirname = os.path.dirname(Options.config)
if not dirname:
    dirname = os.getcwd()

# If the BTEST_TEST_BASE environment var is set, we'll use that as the testbase.
# If not, we'll use the current directory.
TestBase = os.path.abspath(os.environ.get("BTEST_TEST_BASE", dirname))
defaults["testbase"] = TestBase
defaults["baselinedir"] = os.path.abspath(
    os.environ.get("BTEST_BASELINE_DIR", os.path.join(TestBase, "Baseline")))

# Parse our config
Config = getcfgparser(defaults)
Config.read(Options.config)

# The config file may override the baseline directory.
defaults["baselinedir"] = getOption("BaselineDir", defaults["baselinedir"])

min_version = getOption("MinVersion", None)
if min_version:
    validate_version_requirement(min_version, VERSION)

if Options.alternatives:
    # Preprocess to split into list.
    Options.alternatives = [alt.strip() for alt in Options.alternatives.split(",") if alt != "-"]

    # Helper function that, if an option wasn't explicitly specified as an
    # environment variable, checks if an alternative sets it through
    # its own environment section. If so, we make that value our new default.
    # If multiple alternatives set it, we pick the value from the first.
    def get_env_from_alternative(env, opt, default, transform=None):
        for tag in Options.alternatives:
            value = getOption(env, None, section="environment-%s" % tag)
            if value is not None:
                if transform:
                    value = transform(value)

                defaults[opt] = value

                # At this point, our defaults have changed, so we
                # reread the configuration.
                new_config = getcfgparser(defaults)
                new_config.read(Options.config)
                return new_config, value

        return Config, default

    (Config, TestBase) = get_env_from_alternative("BTEST_TEST_BASE", "testbase", TestBase,
                                                  lambda x: os.path.abspath(x))
    # Need to update BaselineDir - it may be interpolated from testbase.
    defaults["baselinedir"] = getOption("BaselineDir", defaults["baselinedir"])
    (Config, _) = get_env_from_alternative("BTEST_BASELINE_DIR", "baselinedir", None)

# All relative paths from here on resolve against the test base.
os.chdir(TestBase)

if Options.sphinx:
    Options.quiet = True

if Options.quiet:
    Options.brief = True
2611
# Determine output handlers to use.

output_handlers = []

if Options.verbose:
    output_handlers += [Verbose(Options, )]

elif Options.brief:
    output_handlers += [Brief(Options, )]

else:
    # Interactive terminals get console-style output; pipes get the
    # plain standard handler.
    if sys.stdout.isatty():
        if Options.show_all:
            output_handlers += [Console(Options, )]
        else:
            output_handlers += [CompactConsole(Options, )]
    else:
        output_handlers += [Standard(Options, )]

if Options.diagall:
    output_handlers += [Diag(Options, True, None)]

elif Options.diag:
    output_handlers += [Diag(Options, False, None)]

# Report files are opened line-buffered (buffering=1) so partial output is
# visible while tests are still running. Failure to open a report file is
# not fatal; the run proceeds without that handler.
if Options.diagfile:
    try:
        diagfile = open(Options.diagfile, "w", 1)
        output_handlers += [Diag(Options, Options.diagall, diagfile)]

    except IOError as e:
        print("cannot open %s: %s" % (Options.diagfile, e), file=sys.stderr)

if Options.sphinx:
    output_handlers += [SphinxOutput(Options)]

if Options.xmlfile:
    try:
        xmlfile = open(Options.xmlfile, "w", 1)
        output_handlers += [XMLReport(Options, xmlfile)]

    except IOError as e:
        print("cannot open %s: %s" % (Options.xmlfile, e), file=sys.stderr)

if Options.tracefile:
    try:
        tracefile = open(Options.tracefile, "w", 1)
        output_handlers += [ChromeTracing(Options, tracefile)]

    except IOError as e:
        print("cannot open %s: %s" % (Options.tracefile, e), file=sys.stderr)

# All handlers are driven through a single forwarding facade.
output_handler = Forwarder(Options, output_handlers)
2665
# Determine Timer to use.

Timer = None

if platform() == "Linux":
    t = LinuxTimer()
    if t.available():
        Timer = t

if Options.update_times and not Timer:
    warning("unable to create timing baseline because timer is not available")

# Evaluate other command line options.

if Config.has_section("environment"):
    for (name, value) in Config.itemsNoDefaults("environment"):
        # Here we don't want to include items from defaults
        os.environ[name.upper()] = value

Alternatives = {}

if Options.alternatives:
    # Each activated alternative may define filter/substitution/environment
    # sections in the config; all three are optional.
    for tag in Options.alternatives:
        a = Alternative(tag)

        try:
            for (name, value) in Config.itemsNoDefaults("filter-%s" % tag):
                a.filters[name] = value

        except configparser.NoSectionError:
            pass

        try:
            for (name, value) in Config.itemsNoDefaults("substitution-%s" % tag):
                a.substitutions[name] = value

        except configparser.NoSectionError:
            pass

        try:
            for (name, value) in Config.itemsNoDefaults("environment-%s" % tag):
                a.envs[name] = value

        except configparser.NoSectionError:
            pass

        Alternatives[tag] = a
2713
# Prefix that introduces btest commands inside test files (default
# "@TEST-", e.g. "@TEST-EXEC"); configurable via the CommandPrefix option.
CommandPrefix = getOption("CommandPrefix", "@TEST-")

# Placeholders expanded inside test command lines.
RE_INPUT = re.compile(r"%INPUT")
RE_DIR = re.compile(r"%DIR")
RE_ENV = re.compile(r"\$\{(\w+)\}")
# Matches "<name>#<number>", the naming scheme for multi-part tests.
RE_PART = re.compile(r"^(.*)#([0-9]+)$")
# Structural markers recognized while scanning a test file.
RE_IGNORE = re.compile(CommandPrefix + "IGNORE")
RE_START_NEXT_TEST = re.compile(CommandPrefix + "START-NEXT")
RE_START_FILE = re.compile(CommandPrefix + "START-FILE +([^\r\n ]*)")
RE_END_FILE = re.compile(CommandPrefix + "END-FILE")

# Commands as tuple (tag, regexp, more-than-one-is-ok, optional, group-main, group-add)
# The last two entries are regex group indices (-1 = not applicable).
# pylint: disable=bad-whitespace
# yapf: disable
RE_EXEC                = ("exec",            re.compile(CommandPrefix + "EXEC(-FAIL)?: *(.*)"), True, False, 2, 1)
RE_REQUIRES            = ("requires",        re.compile(CommandPrefix + "REQUIRES: *(.*)"), True, True, 1, -1)
RE_GROUP               = ("group",           re.compile(CommandPrefix + "GROUP: *(.*)"), True, True, 1, -1)
RE_SERIALIZE           = ("serialize",       re.compile(CommandPrefix + "SERIALIZE: *(.*)"), False, True, 1, -1)
RE_PORT                = ("port",            re.compile(CommandPrefix + "PORT: *(.*)"), True, True, 1, -1)
RE_INCLUDE_ALTERNATIVE = ("alternative",     re.compile(CommandPrefix + "ALTERNATIVE: *(.*)"), True, True, 1, -1)
RE_IGNORE_ALTERNATIVE  = ("not-alternative", re.compile(CommandPrefix + "NOT-ALTERNATIVE: *(.*)"), True, True, 1, -1)
RE_COPY_FILE           = ("copy-file",       re.compile(CommandPrefix + "COPY-FILE: *(.*)"), True, True, 1, -1)
RE_KNOWN_FAILURE       = ("known-failure",   re.compile(CommandPrefix + "KNOWN-FAILURE"), False, True, -1, -1)
RE_MEASURE_TIME        = ("measure-time",    re.compile(CommandPrefix + "MEASURE-TIME"), False, True, -1, -1)
RE_DOC                 = ("doc",             re.compile(CommandPrefix + "DOC: *(.*)"), True, True, 1, -1)
# yapf: enable
# pylint: enable=bad-whitespace

# All commands the file scanner looks for, in scan order.
Commands = (RE_EXEC, RE_REQUIRES, RE_GROUP, RE_SERIALIZE, RE_PORT, RE_INCLUDE_ALTERNATIVE,
            RE_IGNORE_ALTERNATIVE, RE_COPY_FILE, RE_KNOWN_FAILURE, RE_MEASURE_TIME, RE_DOC)
2744
# Paths used throughout the run, normalized to absolute form.
StateFile = os.path.abspath(
    getOption("StateFile", os.path.join(defaults["testbase"], ".btest.failed.dat")))
TmpDir = os.path.abspath(getOption("TmpDir", os.path.join(defaults["testbase"], ".tmp")))
# "baselinedir" may name several colon-separated directories. (Loop variable
# renamed from "dir" so it no longer shadows the builtin of that name.)
BaselineDirs = [os.path.abspath(bdir) for bdir in defaults["baselinedir"].split(":")]
# Timing baselines default to a "_Timing" subdirectory of the first
# baseline directory.
BaselineTimingDir = os.path.abspath(
    getOption("TimingBaselineDir", os.path.join(BaselineDirs[0], "_Timing")))

# Hook commands; an empty string means "not configured". These presumably
# run around the whole test run — confirm against their call sites ...
Initializer = getOption("Initializer", "")
Finalizer = getOption("Finalizer", "")
Teardown = getOption("Teardown", "")

# ... and these around each part of a multi-part test.
PartInitializer = getOption("PartInitializer", "")
PartFinalizer = getOption("PartFinalizer", "")
PartTeardown = getOption("PartTeardown", "")
2759
# Determine the set of tests to run. Tests named on the command line take
# precedence; otherwise --rerun replays the failures recorded in the state
# file, and by default everything configured via TestDirs runs.
Config.configured_tests = []

testdirs = getOption("TestDirs", "").split()
if testdirs:
    Config.configured_tests = findTests(testdirs, True)

if args:
    tests = findTests(args)

elif Options.rerun:
    (success, tests) = readStateFile()

    if not success:
        warning("cannot read state file, executing all tests")
        tests = Config.configured_tests

    elif not tests:
        # State file readable but empty: nothing failed last time.
        output("no tests failed last time")
        sys.exit(0)

else:
    tests = Config.configured_tests
2784
if Options.groups:
    # Split the comma-separated group list into inclusions and exclusions
    # (names with a leading "-"); a bare "-" yields the empty string in
    # no_groups and means "exclude every test that has any group".
    requested = Options.groups.split(",")
    Options.groups = {g for g in requested if not g.startswith("-")}
    Options.no_groups = {g[1:] for g in requested if g.startswith("-")}

    def rightGroup(t):
        # Placeholder entries always pass the filter.
        if not t:
            return True

        # Membership in any explicitly requested group keeps the test.
        if t.groups & Options.groups:
            return True

        # Bare "-": only group-less tests survive.
        if "" in Options.no_groups:
            return not t.groups

        # Named exclusions: keep unless the test is in an excluded group.
        if Options.no_groups:
            return not (t.groups & Options.no_groups)

        # Only inclusions were given, and none matched.
        return False

    tests = [t for t in tests if rightGroup(t)]
2810
# Nothing selected? That is a successful no-op run.
if not tests:
    output("no tests to execute")
    sys.exit(0)

# Fold "#<n>" part files into their parent tests.
tests = mergeTestParts(tests)

# --doc renders documentation extracted from the tests and stops.
if Options.doc:
    outputDocumentation(tests, Options.doc)
    sys.exit(0)

# Make sure every baseline directory and the temp directory exist.
for baseline_dir in BaselineDirs:
    mkdir(baseline_dir)

mkdir(TmpDir)
2825
# Build our own socket path to avoid "error: AF_UNIX path too long" on
# some platforms (see BIT-862). The maximum length of a sockaddr_un path
# is system-dependent; 100 bytes is used as a safe default limit.
sname = "btest-socket-%d" % os.getpid()

# Candidate base directories, tried in order and evaluated lazily so that
# os.path.relpath() only runs when the system temp path is too long:
# global temp dir, then TmpDir relative to cwd (usually ".tmp"), then a
# hard-coded "/tmp" as the last resort (used even if still over the limit).
for make_base in (tempfile.gettempdir,
                  lambda: os.path.relpath(TmpDir),
                  lambda: "/tmp"):
    addr = os.path.join(make_base(), sname)
    if len(addr) <= 100:
        break

mgr = TestManager(address=addr)
2843
# Run (or just list) the selected tests, making sure the output handler is
# finalized and the manager shut down even when the run is interrupted.
try:
    if Options.list:
        # --list mode: print sorted test names, execute nothing.
        for test in sorted(tests):
            if test.name:
                print(test.name)
        sys.exit(0)
    else:
        # Tests are deep-copied before handing them to the manager —
        # presumably so the run cannot mutate our originals; confirm
        # against TestManager.run.
        (succeeded, failed, skipped, unstable,
         failed_expected) = mgr.run(copy.deepcopy(tests), output_handler)
        total = succeeded + failed + skipped

    output_handler.finished()

# Ctrl-C can lead to broken pipe (e.g. FreeBSD), so include IOError here:
except (Abort, KeyboardInterrupt, IOError) as exc:
    output_handler.finished()
    print(str(exc) or "Aborted with %s." % type(exc).__name__, file=sys.stderr)
    sys.stderr.flush()
    # Explicitly shut down sync manager to avoid leaking manager
    # processes, particularly with --abort-on-failure:
    mgr.shutdown()
    # os._exit() terminates immediately, skipping atexit handlers.
    os._exit(1)
2866
# Assemble the optional fragments of the one-line summary.
skip = "" if skipped <= 0 else ", %d skipped" % skipped
unstablestr = "" if unstable <= 0 else ", %d unstable" % unstable
if failed_expected > 0:
    failed_expectedstr = " (with %d expected to fail)" % failed_expected
else:
    failed_expectedstr = ""

if failed > 0:
    if not Options.quiet:
        output("%d of %d test%s failed%s%s%s" %
               (failed, total, "s" if total > 1 else "", failed_expectedstr, skip, unstablestr))

    # A run in which every failure was an expected one still exits 0.
    sys.exit(0 if failed == failed_expected else 1)

if skipped > 0 or unstable > 0:
    if not Options.quiet:
        output("%d test%s successful%s%s" %
               (succeeded, "s" if succeeded != 1 else "", skip, unstablestr))

    sys.exit(0)

if not Options.quiet:
    output("all %d tests successful" % total)

sys.exit(0)