import os
import sys
import platform
import textwrap
import re
import subprocess
import difflib
import time
import shutil
import enum
from pathlib import Path
import logging
from typing import TYPE_CHECKING
import collections.abc

try:
    import numpy as np
except ImportError:
    np = None

__all__ = ("logger", "remove_directory", "remove_file", "test_results",
           "add_RegressionTest", "get_command_output", "listify", "which",
           "ConfigBuilder", "multi_glob", "get_spawn", "help", "quoted")

if TYPE_CHECKING:
    from typing import Iterable, Union, List, TypeVar, Dict
    import SCons.Environment
    import SCons.Node.FS
    import SCons.Variables

    SCEnvironment = SCons.Environment.Environment
    LFSNode = List[Union[SCons.Node.FS.File, SCons.Node.FS.Dir]]
    SCVariables = SCons.Variables.Variables
    TPathLike = TypeVar("TPathLike", Path, str)


class LevelAdapter(logging.LoggerAdapter):
    """This adapter processes the ``print_level`` keyword argument to log functions.

    In the default Logger functions, it is not possible to add extra keyword arguments
    to modify behavior. This Adapter allows the Logger functions (.debug(), .info(),
    etc.) to include the keyword argument ``print_level``, which takes a Boolean value
    to determine whether or not the level of the message should be shown with the rest
    of the message. The actual message formatting is handled elsewhere. Example::

       >>> logger.info("Message", print_level=True)
       INFO: Message
       >>> logger.error("Message", print_level=False)
       Message
    """
    def __init__(self, logger):
        super().__init__(logger, {})

    def process(self, msg, kwargs):
        """Pop the value of ``print_level`` into the ``extra`` dictionary.

        Key-value pairs in the "extra" dictionary are set as attributes on the
        ``LogRecord`` instance.
        """
        if "print_level" in kwargs:
            print_level = kwargs.pop("print_level")
            if "extra" in kwargs:
                kwargs["extra"].update(print_level=print_level)
            else:
                kwargs["extra"] = {"print_level": print_level}
        return msg, kwargs


# Modified from https://stackoverflow.com/a/42823461
class BraceLogRecord(logging.LogRecord):
    """Format a log record using brace syntax {} instead of %.
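
    With this record factory installed, logging calls can pass ``str.format``
    style arguments; for example::

       >>> logger.info("{} of {} tests passed", 5, 6)
       INFO: 5 of 6 tests passed
    """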

    def getMessage(self) -> str:
        msg = str(self.msg)
        if self.args:
            if isinstance(self.args, collections.abc.Mapping):
                msg = msg.format_map(self.args)
            else:
                msg = msg.format(*self.args)
        return msg


class OutputFormatter(logging.Formatter):
    """Format log output depending on whether the level should be shown.

    Intended to be used with the LevelAdapter class to allow the ``print_level``
    keyword argument to be added to Logger method calls (``.info()`` and others). The
    ``print_level`` Boolean value is used to determine whether or not the level of the
    logging message should be printed. By default, the level is shown. Example::

       >>> logger.info("Message", print_level=False)
       Message
       >>> logger.info("Message")
       INFO: Message
    """

    def format(self, record: logging.LogRecord) -> str:
        if record.exc_info or record.exc_text:
            raise ValueError("This formatter does not support exceptions")
        elif record.stack_info:
            raise ValueError("This formatter does not support stack traces")

        no_level_style = "{message}"
        level_style = "{levelname}: " + no_level_style
        record.message = record.getMessage()
        if getattr(record, "print_level", True):
            s = level_style.format(**record.__dict__)
        else:
            s = no_level_style.format(**record.__dict__)
        return s


# Modified from https://stackoverflow.com/a/36338212
class LevelFilter(logging.Filter):
    """Filter out log messages above or below preset cutoffs.

    Log levels in Python correspond to integers, with the lowest, DEBUG, set to
    10 and the highest, CRITICAL, set to 50. This filter causes a log handler to
    reject messages that are above or below numerical cutoffs. Example::

       >>> # Handles log levels from debug up to, but not including, error
       >>> handler.addFilter(LevelFilter(logging.DEBUG, logging.ERROR))
       >>> # Handles log levels from warning up to and including critical
       >>> handler.addFilter(LevelFilter(logging.WARNING, logging.CRITICAL + 1))
    """

    def __init__(self, low: int, high: int) -> None:
        self._low = low
        self._high = high
        super().__init__()

    def filter(self, record: logging.LogRecord) -> bool:
        return self._low <= record.levelno < self._high


logging.setLogRecordFactory(BraceLogRecord)
logger = logging.getLogger("cantera")
logger.setLevel(logging.INFO)
formatter = OutputFormatter()
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(formatter)
stdout_handler.addFilter(LevelFilter(logging.DEBUG, logging.ERROR))
logger.addHandler(stdout_handler)

stderr_handler = logging.StreamHandler(sys.stderr)
stderr_handler.setFormatter(formatter)
stderr_handler.addFilter(LevelFilter(logging.ERROR, logging.CRITICAL + 1))
logger.addHandler(stderr_handler)

logger = LevelAdapter(logger)


class TestResult(enum.IntEnum):
    """Represent the passing/failing result of a test.

    To be used instead of a bare integer for clarity.
    """

    PASS = 0
    FAIL = 1


class DefineDict:
    """
    A dictionary-like object which generates appropriate preprocessor
    define statements from its dict of variable/value pairs. Variables
    whose value is None or that are not in the dict are left undefined.
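
    For example (with hypothetical keys)::

        >>> d = DefineDict({"HAS_FOO": 1, "HAS_BAR": None})
        >>> d["HAS_FOO"]
        '#define HAS_FOO 1'
        >>> d["HAS_BAR"]
        '/* #undef HAS_BAR */'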
    """

    def __init__(self, data: dict) -> None:
        self.data = data
        self.undefined = set()

    def __getitem__(self, key: str) -> str:
        if key not in self.data or self.data[key] is None:
            self.undefined.add(key)
            return f"/* #undef {key!s} */"
        else:
            return f"#define {key!s} {self.data[key]!s}"


class ConfigBuilder:
    """
    Used along with DefineDict to generate a customized config.h file
    from a config.h.in file using the variables given in 'defines'.
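
    The template file contains bare replacement fields such as ``{HAS_FOO}``
    (a hypothetical name); each field is filled in by DefineDict::

        >>> builder = ConfigBuilder({"HAS_FOO": 1})
        >>> "{HAS_FOO}".format_map(builder.defines)
        '#define HAS_FOO 1'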
    """

    def __init__(self, defines: dict) -> None:
        self.defines = DefineDict(defines)

    def __call__(self, target: "LFSNode", source: "LFSNode", env):
        """
        Note that all three arguments are required by SCons although only the first
        two are used. All of them must be keyword arguments.
        """
        for s, t in zip(source, target):
            config_h_in = Path(str(s)).read_text()
            config_h = Path(str(t))

            config_h.write_text(config_h_in.format_map(self.defines))
            self.print_config(str(t))

    def print_config(self, filename: str):
        message = [f"Generating {filename!s} with the following settings:"]

        for key, val in sorted(self.defines.data.items()):
            if val is not None:
                message.append(f"    {key!s:<35} {val}")
        for key in sorted(self.defines.undefined):
            message.append(f"    {key!s:<35} *undefined*")

        logger.info("\n".join(message))


class TestResults:
    """
    A class that stores information about all the regression tests
    that are defined and which ones have passed / failed in order to
    print a summary at the end of the build process.
    """

    def __init__(self):
        self.tests = {}
        self.passed = {}
        self.failed = {}

    def print_report(self, target, source, env):
        """Print the test results report.

        Note that the three arguments are not used here but are required by SCons,
        and they must be keyword arguments.
        """
        values = {
            "passed": sum(self.passed.values()),
            "failed": sum(self.failed.values()),
            "skipped": len(self.tests),
        }
        message = textwrap.dedent(
            """
            *****************************
            ***    Testing Summary    ***
            *****************************

            Tests passed: {passed!s}
            Up-to-date tests skipped: {skipped!s}
            Tests failed: {failed!s}
            """
        ).format_map(values)
        if self.failed:
            message = (message + "Failed tests:" +
                       "".join("\n    - " + n for n in self.failed) +
                       "\n")
        message = message + "*****************************"
        if self.failed:
            logger.error("One or more tests failed.\n" + message, print_level=False)
            sys.exit(1)
        else:
            logger.info(message, print_level=False)


test_results = TestResults()


def regression_test(target: "LFSNode", source: "LFSNode", env: "SCEnvironment"):
    """
    Run a regression test comparing the output of a test program with
    existing "blessed" output files.

    target - The name of an output file that will be generated if
    the test is successful.

    source - A list containing the name of the program to run and
    (optionally) a list of file names to be passed as command line
    arguments.

    The test also relies on several parameters that are passed in via
    variables in the SCons environment:

    env['test_command_options'] - non-file command line options
    to be passed to the test program

    env['test_blessed_file'] - name of the "blessed" output file, if any

    env['test_comparisons'] and env['test_profiles'] - (blessed, output)
    file pairs to compare

    env['active_test_name'] and env['fast_fail_tests'] - used to update the
    test summary and to abort the build on the first failure
    """
    # unpack:
    program = source[0]
    if len(source) > 1:
        clargs = [s.name for s in source[1:]]
    else:
        clargs = []

    # Name to use for the output file
    blessed_name = env["test_blessed_file"]
    if blessed_name is not None and "blessed" in blessed_name:
        output_name = Path(blessed_name.replace("blessed", "output"))
    else:
        output_name = Path("test_output.txt")

    # command line options
    clopts = env["test_command_options"].split()

    # Run the test program
    test_dir = Path(target[0].dir.abspath)
    ret = subprocess.run(
        [program.abspath] + clopts + clargs,
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
        cwd=test_dir, env=env["ENV"], universal_newlines=True,
    )
    if ret.returncode:
        logger.error("FAILED (program exit code: {})", ret.returncode)
    test_dir.joinpath(output_name).write_text(ret.stdout)

    diff = 0
    # Compare output files
    comparisons = env["test_comparisons"]
    if blessed_name is not None:
        comparisons.append((Path(blessed_name), output_name))

    for blessed, output in comparisons:
        logger.info(f"Comparing '{blessed}' with '{output}'", print_level=False)
        d = compare_files(env, test_dir.joinpath(blessed), test_dir.joinpath(output))
        if d:
            logger.error("FAILED", print_level=False)
        diff |= d

    for blessed, output in env["test_profiles"]:
        logger.info(f"Comparing '{blessed}' with '{output}'", print_level=False)
        d = compare_profiles(env, test_dir.joinpath(blessed), test_dir.joinpath(output))
        if d:
            logger.error("FAILED", print_level=False)
        diff |= d

    del test_results.tests[env["active_test_name"]]

    passed_file = Path(target[0].abspath)
    if diff or ret.returncode:
        if passed_file.exists():
            passed_file.unlink()

        test_results.failed[env["active_test_name"]] = 1
        if env["fast_fail_tests"]:
            sys.exit(1)
    else:
        logger.info("PASSED", print_level=False)
        passed_file.write_text(time.asctime())
        test_results.passed[env["active_test_name"]] = 1


def compare_files(env: "SCEnvironment", file1: Path, file2: Path) -> TestResult:
    """
    Compare the contents of two files, using a method chosen based on
    their file extensions.
    """
    if file1.suffix == ".csv" and file2.suffix == ".csv":
        return compare_csv_files(env, file1, file2)
    else:
        return compare_text_files(env, file1, file2)


def compare_text_files(env: "SCEnvironment", file1: Path, file2: Path) -> TestResult:
    """
    Compare the contents of two text files while:
       - ignoring trailing whitespace
       - ignoring any lines starting with strings specified in the
         variable env['test_ignoreLines'].
       - comparing floating point numbers only up to the printed precision
    """
    text1 = [line.rstrip() for line in file1.read_text().split("\n")
             if not line.startswith(tuple(env["test_ignoreLines"]))]
    text2 = [line.rstrip() for line in file2.read_text().split("\n")
             if not line.startswith(tuple(env["test_ignoreLines"]))]

    # Try to compare the files without testing the floating point numbers
    diff = list(difflib.unified_diff(text1, text2))
    if not diff:
        return TestResult.PASS

    atol = env["test_csv_threshold"]
    rtol = env["test_csv_tolerance"]

    # Replace nearly-equal floating point numbers with exactly equivalent
    # representations to avoid confusing difflib
    float_regex = re.compile(r"(\s*)([+-]{0,1}\d+\.{0,1}\d*([eE][+-]{0,1}\d*){0,1})")
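    # For example, in the line " T = 1.23e-4" the regex matches with
    # group(1) == " " (padding) and group(2) == "1.23e-4" (the number).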
    for i, (line1, line2) in enumerate(zip(text1, text2)):
        if line1 == line2:
            continue

        # group(1) is the left space padding
        # group(2) is the number
        floats1 = [(m.group(1), m.group(2)) for m in float_regex.finditer(line1)]
        floats2 = [(m.group(1), m.group(2)) for m in float_regex.finditer(line2)]

        # If the lines don't contain the same number of numbers,
        # we're not going to pass the diff comparison no matter what
        if len(floats1) != len(floats2):
            continue

        # If the lines don't have the same non-numeric text,
        # we're not going to pass the diff comparison
        if float_regex.sub("", line1).strip() != float_regex.sub("", line2).strip():
            continue

        all_match = True
        for float_1, float_2 in zip(floats1, floats2):
            if float_1 == float_2:
                # String representations match, so replacement is unnecessary
                continue

            try:
                num1 = float(float_1[1])
                num2 = float(float_2[1])
            except ValueError:
                # Something went wrong -- one of the strings isn't actually a number,
                # so just ignore this line and let the test fail
                pass
            else:
                precision = max(get_precision(float_1[1]), get_precision(float_2[1]))
                # Use a local tolerance so that the adjustment for this pair's
                # printed precision does not accumulate into ``atol`` itself
                tol = atol + pow(10, precision) * 1.1
                abserr = abs(num1 - num2)
                relerr = abserr / (0.5 * abs(num1 + num2) + tol)
                if abserr > tol and relerr > rtol:
                    logger.error(
                        "Values differ: {:14g} {:14g}; "
                        "rel. err = {:.3e}; abs. err = {:.3e}",
                        num1, num2, relerr, abserr, print_level=False,
                    )
                    all_match = False
                    break

        # All the values are sufficiently close, so replace the string
        # so that the diff of this line will succeed
        if all_match:
            text2[i] = line1

    # Try the comparison again
    diff = list(difflib.unified_diff(text1, text2))
    if diff:
        message = [f"Found differences between {file1!s} and {file2!s}:", ">>>"]
        message.extend(diff)
        message.append("<<<")
        logger.error("\n".join(message), print_level=False)
        return TestResult.FAIL

    return TestResult.PASS


def get_precision(number: str) -> int:
    """Return the precision of the least significant digit in a number.

    Return an integer representing the power of 10 of the least significant digit in
    ``number``, which must be a string.

    Patterns to consider:
    123 -> 0
    123.45 -> -2
    123.45e6 -> 4
    123e4 -> 4
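
    Example::

        >>> get_precision("123.45e6")
        4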
    """

    number = number.lower()
    if "e" in number:
        number, exponent = number.split("e")
        exponent = int(exponent)
    else:
        exponent = 0

    if "." in number:
        digits = -len(number.split(".")[1])
    else:
        digits = 0

    return exponent + digits


def compare_profiles(
    env: "SCEnvironment", ref_file: Path, sample_file: Path
) -> TestResult:
    """
    Compare two 2D arrays of spatial or time profiles. Each data set should
    contain the time or space coordinate in the first column and data series
    to be compared in successive columns.

    The coordinates in each data set do not need to be the same: The data from
    the second data set will be interpolated onto the coordinates in the first
    data set before being compared. This means that the range of the "sample"
    data set should be at least as long as the "reference" data set.

    After interpolation, a data point is flagged only if it exceeds the
    absolute (``atol``), relative (``rtol``), and coordinate-shift (``xtol``)
    error thresholds simultaneously.
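
    For example, the interpolation step behaves like ``np.interp``
    (illustrative; the exact repr may vary with the numpy version)::

        >>> np.interp([0.5], [0.0, 1.0], [0.0, 2.0])
        array([1.])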
    """
    if not np:
        logger.warning("Skipping profile comparison because numpy is not available")
        return TestResult.PASS

    atol = env["test_csv_threshold"]
    rtol = env["test_csv_tolerance"]
    xtol = env["test_csv_tolerance"]

    reference = np.genfromtxt(ref_file, delimiter=",").T
    sample = np.genfromtxt(sample_file, delimiter=",").T
    if reference.shape[0] != sample.shape[0]:
        logger.error(
            "The output array does not have the same number of variables as the "
            "reference array."
        )
        return TestResult.FAIL

    # trim header columns if present
    if np.isnan(sample[0, 0]) and np.isnan(reference[0, 0]):
        reference = reference[:, 1:]
        sample = sample[:, 1:]
    if np.isnan(reference).any() or np.isnan(sample).any():
        logger.error(
            "The output array and reference array have different headers "
            "or contain non-numeric data."
        )
        return TestResult.FAIL

    n_vars = reference.shape[0]
    n_times = reference.shape[1]

    bad = []
    template = "{0:10.4e}  {1:5d}  {2:14.7e}  {3:14.7e}  {4:9.3e}  {5:9.3e}  {6:9.3e}"
    header = ["Failed series comparisons:"]
    header.append("{:10s}  {:5s}  {:14s}  {:14s}  {:9s}  {:9s}  {:9s}".format(
        "coordinate", "comp.", "reference val.", "test value", "abs. err", "rel. err",
        "pos. err"
    ))
    header.append(f"{10*'-'}  -----  {14*'-'}  {14*'-'}  {9*'-'}  {9*'-'}  {9*'-'}")
    ref_ptp = np.ptp(reference, axis=1)
    ref_max = np.abs(reference).max(axis=1)
    sample_ptp = np.ptp(sample, axis=1)
    sample_max = np.abs(sample).max(axis=1)
    scale = np.maximum(
        np.maximum(ref_ptp[1:], ref_max[1:]),
        np.maximum(sample_ptp[1:], sample_max[1:])
    ).reshape(n_vars - 1, -1)
    ref_diff = np.diff(reference)
    slope = ref_diff[1:, :] / ref_diff[0, :] * ref_ptp[0]
    slope = np.hstack((np.zeros((n_vars - 1, 1)), slope))
    comp = np.zeros((n_vars - 1, n_times))
    for i, row in enumerate(sample[1:]):
        comp[i, :] = np.interp(reference[0, :], sample[0, :], row)

    abserr = np.abs(reference[1:] - comp)
    relerr = abserr / (scale + atol)
    # error that can be accounted for by shifting the profile along
    # the time / spatial coordinate
    xerr = abserr / (np.abs(slope) + atol)
    if np.any(abserr > atol) and np.any(relerr > rtol) and np.any(xerr > xtol):
        it = np.nditer((abserr, relerr, xerr), flags=["multi_index"])
        for a, r, x in it:
            # Flag only the points that exceed all three thresholds. Row i of
            # the error arrays corresponds to data series i + 1 of the input.
            if a > atol and r > rtol and x > xtol:
                i, j = it.multi_index
                bad.append((reference[0, j], i, reference[i + 1, j], comp[i, j],
                            a, r, x))

    footer = []
    maxrows = 10
    if len(bad) > maxrows:
        bad.sort(key=lambda row: -row[5])
        footer += [f"Plus {len(bad) - maxrows} more points exceeding error thresholds."]
        bad = bad[:maxrows]

    if bad:
        logger.error(
            "\n".join(header + [template.format(*row) for row in bad] + footer),
            print_level=False,
        )
        return TestResult.FAIL
    else:
        return TestResult.PASS


def compare_csv_files(env: "SCEnvironment", file1: Path, file2: Path) -> TestResult:
    """
    Compare the contents of two .csv files to see if they are
    similar. Similarity is defined according to tolerances stored in
    the environment as:

        env['test_csv_threshold']
        env['test_csv_tolerance']

    The comparison for each variable is:

        |a-b|/(max(|a|,|b|) + threshold) < tolerance
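
    For example (illustrative numbers), a = 1.01 and b = 1.00 with
    threshold = 0.01 give 0.01 / (1.01 + 0.01) ≈ 0.0098, which passes for any
    tolerance larger than that.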

    Returns TestResult.PASS if all variables in the files are similar and
    TestResult.FAIL if they differ. Lines containing non-numeric data are
    automatically ignored.
    """
    if not np:
        logger.warning("Skipping CSV comparison because numpy is not available")
        return TestResult.PASS

    # decide how many header lines to skip
    lines = file1.read_text().split("\n")
    header_rows = 0
    good_chars = set("0123456789.+-eE, ")
    for line in lines:
        if not set(line).issubset(good_chars):
            header_rows += 1
        else:
            break

    try:
        data1 = np.genfromtxt(file1, skip_header=header_rows, delimiter=",")
        data2 = np.genfromtxt(file2, skip_header=header_rows, delimiter=",")
    except (IOError, StopIteration) as e:
        # The custom OutputFormatter does not support exc_info, so include
        # the exception text in the message itself
        logger.error(f"Could not read data files: {file1}; {file2} ({e})")
        return TestResult.FAIL

    threshold = env["test_csv_threshold"]
    try:
        denom = np.maximum(np.abs(data2), np.abs(data1)) + threshold
        relerror = np.abs(data2 - data1) / denom
        maxerror = np.nanmax(relerror.flat)
    except (ValueError, TypeError) as e:
        logger.error(f"Could not compute error: {e}")
        return TestResult.FAIL

    tol = env["test_csv_tolerance"]
    if maxerror < tol:  # Threshold based on printing 6 digits in the CSV file
        return TestResult.PASS

    n_fail = np.sum(relerror > tol)
    n_tot = relerror.size
    message = [
        "Files differ.",
        f"{n_fail:d} / {n_tot:d} elements above specified tolerance ({tol:f})",
        "  row   col   reference       test            rel. error",
        "  ----  ----  --------------  --------------  ----------",
    ]
    it = np.nditer([relerror, data1, data2], flags=["multi_index"])
    for rele, ref, test in it:
        if rele > tol:
            r = it.multi_index[0] + header_rows + 1
            c = it.multi_index[1] + 1
            message.append(
                f"  {r:4d}  {c:4d}  {ref:14.7e}  {test:14.7e}  {rele:10.4e}"
            )
    logger.error("\n".join(message))
    return TestResult.FAIL


def regression_test_message(target, source, env: "SCEnvironment") -> str:
    """
    Determine the message printed by SCons while building a
    RegressionTest target.

    Note that the first two arguments are not used but are required by SCons and they
    must be keyword arguments.
    """
    return f"""* Running test '{env["active_test_name"]}'..."""


def add_RegressionTest(env: "SCEnvironment") -> None:
    """
    Add "RegressionTest" as a Builder in the specified SCons Environment.
    """
    env["BUILDERS"]["RegressionTest"] = env.Builder(
        action=env.Action(regression_test, regression_test_message)
    )


def quoted(s: str) -> str:
    """Return the given string wrapped in double quotes."""
    return f'"{s}"'


def multi_glob(env: "SCEnvironment", subdir: str, *args: str):
    """Use SCons Glob to find nodes in a subdirectory using many file extensions.

    Each argument in ``args`` is assumed to be a file extension,
    unless the arg starts with a ``'^'``, in which case the remainder
    of the argument is taken to be a complete pattern.
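
    For example, ``multi_glob(env, "src", "cpp", "^include/*.h")`` (hypothetical
    paths) matches ``src/*.cpp`` and ``src/include/*.h``.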
    """
    matches = []
    for ext in args:
        if ext.startswith("^"):
            matches += env.Glob(Path(subdir).joinpath(ext[1:]))
        else:
            matches += env.Glob(Path(subdir).joinpath(f"*.{ext}"))
    return matches


def which(program: str) -> bool:
    """Replicate the functionality of the 'which' shell command."""
    for ext in ("", ".exe", ".bat"):
        fpath = Path(program + ext)
        for path in os.environ["PATH"].split(os.pathsep):
            exe_file = Path(path).joinpath(fpath)
            if exe_file.exists() and os.access(exe_file, os.X_OK):
                return True
    return False


def help(env: "SCEnvironment", options: "SCVariables") -> None:
    """Print help about configuration options.

    Print a nicely formatted description of each SCons configuration
    option, its permitted values, default value, and current value
    if different from the default.
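
    For example, a Boolean option might be rendered as (illustrative)::

        * verbose_tests: [ yes | no ]
            Show full output from tests.
            - default: 'no'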
    """

    message = [
        textwrap.dedent(
            """
                **************************************************
                *   Configuration options for building Cantera   *
                **************************************************

        The following options can be passed to SCons to customize the Cantera
        build process. They should be given in the form:

            scons build option1=value1 option2=value2

        Variables set in this way will be stored in the 'cantera.conf' file and reused
        automatically on subsequent invocations of scons. Alternatively, the
        configuration options can be entered directly into 'cantera.conf' before
        running 'scons build'. The format of this file is:

            option1 = 'value1'
            option2 = 'value2'

                **************************************************"""
        )
    ]

    option_wrapper = textwrap.TextWrapper(
        initial_indent=4 * " ",
        subsequent_indent=4 * " ",
        width=72,
    )
    for opt in options.options:
        # Extract the help description from the permitted values. The original
        # help string has the format "Help text (value1|value2)" for EnumVariable
        # and BoolVariable types, or just "Help text" for other Variables
        if opt.help.endswith(")"):
            help_text, values = opt.help.rsplit("(", maxsplit=1)
            values = values.rstrip(")").strip().replace("|", " | ")
            if not values:
                values = "string"
        else:
            help_text = opt.help
            values = "string"

        # First line: "* option-name: [ choice1 | choice2 ]"
        lines = [f"* {opt.key}: [ {values} ]"]

        # Help text, wrapped and indented 4 spaces
        lines.extend(option_wrapper.wrap(re.sub(r"\s+", " ", help_text)))

        # Fix the representation of Boolean options, which are stored as
        # Python bool types
        default = opt.default
        if default is True:
            default = "yes"
        elif default is False:
            default = "no"

        lines.append(f"    - default: {default!r}")

        # Get the actual value in the current environment
        if opt.key in env:
            actual = env.subst(f"${opt.key!s}")
        else:
            actual = None

        # Fix the representation of Boolean options to match the default values
        if actual == "True":
            actual = "yes"
        elif actual == "False":
            actual = "no"

        # Print the value if it differs from the default
        if actual != default:
            lines.append(f"    - actual: {actual!r}")
        message.append("\n".join(lines))

    logger.info("\n\n".join(message), print_level=False)


def listify(value: "Union[str, Iterable]") -> "List[str]":
    """
    Convert an option specified as a string to a list, using spaces as
    delimiters. Passes lists and tuples transparently.
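
    Example::

        >>> listify("a b c")
        ['a', 'b', 'c']
        >>> listify(("a", "b"))
        ['a', 'b']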
    """
    if isinstance(value, str):
        return value.split()
    else:
        # Already a sequence. Return as a list
        return list(value)


def remove_file(name: "TPathLike") -> None:
    """Remove file (if it exists) and print a log message."""
    path_name = Path(name)
    if path_name.exists():
        logger.info(f"Removing file '{name!s}'")
        path_name.unlink()


def remove_directory(name: "TPathLike") -> None:
    """Remove directory recursively and print a log message."""
    path_name = Path(name)
    if path_name.exists() and path_name.is_dir():
        logger.info(f"Removing directory '{name!s}'")
        shutil.rmtree(path_name)


def ipdb():
    """
    Break execution and drop into an IPython debug shell at the point
    where this function is called.
    """
    from IPython.core.debugger import Pdb
    from IPython.core import getipython

    ip = getipython.get_ipython()
    def_colors = ip.colors
    Pdb(def_colors).set_trace(sys._getframe().f_back)


def get_spawn(env: "SCEnvironment"):
    """
    A replacement for env['SPAWN'] on Windows that can deal with very long
    commands, namely those generated when linking. This is only used when
    compiling with MinGW, as SCons automatically uses a tempfile for the
    MSVC link command.

    Pass the return value of this function as the SPAWN keyword argument to
    the Library target, for example:

        env.SharedLibrary(..., SPAWN=get_spawn(env))

    Adapted from https://github.com/SCons/scons/wiki/LongCmdLinesOnWin32
    """

    if "cmd.exe" not in env["SHELL"] or env.subst("$CXX") == "cl":
        return env["SPAWN"]

    def our_spawn(sh: str, escape: str, cmd: str, args: "List[str]",
                  environ: "Dict[str, str]"):
        newargs = " ".join(args[1:])
        cmdline = cmd + " " + newargs
        startupinfo = subprocess.STARTUPINFO()  # type: ignore
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW  # type: ignore
        proc = subprocess.Popen(cmdline,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                startupinfo=startupinfo,
                                shell=False,
                                env=environ,
                                universal_newlines=True)  # decode output as text
        _, err = proc.communicate()
        rv = proc.wait()
        if rv:
            logger.error(err)
        return rv

    return our_spawn


def get_command_output(cmd: str, *args: str):
    """
    Run a command with arguments and return its output.
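
    Example (assuming a POSIX system with ``echo`` on the PATH)::

        >>> get_command_output("echo", "hello")
        'hello'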
    """
    environ = dict(os.environ)
    if "PYTHONHOME" in environ:
        # Can cause problems when trying to run a different Python interpreter
        del environ["PYTHONHOME"]
    data = subprocess.run(
        [cmd] + list(args),
        env=environ,
        stdout=subprocess.PIPE,
        universal_newlines=True,
        check=True,
    )
    return data.stdout.strip()


# Monkey patch for SCons Cygwin bug
# See https://github.com/SCons/scons/issues/2664
if "cygwin" in platform.system().lower():
    import SCons.Node.FS
    SCons.Node.FS._my_normcase = lambda x: x