#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.

This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.

For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.

"""

import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging

# Formatting. Default colors to empty strings.
BOLD, GREEN, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
    # Make sure python thinks it can write unicode to its stdout
    "\u2713".encode("utf_8").decode(sys.stdout.encoding)
    TICK = "✓ "
    CROSS = "✖ "
    CIRCLE = "○ "
except UnicodeDecodeError:
    TICK = "P "
    CROSS = "x "
    CIRCLE = "o "

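# Windows 10 build 14393 (version 1607) is the first console that understands the
# ANSI escape sequences used below; older Windows consoles simply keep the empty
# color strings defined above.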
if os.name != 'nt' or sys.getwindowsversion() >= (10, 0, 14393):
    if os.name == 'nt':
        import ctypes
        kernel32 = ctypes.windll.kernel32
        ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4
        STD_OUTPUT_HANDLE = -11
        STD_ERROR_HANDLE = -12
        # Enable ANSI color control on stdout
        stdout = kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
        stdout_mode = ctypes.c_int32()
        kernel32.GetConsoleMode(stdout, ctypes.byref(stdout_mode))
        kernel32.SetConsoleMode(stdout, stdout_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
        # Enable ANSI color control on stderr
        stderr = kernel32.GetStdHandle(STD_ERROR_HANDLE)
        stderr_mode = ctypes.c_int32()
        kernel32.GetConsoleMode(stderr, ctypes.byref(stderr_mode))
        kernel32.SetConsoleMode(stderr, stderr_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
    # primitive formatting on supported
    # terminals via ANSI escape sequences:
    BOLD = ('\033[0m', '\033[1m')
    GREEN = ('\033[0m', '\033[0;32m')
    RED = ('\033[0m', '\033[0;31m')
    GREY = ('\033[0m', '\033[1;30m')

TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
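# Exit codes reported back by the individual test scripts (BitcoinTestFramework);
# 77 follows the automake convention for a skipped test.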

BASE_SCRIPTS = [
    # Scripts that are run by the travis build process.
    # Longest test should go first, to favor running tests in parallel
    'feature_fee_estimation.py',
    'wallet_hd.py',
    'wallet_backup.py',
    # vv Tests less than 5m vv
    'mining_getblocktemplate_longpoll.py',
    'feature_maxuploadtarget.py',
    'feature_block.py',
    'rpc_fundrawtransaction.py',
    'p2p_compactblocks.py',
    'feature_segwit.py',
    # vv Tests less than 2m vv
    'wallet_basic.py',
    'wallet_labels.py',
    'p2p_segwit.py',
    'p2p_timeouts.py',
    'wallet_dump.py',
    'wallet_listtransactions.py',
    # vv Tests less than 60s vv
    'p2p_sendheaders.py',
    'wallet_zapwallettxes.py',
    'wallet_importmulti.py',
    'mempool_limit.py',
    'rpc_txoutproof.py',
    'wallet_listreceivedby.py',
    'wallet_abandonconflict.py',
    'feature_csv_activation.py',
    'rpc_rawtransaction.py',
    'wallet_address_types.py',
    'feature_bip68_sequence.py',
    'p2p_feefilter.py',
    'feature_reindex.py',
    # vv Tests less than 30s vv
    'wallet_keypool_topup.py',
    'interface_zmq.py',
    'interface_bitcoin_cli.py',
    'mempool_resurrect.py',
    'wallet_txn_doublespend.py --mineblock',
    'tool_wallet.py',
    'wallet_txn_clone.py',
    'wallet_txn_clone.py --segwit',
    'rpc_getchaintips.py',
    'rpc_misc.py',
    'interface_rest.py',
    'mempool_spend_coinbase.py',
    'mempool_reorg.py',
    'mempool_persist.py',
    'wallet_multiwallet.py',
    'wallet_multiwallet.py --usecli',
    'wallet_createwallet.py',
    'wallet_createwallet.py --usecli',
    'interface_http.py',
    'interface_rpc.py',
    'rpc_psbt.py',
    'rpc_users.py',
    'feature_proxy.py',
    'rpc_signrawtransaction.py',
    'wallet_groups.py',
    'p2p_disconnect_ban.py',
    'rpc_decodescript.py',
    'rpc_blockchain.py',
    'rpc_deprecated.py',
    'wallet_disable.py',
    'rpc_net.py',
    'wallet_keypool.py',
    'p2p_mempool.py',
    'p2p_blocksonly.py',
    'mining_prioritisetransaction.py',
    'p2p_invalid_locator.py',
    'p2p_invalid_block.py',
    'p2p_invalid_messages.py',
    'p2p_invalid_tx.py',
    'feature_assumevalid.py',
    'example_test.py',
    'wallet_txn_doublespend.py',
    'wallet_txn_clone.py --mineblock',
    'feature_notifications.py',
    'rpc_invalidateblock.py',
    'feature_rbf.py',
    'mempool_packages.py',
    'rpc_createmultisig.py',
    'feature_versionbits_warning.py',
    'rpc_preciousblock.py',
    'wallet_importprunedfunds.py',
    'p2p_leak_tx.py',
    'rpc_signmessage.py',
    'wallet_balance.py',
    'feature_nulldummy.py',
    'mempool_accept.py',
    'wallet_import_rescan.py',
    'wallet_import_with_label.py',
    'rpc_bind.py --ipv4',
    'rpc_bind.py --ipv6',
    'rpc_bind.py --nonloopback',
    'mining_basic.py',
    'wallet_bumpfee.py',
    'rpc_named_arguments.py',
    'wallet_listsinceblock.py',
    'p2p_leak.py',
    'wallet_encryption.py',
    'wallet_scriptaddress2.py',
    'feature_dersig.py',
    'feature_cltv.py',
    'rpc_uptime.py',
    'wallet_resendwallettransactions.py',
    'wallet_fallbackfee.py',
    'feature_minchainwork.py',
    'rpc_getblockstats.py',
    'wallet_create_tx.py',
    'p2p_fingerprint.py',
    'feature_uacomment.py',
    'wallet_coinbase_category.py',
    'feature_filelock.py',
    'p2p_unrequested_blocks.py',
    'feature_includeconf.py',
    'rpc_deriveaddresses.py',
    'rpc_deriveaddresses.py --usecli',
    'rpc_scantxoutset.py',
    'feature_logging.py',
    'p2p_node_network_limited.py',
    'feature_blocksdir.py',
    'feature_config_args.py',
    'rpc_help.py',
    'feature_help.py',
    'feature_shutdown.py',
    # Don't append tests at the end to avoid merge conflicts
    # Put them in a random line within the section that fits their approximate run-time
]

EXTENDED_SCRIPTS = [
    # These tests are not run by the travis build process.
    # Longest test should go first, to favor running tests in parallel
    'feature_pruning.py',
    'feature_dbcrash.py',
]

# Place EXTENDED_SCRIPTS first since it has the longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS

NON_SCRIPTS = [
    # These are python files that live in the functional tests directory, but are not test scripts.
    "combine_logs.py",
    "create_cache.py",
    "test_runner.py",
]

def main():
    # Parse arguments and pass through unrecognised args
    parser = argparse.ArgumentParser(add_help=False,
                                     usage='%(prog)s [test_runner.py options] [script options] [scripts]',
                                     description=__doc__,
                                     epilog='''
    Help text and arguments for individual test scripts:''',
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--combinedlogslen', '-c', type=int, default=0, metavar='n', help='On failure, print a log (of length n lines) to the console, combined from the test framework and all test nodes.')
    parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
    parser.add_argument('--ci', action='store_true', help='Run checks and code that are usually only enabled in a continuous integration environment')
    parser.add_argument('--exclude', '-x', help='specify a comma-separated list of scripts to exclude.')
    parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
    parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
    parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
    parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous test run.')
    parser.add_argument('--quiet', '-q', action='store_true', help='only print dots, results summary and failure logs')
    parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
    parser.add_argument('--failfast', action='store_true', help='stop execution after the first test failure')
    args, unknown_args = parser.parse_known_args()

    # args to be passed on always start with two dashes; tests are the remaining unknown args
    tests = [arg for arg in unknown_args if arg[:2] != "--"]
    passon_args = [arg for arg in unknown_args if arg[:2] == "--"]

    # Read config generated by configure.
    config = configparser.ConfigParser()
    configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
    config.read_file(open(configfile, encoding="utf8"))

    passon_args.append("--configfile=%s" % configfile)

    # Set up logging
    logging_level = logging.INFO if args.quiet else logging.DEBUG
    logging.basicConfig(format='%(message)s', level=logging_level)

    # Create base test directory
    tmpdir = "%s/test_runner_Ł_🏃_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))

    os.makedirs(tmpdir)

    logging.debug("Temporary test directory at %s" % tmpdir)

    enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")

    if not enable_bitcoind:
        print("No functional tests to run.")
        print("Rerun ./configure with --with-daemon and then make")
        sys.exit(0)

    # Build list of tests
    test_list = []
    if tests:
        # Individual tests have been specified. Run specified tests that exist
        # in the ALL_SCRIPTS list. Accept the name with or without .py extension.
        tests = [test + ".py" if ".py" not in test else test for test in tests]
        for test in tests:
            if test in ALL_SCRIPTS:
                test_list.append(test)
            else:
                print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test))
    elif args.extended:
        # Include extended tests
        test_list += ALL_SCRIPTS
    else:
        # Run base tests only
        test_list += BASE_SCRIPTS

    # Remove the test cases that the user has explicitly asked to exclude.
    if args.exclude:
        exclude_tests = [test.split('.py')[0] for test in args.exclude.split(',')]
        for exclude_test in exclude_tests:
            # Remove <test_name>.py and <test_name>.py --arg from the test list
            exclude_list = [test for test in test_list if test.split('.py')[0] == exclude_test]
            for exclude_item in exclude_list:
                test_list.remove(exclude_item)
            if not exclude_list:
                print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))

    if not test_list:
        print("No valid test scripts specified. Check that your test is in one "
              "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
        sys.exit(0)

    if args.help:
        # Print help for test_runner.py, then print help of the first script (with args removed) and exit.
        parser.print_help()
        subprocess.check_call([sys.executable, os.path.join(config["environment"]["SRCDIR"], 'test', 'functional', test_list[0].split()[0]), '-h'])
        sys.exit(0)

    check_script_list(src_dir=config["environment"]["SRCDIR"], fail_on_warn=args.ci)
    check_script_prefixes()

    if not args.keepcache:
        shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)

    run_tests(
        test_list=test_list,
        src_dir=config["environment"]["SRCDIR"],
        build_dir=config["environment"]["BUILDDIR"],
        tmpdir=tmpdir,
        jobs=args.jobs,
        enable_coverage=args.coverage,
        args=passon_args,
        combined_logs_len=args.combinedlogslen,
        failfast=args.failfast,
        runs_ci=args.ci,
    )

def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, runs_ci):
    args = args or []

    # Warn if litecoind is already running (unix only)
    try:
        if subprocess.check_output(["pidof", "litecoind"]) is not None:
            print("%sWARNING!%s There is already a litecoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
    except (OSError, subprocess.SubprocessError):
        pass

    # Warn if there is a cache directory
    cache_dir = "%s/test/cache" % build_dir
    if os.path.isdir(cache_dir):
        print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))

    tests_dir = src_dir + '/test/functional/'

    flags = ['--cachedir={}'.format(cache_dir)] + args

    if enable_coverage:
        coverage = RPCCoverage()
        flags.append(coverage.flag)
        logging.debug("Initializing coverage directory at %s" % coverage.dir)
    else:
        coverage = None

    if len(test_list) > 1 and jobs > 1:
        # Populate cache
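        # create_cache.py builds the initial regtest block chain cache once up front,
        # so the parallel test jobs below don't all regenerate it themselves.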
        try:
            subprocess.check_output([sys.executable, tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
        except subprocess.CalledProcessError as e:
            sys.stdout.buffer.write(e.output)
            raise

    # Run tests
    job_queue = TestHandler(
        num_tests_parallel=jobs,
        tests_dir=tests_dir,
        tmpdir=tmpdir,
        test_list=test_list,
        flags=flags,
        timeout_duration=40 * 60 if runs_ci else float('inf'),  # in seconds
    )
    start_time = time.time()
    test_results = []

    max_len_name = len(max(test_list, key=len))
    test_count = len(test_list)
    for i in range(test_count):
        test_result, testdir, stdout, stderr = job_queue.get_next()
        test_results.append(test_result)
        done_str = "{}/{} - {}{}{}".format(i + 1, test_count, BOLD[1], test_result.name, BOLD[0])
        if test_result.status == "Passed":
            logging.debug("%s passed, Duration: %s s" % (done_str, test_result.time))
        elif test_result.status == "Skipped":
            logging.debug("%s skipped" % (done_str))
        else:
            print("%s failed, Duration: %s s\n" % (done_str, test_result.time))
            print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
            print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
            if combined_logs_len and os.path.isdir(testdir):
                # Print the final `combinedlogslen` lines of the combined logs
                print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
                print('\n============')
                print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
                print('============\n')
                combined_logs_args = [sys.executable, os.path.join(tests_dir, 'combine_logs.py'), testdir]
                if BOLD[0]:
                    combined_logs_args += ['--color']
                combined_logs, _ = subprocess.Popen(combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE).communicate()
                print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))

            if failfast:
                logging.debug("Early exiting after test failure")
                break

    print_results(test_results, max_len_name, (int(time.time() - start_time)))

    if coverage:
        coverage.report_rpc_coverage()

        logging.debug("Cleaning up coverage data")
        coverage.cleanup()

    # Clear up the temp directory if all subdirectories are gone
    if not os.listdir(tmpdir):
        os.rmdir(tmpdir)

    all_passed = all(map(lambda test_result: test_result.was_successful, test_results))

    # This will be a no-op unless failfast is True, in which case there may be
    # dangling processes which need to be killed.
    job_queue.kill_and_join()

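    # Exit with status 0 only if every test passed or was skipped (see TestResult.was_successful).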
    sys.exit(not all_passed)

def print_results(test_results, max_len_name, runtime):
    results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS   ", "DURATION") + BOLD[0]

    test_results.sort(key=TestResult.sort_key)
    all_passed = True
    time_sum = 0

    for test_result in test_results:
        all_passed = all_passed and test_result.was_successful
        time_sum += test_result.time
        test_result.padding = max_len_name
        results += str(test_result)

    status = TICK + "Passed" if all_passed else CROSS + "Failed"
    if not all_passed:
        results += RED[1]
    results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
    if not all_passed:
        results += RED[0]
    results += "Runtime: %s s\n" % (runtime)
    print(results)

class TestHandler:
    """
    Trigger the test scripts passed in via the list.
    """

    def __init__(self, *, num_tests_parallel, tests_dir, tmpdir, test_list, flags, timeout_duration):
        assert num_tests_parallel >= 1
        self.num_jobs = num_tests_parallel
        self.tests_dir = tests_dir
        self.tmpdir = tmpdir
        self.timeout_duration = timeout_duration
        self.test_list = test_list
        self.flags = flags
        self.num_running = 0
        self.jobs = []

    def get_next(self):
        while self.num_running < self.num_jobs and self.test_list:
            # Add tests
            self.num_running += 1
            test = self.test_list.pop(0)
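            # Derive a port seed from the number of tests still queued, so that each
            # concurrently running test gets its own --portseed and the framework can
            # pick p2p/RPC ports that don't clash between jobs.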
            portseed = len(self.test_list)
            portseed_arg = ["--portseed={}".format(portseed)]
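            # Keep each test's output in memory (up to 64 KiB each) and only spill to disk if it grows larger.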
            log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
            log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
            test_argv = test.split()
            testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
            tmpdir_arg = ["--tmpdir={}".format(testdir)]
            self.jobs.append((test,
                              time.time(),
                              subprocess.Popen([sys.executable, self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
                                               universal_newlines=True,
                                               stdout=log_stdout,
                                               stderr=log_stderr),
                              testdir,
                              log_stdout,
                              log_stderr))
        if not self.jobs:
            raise IndexError('pop from empty list')
        dot_count = 0
        while True:
            # Return first proc that finishes
            time.sleep(.5)
            for job in self.jobs:
                (name, start_time, proc, testdir, log_out, log_err) = job
                if int(time.time() - start_time) > self.timeout_duration:
                    # On Travis, time out individual tests (to stop a hanging test from blocking the run without providing useful output).
                    proc.send_signal(signal.SIGINT)
                if proc.poll() is not None:
                    log_out.seek(0), log_err.seek(0)
                    [stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)]
                    log_out.close(), log_err.close()
                    if proc.returncode == TEST_EXIT_PASSED and stderr == "":
                        status = "Passed"
                    elif proc.returncode == TEST_EXIT_SKIPPED:
                        status = "Skipped"
                    else:
                        status = "Failed"
                    self.num_running -= 1
                    self.jobs.remove(job)
                    clearline = '\r' + (' ' * dot_count) + '\r'
                    print(clearline, end='', flush=True)
                    dot_count = 0
                    return TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr
            print('.', end='', flush=True)
            dot_count += 1

    def kill_and_join(self):
        """Send SIGKILL to all jobs and block until all have ended."""
        procs = [i[2] for i in self.jobs]

        for proc in procs:
            proc.kill()

        for proc in procs:
            proc.wait()


class TestResult():
    def __init__(self, name, status, time):
        self.name = name
        self.status = status
        self.time = time
        self.padding = 0

    def sort_key(self):
        if self.status == "Passed":
            return 0, self.name.lower()
        elif self.status == "Failed":
            return 2, self.name.lower()
        elif self.status == "Skipped":
            return 1, self.name.lower()

    def __repr__(self):
        if self.status == "Passed":
            color = GREEN
            glyph = TICK
        elif self.status == "Failed":
            color = RED
            glyph = CROSS
        elif self.status == "Skipped":
            color = GREY
            glyph = CIRCLE

        return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]

    @property
    def was_successful(self):
        return self.status != "Failed"


def check_script_prefixes():
    """Check that test scripts start with one of the allowed name prefixes."""

    good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet|tool)_")
    bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None]

    if bad_script_names:
        print("%sERROR:%s %d tests not meeting naming conventions:" % (BOLD[1], BOLD[0], len(bad_script_names)))
        print("  %s" % ("\n  ".join(sorted(bad_script_names))))
        raise AssertionError("Some tests are not following naming convention!")


def check_script_list(*, src_dir, fail_on_warn):
    """Check scripts directory.

    Check that there are no scripts in the functional tests directory which are
    not being run by this test_runner.py."""
    script_dir = src_dir + '/test/functional/'
    python_files = set([test_file for test_file in os.listdir(script_dir) if test_file.endswith(".py")])
    missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
    if len(missed_tests) != 0:
        print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
        if fail_on_warn:
            # On travis this warning is an error to prevent merging incomplete commits into master
            sys.exit(1)


class RPCCoverage():
    """
    Coverage reporting utilities for test_runner.

    Coverage calculation works by having each test script subprocess write
    coverage files into a particular directory. These files contain the RPC
    commands invoked during testing, as well as a complete listing of RPC
    commands per `litecoin-cli help` (`rpc_interface.txt`).

    After all tests complete, the commands run are combined and diff'd against
    the complete list to calculate uncovered RPC commands.

    See also: test/functional/test_framework/coverage.py

    """
    def __init__(self):
        self.dir = tempfile.mkdtemp(prefix="coverage")
        self.flag = '--coveragedir=%s' % self.dir

    def report_rpc_coverage(self):
        """
        Print out RPC commands that were unexercised by tests.

        """
        uncovered = self._get_uncovered_rpc_commands()

        if uncovered:
            print("Uncovered RPC commands:")
            print("".join(("  - %s\n" % command) for command in sorted(uncovered)))
        else:
            print("All RPC commands covered.")

    def cleanup(self):
        return shutil.rmtree(self.dir)

    def _get_uncovered_rpc_commands(self):
        """
        Return a set of currently untested RPC commands.

        """
        # This is shared from `test/functional/test_framework/coverage.py`
        reference_filename = 'rpc_interface.txt'
        coverage_file_prefix = 'coverage.'

        coverage_ref_filename = os.path.join(self.dir, reference_filename)
        coverage_filenames = set()
        all_cmds = set()
        covered_cmds = set()

        if not os.path.isfile(coverage_ref_filename):
            raise RuntimeError("No coverage reference found")

        with open(coverage_ref_filename, 'r', encoding="utf8") as coverage_ref_file:
            all_cmds.update([line.strip() for line in coverage_ref_file.readlines()])

        for root, _, files in os.walk(self.dir):
            for filename in files:
                if filename.startswith(coverage_file_prefix):
                    coverage_filenames.add(os.path.join(root, filename))

        for filename in coverage_filenames:
            with open(filename, 'r', encoding="utf8") as coverage_file:
                covered_cmds.update([line.strip() for line in coverage_file.readlines()])

        return all_cmds - covered_cmds


if __name__ == '__main__':
    main()