# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.

# Implementation of pytest run-time hook functions. These are invoked by
# pytest at certain points during operation, e.g. at startup, for each
# executed test, at shutdown, etc. These hooks perform functions such as:
# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
# - Creating the U-Boot console test fixture.
# - Creating the HTML log file.
# - Monitoring each test's results.
# - Implementing custom pytest markers.

import atexit
import configparser
import errno
import io
import os
import os.path
import pytest
import re
from _pytest.runner import runtestprotocol
import sys

# Globals: The HTML log file, and the connection to the U-Boot console.
log = None
console = None

def mkdir_p(path):
    """Create a directory path.

    This includes creating any intermediate/parent directories. Any errors
    caused by the directories already existing are ignored.

    Args:
        path: The directory path to create.

    Returns:
        Nothing.
    """

    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise

def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

    Args:
        parser: The pytest command-line parser.

    Returns:
        Nothing.
    """

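    # These options are usually supplied via the test/py/test.py wrapper
    # script; a typical invocation might look like this (illustrative values
    # only):
    #   ./test/py/test.py --bd sandbox --build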
    parser.addoption('--build-dir', default=None,
        help='U-Boot build directory (O=)')
    parser.addoption('--result-dir', default=None,
        help='U-Boot test result/tmp directory')
    parser.addoption('--persistent-data-dir', default=None,
        help='U-Boot test persistent generated data directory')
    parser.addoption('--board-type', '--bd', '-B', default='sandbox',
        help='U-Boot board type')
    parser.addoption('--board-identity', '--id', default='na',
        help='U-Boot board identity/instance')
    parser.addoption('--build', default=False, action='store_true',
        help='Compile U-Boot before running tests')
    parser.addoption('--buildman', default=False, action='store_true',
        help='Use buildman to build U-Boot (assuming --build is given)')
    parser.addoption('--gdbserver', default=None,
        help='Run sandbox under gdbserver. The argument is the channel ' +
        'over which gdbserver should communicate, e.g. localhost:1234')

def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

    Args:
        config: The pytest configuration.

    Returns:
        Nothing.
    """
    def parse_config(conf_file):
        """Parse a config file, loading it into the ubconfig container.

        Args:
            conf_file: Filename to load (within build_dir).

        Raises:
            Exception if the file does not exist.
        """
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                            'try passing --build option?')

        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = io.StringIO(ini_str)
            parser = configparser.RawConfigParser()
            parser.read_file(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))

    global log
    global console
    global ubconfig

    test_py_dir = os.path.dirname(os.path.abspath(__file__))
    source_dir = os.path.dirname(os.path.dirname(test_py_dir))

    board_type = config.getoption('board_type')
    board_type_filename = board_type.replace('-', '_')

    board_identity = config.getoption('board_identity')
    board_identity_filename = board_identity.replace('-', '_')

    build_dir = config.getoption('build_dir')
    if not build_dir:
        build_dir = source_dir + '/build-' + board_type
    mkdir_p(build_dir)

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir
    mkdir_p(result_dir)

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    gdbserver = config.getoption('gdbserver')
    if gdbserver and not board_type.startswith('sandbox'):
        raise Exception('--gdbserver only supported with sandbox targets')

    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')

    if config.getoption('build'):
        if config.getoption('buildman'):
            if build_dir != source_dir:
                dest_args = ['-o', build_dir, '-w']
            else:
                dest_args = ['-i']
            cmds = (['buildman', '--board', board_type] + dest_args,)
            name = 'buildman'
        else:
            if build_dir != source_dir:
                o_opt = 'O=%s' % build_dir
            else:
                o_opt = ''
            cmds = (
                ['make', o_opt, '-s', board_type + '_defconfig'],
                ['make', o_opt, '-s', '-j{}'.format(os.cpu_count())],
            )
            name = 'make'

        with log.section(name):
            runner = log.get_runner(name, sys.stdout)
            for cmd in cmds:
                runner.run(cmd, cwd=source_dir)
            runner.close()
            log.status_pass('OK')

    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()

    modules = [
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
            board_identity_filename),
    ]
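    # These modules are optional. For example (illustrative module name), a
    # u_boot_boardenv_sandbox.py found on sys.path could supply env__* values
    # for the sandbox board; everything such a module defines is merged into
    # the corresponding dictionary by the loop below.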
    for (dict_to_fill, module_name) in modules:
        try:
            module = __import__(module_name)
        except ImportError:
            continue
        dict_to_fill.update(module.__dict__)

    ubconfig.buildconfig = dict()

    # buildman -k puts autoconf.mk in the rootdir, so handle this as well
    # as the standard U-Boot build, which leaves it in include/autoconf.mk.
    parse_config('.config')
    if os.path.exists(build_dir + '/' + 'autoconf.mk'):
        parse_config('autoconf.mk')
    else:
        parse_config('include/autoconf.mk')
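
    # At this point ubconfig.buildconfig maps lower-cased option names to
    # their values, e.g. (illustrative) {'config_cmd_memory': 'y', ...};
    # setup_buildconfigspec() below keys off these 'config_*' entries.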

    ubconfig.test_py_dir = test_py_dir
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_identity = board_identity
    ubconfig.gdbserver = gdbserver
    ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'

    env_vars = (
        'board_type',
        'board_identity',
        'source_dir',
        'test_py_dir',
        'build_dir',
        'result_dir',
        'persistent_data_dir',
    )
    for v in env_vars:
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)

    if board_type.startswith('sandbox'):
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)

re_ut_test_list = re.compile(r'[^a-zA-Z0-9_]_u_boot_list_2_ut_(.*)_test_2_\1_test_(.*)\s*$')
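# A matching line in the .sym file looks roughly like this (address and symbol
# type are illustrative):
#   000000000023a618 D _u_boot_list_2_ut_dm_test_2_dm_test_autoprobe
# which parametrizes ut_subtest with the value 'dm autoprobe'.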
def generate_ut_subtest(metafunc, fixture_name, sym_path):
    """Provide parametrization for a ut_subtest fixture.

    Determines the set of unit tests built into a U-Boot binary by parsing the
    list of symbols generated by the build process. Provides this information
    to test functions by parameterizing their ut_subtest fixture parameter.

    Args:
        metafunc: The pytest test function.
        fixture_name: The name of the fixture to parametrize.
        sym_path: Relative path to the symbol file with preceding '/'
            (e.g. '/u-boot.sym')

    Returns:
        Nothing.
    """
    fn = console.config.build_dir + sym_path
    try:
        with open(fn, 'rt') as f:
            lines = f.readlines()
    except OSError:
        # The symbol file may not exist, e.g. if SPL wasn't built.
        lines = []
    lines.sort()

    vals = []
    for l in lines:
        m = re_ut_test_list.search(l)
        if not m:
            continue
        vals.append(m.group(1) + ' ' + m.group(2))

    ids = ['ut_' + s.replace(' ', '_') for s in vals]
    metafunc.parametrize(fixture_name, vals, ids=ids)

def generate_config(metafunc, fixture_name):
    """Provide parametrization for {env,brd}__ fixtures.

    If a test function takes parameter(s) (fixture names) of the form brd__xxx
    or env__xxx, the brd and env configuration dictionaries are consulted to
    find the list of values to use for those parameters, and the test is
    parametrized so that it runs once for each combination of values.

    Args:
        metafunc: The pytest test function.
        fixture_name: The name of the fixture to parametrize.

    Returns:
        Nothing.
    """

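    # For example (illustrative names and values), a board env file might
    # define:
    #   env__net_dhcp_server = True
    #   env__usb_dev_ports = (
    #       {'fixture_id': 'micro_b', 'tgt_usb_ctlr': '0'},
    #   )
    # A test taking an 'env__net_dhcp_server' parameter then receives that
    # single value, while a test taking 'env__usb_dev_port' runs once per
    # entry in the 'env__usb_dev_ports' list.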
    subconfigs = {
        'brd': console.config.brd,
        'env': console.config.env,
    }
    parts = fixture_name.split('__')
    if len(parts) < 2:
        return
    if parts[0] not in subconfigs:
        return
    subconfig = subconfigs[parts[0]]
    vals = []
    val = subconfig.get(fixture_name, [])
    # If that exact name is a key in the data source:
    if val:
        # ... use the dict value as a single parameter value.
        vals = (val, )
    else:
        # ... otherwise, see if there's a key that contains a list of
        # values to use instead.
        vals = subconfig.get(fixture_name + 's', [])
    def fixture_id(index, val):
        try:
            return val['fixture_id']
        except (TypeError, KeyError):
            return fixture_name + str(index)
    ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
    metafunc.parametrize(fixture_name, vals, ids=ids)

def pytest_generate_tests(metafunc):
    """pytest hook: parameterize test functions based on custom rules.

    Check each test function parameter (fixture name) to see if it is one of
    our custom names, and if so, provide the correct parametrization for that
    parameter.

    Args:
        metafunc: The pytest test function.

    Returns:
        Nothing.
    """
    for fn in metafunc.fixturenames:
        if fn == 'ut_subtest':
            generate_ut_subtest(metafunc, fn, '/u-boot.sym')
            continue
        if fn == 'ut_spl_subtest':
            generate_ut_subtest(metafunc, fn, '/spl/u-boot-spl.sym')
            continue
        generate_config(metafunc, fn)

@pytest.fixture(scope='session')
def u_boot_log(request):
    """Generate the value of a test's u_boot_log fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    return console.log

@pytest.fixture(scope='session')
def u_boot_config(request):
    """Generate the value of a test's u_boot_config fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    return console.config

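# A minimal test using the u_boot_console fixture might look like this
# (illustrative):
#   def test_version(u_boot_console):
#       u_boot_console.run_command('version')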
@pytest.fixture(scope='function')
def u_boot_console(request):
    """Generate the value of a test's u_boot_console fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    console.ensure_spawned()
    return console

anchors = {}
tests_not_run = []
tests_failed = []
tests_xpassed = []
tests_xfailed = []
tests_skipped = []
tests_warning = []
tests_passed = []

def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    This enables our custom result analysis code to see the list of all tests
    that should eventually be run.

    Args:
        item: The item that was collected.

    Returns:
        Nothing.
    """

    tests_not_run.append(item.name)

def cleanup():
    """Clean up all global state.

    Executed (via atexit) once the entire test process is complete. This
    includes logging the status of all tests, and the identity of any failed
    or skipped tests.

    Args:
        None.

    Returns:
        Nothing.
    """

    if console:
        console.close()
    if log:
        with log.section('Status Report', 'status_report'):
            log.status_pass('%d passed' % len(tests_passed))
            if tests_warning:
                log.status_warning('%d passed with warning' % len(tests_warning))
                for test in tests_warning:
                    anchor = anchors.get(test, None)
                    log.status_warning('... ' + test, anchor)
            if tests_skipped:
                log.status_skipped('%d skipped' % len(tests_skipped))
                for test in tests_skipped:
                    anchor = anchors.get(test, None)
                    log.status_skipped('... ' + test, anchor)
            if tests_xpassed:
                log.status_xpass('%d xpass' % len(tests_xpassed))
                for test in tests_xpassed:
                    anchor = anchors.get(test, None)
                    log.status_xpass('... ' + test, anchor)
            if tests_xfailed:
                log.status_xfail('%d xfail' % len(tests_xfailed))
                for test in tests_xfailed:
                    anchor = anchors.get(test, None)
                    log.status_xfail('... ' + test, anchor)
            if tests_failed:
                log.status_fail('%d failed' % len(tests_failed))
                for test in tests_failed:
                    anchor = anchors.get(test, None)
                    log.status_fail('... ' + test, anchor)
            if tests_not_run:
                log.status_fail('%d not run' % len(tests_not_run))
                for test in tests_not_run:
                    anchor = anchors.get(test, None)
                    log.status_fail('... ' + test, anchor)
        log.close()
atexit.register(cleanup)

def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

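    # Example marker usage in a test (illustrative):
    #   @pytest.mark.boardspec('sandbox')       # only run on the sandbox board
    #   @pytest.mark.boardspec('!sandbox_spl')  # never run on sandbox_spl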
    required_boards = []
    for boards in item.iter_markers('boardspec'):
        board = boards.args[0]
        if board.startswith('!'):
            if ubconfig.board_type == board[1:]:
                pytest.skip('board "%s" not supported' % ubconfig.board_type)
                return
        else:
            required_boards.append(board)
    if required_boards and ubconfig.board_type not in required_boards:
        pytest.skip('board "%s" not supported' % ubconfig.board_type)

def setup_buildconfigspec(item):
    """Process any 'buildconfigspec'/'notbuildconfigspec' markers for a test.

    Such markers list U-Boot configuration features that the test requires
    (or requires to be absent). If tests are being executed on a U-Boot build
    that doesn't satisfy those requirements, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

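    # Example marker usage in a test (illustrative):
    #   @pytest.mark.buildconfigspec('cmd_memory')      # needs CONFIG_CMD_MEMORY
    #   @pytest.mark.notbuildconfigspec('sandbox_spl')  # skip if CONFIG_SANDBOX_SPL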
    for options in item.iter_markers('buildconfigspec'):
        option = options.args[0]
        if not ubconfig.buildconfig.get('config_' + option.lower(), None):
            pytest.skip('.config feature "%s" not enabled' % option.lower())
    for options in item.iter_markers('notbuildconfigspec'):
        option = options.args[0]
        if ubconfig.buildconfig.get('config_' + option.lower(), None):
            pytest.skip('.config feature "%s" enabled' % option.lower())

def tool_is_in_path(tool):
    """Return True if an executable named 'tool' can be found in $PATH."""

    for path in os.environ['PATH'].split(os.pathsep):
        fn = os.path.join(path, tool)
        if os.path.isfile(fn) and os.access(fn, os.X_OK):
            return True
    return False

def setup_requiredtool(item):
    """Process any 'requiredtool' marker for a test.

    Such a marker lists some external tool (binary, executable, application)
    that the test requires. If tests are being executed on a system that
    doesn't have the required tool, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

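    # Example marker usage in a test (illustrative):
    #   @pytest.mark.requiredtool('dtc')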
    for tools in item.iter_markers('requiredtool'):
        tool = tools.args[0]
        if not tool_is_in_path(tool):
            pytest.skip('tool "%s" not in $PATH' % tool)

def start_test_section(item):
    """Start the log file section for a test and remember its anchor."""
    anchors[item.name] = log.start_section(item.name)

def pytest_runtest_setup(item):
    """pytest hook: Configure (set up) a test item.

    Called once for each test to perform any custom configuration. This hook
    is used to skip the test if certain conditions apply.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    start_test_section(item)
    setup_boardspec(item)
    setup_buildconfigspec(item)
    setup_requiredtool(item)

def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        True, to indicate to pytest that this hook handled running the test
        protocol.
    """

    log.get_and_reset_warning()
    ihook = item.ihook
    ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
    reports = runtestprotocol(item, nextitem=nextitem)
    ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
    was_warning = log.get_and_reset_warning()

    # In pytest 3, runtestprotocol() may not call pytest_runtest_setup() if
    # the test is skipped. That call is required to create the test's section
    # in the log file. The call to log.end_section() requires that the log
    # contain a section for this test. Create a section for the test if it
    # doesn't already exist.
    if item.name not in anchors:
        start_test_section(item)

    failure_cleanup = False
    if not was_warning:
        test_list = tests_passed
        msg = 'OK'
        msg_log = log.status_pass
    else:
        test_list = tests_warning
        msg = 'OK (with warning)'
        msg_log = log.status_warning
    for report in reports:
        if report.outcome == 'failed':
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped

    if failure_cleanup:
        console.drain_console()

    test_list.append(item.name)
    tests_not_run.remove(item.name)

    try:
        msg_log(msg)
    except Exception:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. a syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        import traceback
        print('Exception occurred while logging runtest status:')
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    if failure_cleanup:
        console.cleanup_spawn()

    return True
