# xref: /openbsd/gnu/llvm/llvm/utils/lit/lit/reports.py (revision d415bd75)
import base64
import datetime
import itertools
import json

from xml.sax.saxutils import quoteattr as quo

import lit.Test


def by_suite_and_test_path(test):
    # Suite names are not necessarily unique.  Include object identity in sort
    # key to avoid mixing tests of different suites.
    return (test.suite.name, id(test.suite), test.path_in_suite)
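
# A minimal usage sketch (`tests` here is a hypothetical list of lit.Test
# objects): sorting with this key keeps each suite's tests contiguous even
# when two suites share a name, since id(test.suite) breaks the tie.
#
#   tests.sort(key=by_suite_and_test_path)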


class JsonReport(object):
    def __init__(self, output_file):
        self.output_file = output_file

    def write_results(self, tests, elapsed):
        unexecuted_codes = {lit.Test.EXCLUDED, lit.Test.SKIPPED}
        tests = [t for t in tests if t.result.code not in unexecuted_codes]
        # Construct the data we will write.
        data = {}
        # Encode the current lit version as a schema version.
        data['__version__'] = lit.__versioninfo__
        data['elapsed'] = elapsed
        # FIXME: Record some information on the lit configuration used?
        # FIXME: Record information from the individual test suites?

        # Encode the tests.
        data['tests'] = tests_data = []
        for test in tests:
            test_data = {
                'name': test.getFullName(),
                'code': test.result.code.name,
                'output': test.result.output,
                'elapsed': test.result.elapsed}

            # Add test metrics, if present.
            if test.result.metrics:
                test_data['metrics'] = metrics_data = {}
                for key, value in test.result.metrics.items():
                    metrics_data[key] = value.todata()

            # Report micro-tests separately, if present
            if test.result.microResults:
                for key, micro_test in test.result.microResults.items():
                    # Expand parent test name with micro test name
                    parent_name = test.getFullName()
                    micro_full_name = parent_name + ':' + key

                    micro_test_data = {
                        'name': micro_full_name,
                        'code': micro_test.code.name,
                        'output': micro_test.output,
                        'elapsed': micro_test.elapsed}
                    if micro_test.metrics:
                        micro_test_data['metrics'] = micro_metrics_data = {}
                        for key, value in micro_test.metrics.items():
                            micro_metrics_data[key] = value.todata()

                    tests_data.append(micro_test_data)

            tests_data.append(test_data)

        with open(self.output_file, 'w') as file:
            json.dump(data, file, indent=2, sort_keys=True)
            file.write('\n')
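
# A sketch of the JSON this writer produces, assuming a single passing test
# (all values are illustrative; '__version__' serializes lit.__versioninfo__,
# and sort_keys=True orders the keys alphabetically):
#
#   {
#     "__version__": [17, 0, 6],
#     "elapsed": 1.25,
#     "tests": [
#       {
#         "code": "PASS",
#         "elapsed": 0.01,
#         "name": "my-suite :: subdir/basic.test",
#         "output": ""
#       }
#     ]
#   }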


_invalid_xml_chars_dict = {c: None for c in range(32) if chr(c) not in ('\t', '\n', '\r')}


def remove_invalid_xml_chars(s):
    # According to the XML 1.0 spec, control characters other than
    # \t, \r, and \n are not permitted anywhere in the document
    # (https://www.w3.org/TR/xml/#charsets) and therefore this function
    # removes them to produce a valid XML document.
    #
    # Note: In XML 1.1 only \0 is illegal (https://www.w3.org/TR/xml11/#charsets)
    # but lit currently produces XML 1.0 output.
    return s.translate(_invalid_xml_chars_dict)
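
# For example (illustrative): an ESC byte left over from colored compiler
# output is stripped, while tab and newline survive:
#
#   remove_invalid_xml_chars('ok\x1b[0m\tdone')  # -> 'ok[0m\tdone'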


class XunitReport(object):
    def __init__(self, output_file):
        self.output_file = output_file
        self.skipped_codes = {lit.Test.EXCLUDED,
                              lit.Test.SKIPPED, lit.Test.UNSUPPORTED}

    def write_results(self, tests, elapsed):
        tests.sort(key=by_suite_and_test_path)
        tests_by_suite = itertools.groupby(tests, lambda t: t.suite)

        with open(self.output_file, 'w') as file:
            file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
            file.write('<testsuites time="{time:.2f}">\n'.format(time=elapsed))
            for suite, test_iter in tests_by_suite:
                self._write_testsuite(file, suite, list(test_iter))
            file.write('</testsuites>\n')

    def _write_testsuite(self, file, suite, tests):
        skipped = sum(1 for t in tests if t.result.code in self.skipped_codes)
        failures = sum(1 for t in tests if t.isFailure())

        name = suite.config.name.replace('.', '-')
        file.write(f'<testsuite name={quo(name)} tests="{len(tests)}" failures="{failures}" skipped="{skipped}">\n')
        for test in tests:
            self._write_test(file, test, name)
        file.write('</testsuite>\n')

    def _write_test(self, file, test, suite_name):
        path = '/'.join(test.path_in_suite[:-1]).replace('.', '_')
        class_name = f'{suite_name}.{path or suite_name}'
        name = test.path_in_suite[-1]
        time = test.result.elapsed or 0.0
        file.write(f'<testcase classname={quo(class_name)} name={quo(name)} time="{time:.2f}"')

        if test.isFailure():
            file.write('>\n  <failure><![CDATA[')
            # In the unlikely case that the output contains the CDATA
            # terminator we wrap it by creating a new CDATA block.
            output = test.result.output.replace(']]>', ']]]]><![CDATA[>')
            if isinstance(output, bytes):
                output = output.decode("utf-8", 'ignore')

            # Failing test output sometimes contains control characters like
            # \x1b (e.g. if there was some -fcolor-diagnostics output) which are
            # not allowed inside XML files.
            # This causes problems with CI systems: for example, the Jenkins
            # JUnit XML plugin will throw an exception when encountering those
            # characters, and similar problems also occur with GitLab CI.
            output = remove_invalid_xml_chars(output)
            file.write(output)
            file.write(']]></failure>\n</testcase>\n')
        elif test.result.code in self.skipped_codes:
            reason = self._get_skip_reason(test)
            file.write(f'>\n  <skipped message={quo(reason)}/>\n</testcase>\n')
        else:
            file.write('/>\n')

    def _get_skip_reason(self, test):
        code = test.result.code
        if code == lit.Test.EXCLUDED:
            return 'Test not selected (--filter, --max-tests)'
        if code == lit.Test.SKIPPED:
            return 'User interrupt'

        assert code == lit.Test.UNSUPPORTED
        features = test.getMissingRequiredFeatures()
        if features:
            return 'Missing required feature(s): ' + ', '.join(features)
        return 'Unsupported configuration'
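
# A sketch of the document this writer emits, assuming one suite containing a
# passing and a failing test (names and timings are illustrative):
#
#   <?xml version="1.0" encoding="UTF-8"?>
#   <testsuites time="4.20">
#   <testsuite name="my-suite" tests="2" failures="1" skipped="0">
#   <testcase classname="my-suite.my-suite" name="pass.test" time="0.10"/>
#   <testcase classname="my-suite.my-suite" name="fail.test" time="0.20">
#     <failure><![CDATA[...captured test output...]]></failure>
#   </testcase>
#   </testsuite>
#   </testsuites>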


def gen_resultdb_test_entry(
    test_name, start_time, elapsed_time, test_output, result_code, is_expected
):
    test_data = {
        'testId': test_name,
        'start_time': datetime.datetime.fromtimestamp(start_time).isoformat() + 'Z',
        'duration': '%.9fs' % elapsed_time,
        'summary_html': '<p><text-artifact artifact-id="artifact-content-in-request"></p>',
        'artifacts': {
            'artifact-content-in-request': {
                'contents': base64.b64encode(test_output.encode('utf-8')).decode(
                    'utf-8'
                ),
            },
        },
        'expected': is_expected,
    }
    if (
        result_code == lit.Test.PASS
        or result_code == lit.Test.XPASS
        or result_code == lit.Test.FLAKYPASS
    ):
        test_data['status'] = 'PASS'
    elif result_code == lit.Test.FAIL or result_code == lit.Test.XFAIL:
        test_data['status'] = 'FAIL'
    elif (
        result_code == lit.Test.UNSUPPORTED
        or result_code == lit.Test.SKIPPED
        or result_code == lit.Test.EXCLUDED
    ):
        test_data['status'] = 'SKIP'
    elif result_code == lit.Test.UNRESOLVED or result_code == lit.Test.TIMEOUT:
        test_data['status'] = 'ABORT'
    return test_data
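
# A sketch of one returned entry, assuming a passing test; 'contents' carries
# the base64-encoded test output (all values are illustrative):
#
#   {
#     'testId': 'my-suite :: subdir/basic.test',
#     'start_time': '2023-05-01T12:00:00Z',
#     'duration': '0.010000000s',
#     'summary_html': '<p><text-artifact artifact-id="artifact-content-in-request"></p>',
#     'artifacts': {'artifact-content-in-request': {'contents': 'UEFTUw=='}},
#     'expected': True,
#     'status': 'PASS',
#   }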


class ResultDBReport(object):
    def __init__(self, output_file):
        self.output_file = output_file

    def write_results(self, tests, elapsed):
        unexecuted_codes = {lit.Test.EXCLUDED, lit.Test.SKIPPED}
        tests = [t for t in tests if t.result.code not in unexecuted_codes]
        data = {}
        data['__version__'] = lit.__versioninfo__
        data['elapsed'] = elapsed
        # Encode the tests.
        data['tests'] = tests_data = []
        for test in tests:
            tests_data.append(
                gen_resultdb_test_entry(
                    test_name=test.getFullName(),
                    start_time=test.result.start,
                    elapsed_time=test.result.elapsed,
                    test_output=test.result.output,
                    result_code=test.result.code,
                    is_expected=not test.result.code.isFailure,
                )
            )
            if test.result.microResults:
                for key, micro_test in test.result.microResults.items():
                    # Expand parent test name with micro test name
                    parent_name = test.getFullName()
                    micro_full_name = parent_name + ':' + key + 'microres'
                    tests_data.append(
                        gen_resultdb_test_entry(
                            test_name=micro_full_name,
                            start_time=micro_test.start
                            if micro_test.start
                            else test.result.start,
                            elapsed_time=micro_test.elapsed
                            if micro_test.elapsed
                            else test.result.elapsed,
                            test_output=micro_test.output,
                            result_code=micro_test.code,
                            is_expected=not micro_test.code.isFailure,
                        )
                    )

        with open(self.output_file, 'w') as file:
            json.dump(data, file, indent=2, sort_keys=True)
            file.write('\n')
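
# A minimal usage sketch (`tests` and `elapsed` are supplied by the lit driver
# in real runs; both names are assumptions here):
#
#   report = ResultDBReport('results.resultdb.json')
#   report.write_results(tests, elapsed)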


class TimeTraceReport(object):
    def __init__(self, output_file):
        self.output_file = output_file
        self.skipped_codes = {lit.Test.EXCLUDED,
                              lit.Test.SKIPPED, lit.Test.UNSUPPORTED}

    def write_results(self, tests, elapsed):
        # Find when first test started so we can make start times relative.
        first_start_time = min([t.result.start for t in tests])
        events = [self._get_test_event(x, first_start_time)
                  for x in tests if x.result.code not in self.skipped_codes]

        json_data = {'traceEvents': events}

        with open(self.output_file, "w") as time_trace_file:
            json.dump(json_data, time_trace_file, indent=2, sort_keys=True)

    def _get_test_event(self, test, first_start_time):
        test_name = test.getFullName()
        elapsed_time = test.result.elapsed or 0.0
        start_time = test.result.start - first_start_time if test.result.start else 0.0
        pid = test.result.pid or 0
        return {
            'pid': pid,
            'tid': 1,
            'ph': 'X',
            'ts': int(start_time * 1000000.),
            'dur': int(elapsed_time * 1000000.),
            'name': test_name,
        }
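
# A sketch of one emitted event in Chrome trace-event form ('ph': 'X' marks a
# complete event; 'ts' and 'dur' are microseconds, with 'ts' relative to the
# first test's start; values are illustrative):
#
#   {'pid': 1234, 'tid': 1, 'ph': 'X', 'ts': 0, 'dur': 150000,
#    'name': 'my-suite :: subdir/basic.test'}
#
# The output file can be opened with Chrome trace viewers such as
# chrome://tracing.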