from __future__ import absolute_import
import json
import math
import os
import shlex
import subprocess
import sys

import lit.Test
import lit.TestRunner
import lit.util
from .base import TestFormat

kIsWindows = sys.platform in ['win32', 'cygwin']

class GoogleTest(TestFormat):
    def __init__(self, test_sub_dirs, test_suffix, run_under=[]):
        self.seen_executables = set()
        self.test_sub_dirs = str(test_sub_dirs).split(';')

        # On Windows, assume tests will also end in '.exe'.
        exe_suffix = str(test_suffix)
        if kIsWindows:
            exe_suffix += '.exe'

        # Also check for .py files for testing purposes.
        self.test_suffixes = {exe_suffix, test_suffix + '.py'}
        self.run_under = run_under

    def get_num_tests(self, path, litConfig, localConfig):
        list_test_cmd = self.prepareCmd(
            [path, '--gtest_list_tests', '--gtest_filter=-*DISABLED_*'])
        try:
            out = subprocess.check_output(list_test_cmd,
                                          env=localConfig.environment)
        except subprocess.CalledProcessError as exc:
            litConfig.warning(
                "unable to discover google-tests in %r: %s. Process output: %s"
                % (path, exc, exc.output))
            return None
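        # In --gtest_list_tests output, suite names are flush left and the
        # individual test names under them are indented by two spaces, so
        # counting the indented lines yields the number of tests.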
        return sum(
            map(lambda line: lit.util.to_string(line).startswith('  '),
                out.splitlines(False)))

    def getTestsInDirectory(self, testSuite, path_in_suite, litConfig,
                            localConfig):
        init_shard_size = 512  # number of tests in a shard
        core_count = lit.util.usable_core_count()
        source_path = testSuite.getSourcePath(path_in_suite)
        for subdir in self.test_sub_dirs:
            dir_path = os.path.join(source_path, subdir)
            if not os.path.isdir(dir_path):
                continue
            for fn in lit.util.listdir_files(dir_path,
                                             suffixes=self.test_suffixes):
                # Discover the tests in this executable.
                execpath = os.path.join(source_path, subdir, fn)
                if execpath in self.seen_executables:
                    litConfig.warning(
                        "Skip adding %r since it has been added to the test pool" % execpath)
                    continue
                else:
                    self.seen_executables.add(execpath)
                num_tests = self.get_num_tests(execpath, litConfig,
                                               localConfig)
                if num_tests is not None:
                    # Compute the number of shards.
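                    # Start with the maximum shard size and halve it until
                    # there are at least as many shards as usable cores (or
                    # the shard size reaches 1), so sharding can keep every
                    # lit worker busy.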
                    shard_size = init_shard_size
                    nshard = int(math.ceil(num_tests / shard_size))
                    while nshard < core_count and shard_size > 1:
                        shard_size = shard_size // 2
                        nshard = int(math.ceil(num_tests / shard_size))

                    # Create one lit test for each shard.
                    for idx in range(nshard):
                        testPath = path_in_suite + (subdir, fn, str(idx),
                                                    str(nshard))
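                        # Name the JSON results file after the executable,
                        # suite, lit pid and shard coordinates so concurrent
                        # shards do not write to the same path.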
                        json_file = '-'.join([
                            execpath, testSuite.config.name,
                            str(os.getpid()),
                            str(idx),
                            str(nshard)
                        ]) + '.json'
                        yield lit.Test.Test(testSuite,
                                            testPath,
                                            localConfig,
                                            file_path=execpath,
                                            gtest_json_file=json_file)
                else:
                    # This doesn't look like a valid gtest file.  This can
                    # have a number of causes, none of them good.  For
                    # instance, we could have created a broken executable.
                    # Alternatively, someone has cruft in their test
                    # directory.  If we don't return a test here, then no
                    # failures will get reported, so return a dummy test name
                    # so that the failure is reported later.
                    testPath = path_in_suite + (
                        subdir, fn, 'failed_to_discover_tests_from_gtest')
                    yield lit.Test.Test(testSuite,
                                        testPath,
                                        localConfig,
                                        file_path=execpath)

    def execute(self, test, litConfig):
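        # A test without a JSON file is the dummy
        # 'failed_to_discover_tests_from_gtest' entry yielded above; report it
        # as a failure outright.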
        if test.gtest_json_file is None:
            return lit.Test.FAIL, ''

        testPath, testName = os.path.split(test.getSourcePath())
        while not os.path.exists(testPath):
            # Handle GTest parameterized and typed tests, whose names include
            # some '/'s.
            testPath, namePrefix = os.path.split(testPath)
            testName = namePrefix + '/' + testName

        testName, total_shards = os.path.split(testName)
        testName, shard_idx = os.path.split(testName)
        from lit.cl_arguments import TestOrder
        use_shuffle = TestOrder(litConfig.order) == TestOrder.RANDOM
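        # googletest shards itself: with GTEST_TOTAL_SHARDS and
        # GTEST_SHARD_INDEX set, the binary runs only the tests belonging to
        # that shard, and GTEST_OUTPUT tells it where to write the JSON
        # results that post_process_shard_results() reads back.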
        shard_env = {
            'GTEST_OUTPUT': 'json:' + test.gtest_json_file,
            'GTEST_SHUFFLE': '1' if use_shuffle else '0',
            'GTEST_TOTAL_SHARDS': os.environ.get("GTEST_TOTAL_SHARDS", total_shards),
            'GTEST_SHARD_INDEX': os.environ.get("GTEST_SHARD_INDEX", shard_idx)
        }
        test.config.environment.update(shard_env)

        cmd = [testPath]
        cmd = self.prepareCmd(cmd)
        if litConfig.useValgrind:
            cmd = litConfig.valgrindArgs + cmd

        if litConfig.noExecute:
            return lit.Test.PASS, ''

        def get_shard_header(shard_env):
            shard_envs = ' '.join([k + '=' + v for k, v in shard_env.items()])
            return f"Script(shard):\n--\n{shard_envs} {' '.join(cmd)}\n--\n"

        shard_header = get_shard_header(shard_env)

        try:
            out, _, exitCode = lit.util.executeCommand(
                cmd, env=test.config.environment,
                timeout=litConfig.maxIndividualTestTime, redirect_stderr=True)
        except lit.util.ExecuteCommandTimeoutException as e:
            stream_msg = f"\n{e.out}\n--\nexit: {e.exitCode}\n--\n"
            return (lit.Test.TIMEOUT, f'{shard_header}{stream_msg}Reached '
                    f'timeout of {litConfig.maxIndividualTestTime} seconds')

        if not os.path.exists(test.gtest_json_file):
            errmsg = f"shard JSON output does not exist: {test.gtest_json_file}"
            stream_msg = f"\n{out}\n--\nexit: {exitCode}\n--\n"
            return lit.Test.FAIL, shard_header + stream_msg + errmsg

        if exitCode == 0:
            return lit.Test.PASS, ''

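        # Recover the stdout of a single failing test from the combined shard
        # output by scanning for gtest's '[ RUN      ]' / '[  FAILED  ]'
        # markers around its name.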
        def get_test_stdout(test_name):
            res = []
            header = '[ RUN      ] ' + test_name
            footer = '[  FAILED  ] ' + test_name
            in_range = False
            for line in out.splitlines():
                if line.startswith(header):
                    in_range = True
                elif line.startswith(footer):
                    return '' if len(res) == 0 else '\n'.join(res)
                elif in_range:
                    res.append(line)
            assert False, 'gtest did not report the result for ' + test_name

        found_failed_test = False

        with open(test.gtest_json_file, encoding='utf-8') as f:
            jf = json.load(f)

            if use_shuffle:
                shard_env['GTEST_RANDOM_SEED'] = str(jf['random_seed'])
            output = get_shard_header(shard_env) + '\n'

            for testcase in jf['testsuites']:
                for testinfo in testcase['testsuite']:
                    result = testinfo['result']
                    if result == 'SUPPRESSED' or result == 'SKIPPED':
                        continue
                    testname = testcase['name'] + '.' + testinfo['name']
                    header = f"Script:\n--\n{' '.join(cmd)} --gtest_filter={testname}\n--\n"
                    if 'failures' in testinfo:
                        found_failed_test = True
                        output += header
                        test_out = get_test_stdout(testname)
                        if test_out:
                            output += test_out + '\n\n'
                        for fail in testinfo['failures']:
                            output += fail['failure'] + '\n'
                        output += '\n'
                    elif result != 'COMPLETED':
                        output += header
                        output += 'unresolved test result\n'

        # In some situations, such as running under sanitizers, every test in
        # the shard may pass yet the shard itself can still fail due to memory
        # issues, so include the raw shard output in that case.
        if not found_failed_test:
            output += f"\n{out}\n--\nexit: {exitCode}\n--\n"

        return lit.Test.FAIL, output

    def prepareCmd(self, cmd):
        """Insert an interpreter or run_under prefix if needed.

        Prepends the Python executable to the command if cmd[0] ends in '.py',
        since we cannot rely on the system to interpret shebang lines for us
        on Windows, and prepends any run_under wrapper specified by the
        caller.
        """
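        # For example (hypothetical arguments), a Python-based test command
        # such as ['foo_test.py', '--gtest_list_tests'] becomes
        # [sys.executable, 'foo_test.py', '--gtest_list_tests'].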
        if cmd[0].endswith('.py'):
            cmd = [sys.executable] + cmd
        if self.run_under:
            if isinstance(self.run_under, list):
                cmd = self.run_under + cmd
            else:
                cmd = shlex.split(self.run_under) + cmd
        return cmd

    @staticmethod
    def post_process_shard_results(selected_tests, discovered_tests):
        def remove_gtest(tests):
            return [t for t in tests if t.gtest_json_file is None]

        discovered_tests = remove_gtest(discovered_tests)
        gtests = [t for t in selected_tests if t.gtest_json_file]
        selected_tests = remove_gtest(selected_tests)
        for test in gtests:
            # In case gtest has bugs such that no JSON file was emitted.
            if not os.path.exists(test.gtest_json_file):
                selected_tests.append(test)
                discovered_tests.append(test)
                continue

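            # Approximate each sub-test's start time by accumulating its
            # elapsed time onto the shard's recorded start time.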
            start_time = test.result.start or 0.0

            has_failure_in_shard = False

            # Load json file to retrieve results.
            with open(test.gtest_json_file, encoding='utf-8') as f:
                try:
                    testsuites = json.load(f)['testsuites']
                except json.JSONDecodeError as e:
                    raise RuntimeError("Failed to parse json file: " +
                                       test.gtest_json_file + "\n" + e.doc)
                for testcase in testsuites:
                    for testinfo in testcase['testsuite']:
                        # Ignore disabled tests.
                        if testinfo['result'] == 'SUPPRESSED':
                            continue

                        testPath = test.path_in_suite[:-2] + (testcase['name'],
                                                              testinfo['name'])
                        subtest = lit.Test.Test(test.suite, testPath,
                                                test.config, test.file_path)

                        testname = testcase['name'] + '.' + testinfo['name']
                        header = (f"Script:\n--\n{test.file_path} "
                                  f"--gtest_filter={testname}\n--\n")

                        output = ''
                        if testinfo['result'] == 'SKIPPED':
                            returnCode = lit.Test.SKIPPED
                        elif 'failures' in testinfo:
                            has_failure_in_shard = True
                            returnCode = lit.Test.FAIL
                            output = header
                            for fail in testinfo['failures']:
                                output += fail['failure'] + '\n'
                        elif testinfo['result'] == 'COMPLETED':
                            returnCode = lit.Test.PASS
                        else:
                            returnCode = lit.Test.UNRESOLVED
                            output = header + 'unresolved test result\n'

                        elapsed_time = float(testinfo['time'][:-1])
                        res = lit.Test.Result(returnCode, output, elapsed_time)
                        res.pid = test.result.pid or 0
                        res.start = start_time
                        start_time = start_time + elapsed_time
                        subtest.setResult(res)

                        selected_tests.append(subtest)
                        discovered_tests.append(subtest)
            os.remove(test.gtest_json_file)

            if not has_failure_in_shard and test.isFailure():
                selected_tests.append(test)
                discovered_tests.append(test)

        return selected_tests, discovered_tests