1#!/usr/bin/env python
2# Copyright 2015 gRPC authors.
3#
4# Licensed under the Apache License, Version 2.0 (the "License");
5# you may not use this file except in compliance with the License.
6# You may obtain a copy of the License at
7#
8#     http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS,
12# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13# See the License for the specific language governing permissions and
14# limitations under the License.
15"""Run tests in parallel."""
16
17from __future__ import print_function
18
19import argparse
20import ast
21import collections
22import glob
23import itertools
24import json
25import logging
26import multiprocessing
27import os
28import os.path
29import pipes
30import platform
31import random
32import re
33import socket
34import subprocess
35import sys
36import tempfile
37import time
38import traceback
39import uuid
40
41import six
42from six.moves import urllib
43
44import python_utils.jobset as jobset
45import python_utils.report_utils as report_utils
46import python_utils.start_port_server as start_port_server
47import python_utils.watch_dirs as watch_dirs
48
49try:
50    from python_utils.upload_test_results import upload_results_to_bq
51except (ImportError):
52    pass  # It's ok to not import because this is only necessary to upload results to BQ.
53
54gcp_utils_dir = os.path.abspath(
55    os.path.join(os.path.dirname(__file__), '../gcp/utils'))
56sys.path.append(gcp_utils_dir)
57
58_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
59os.chdir(_ROOT)
60
61_FORCE_ENVIRON_FOR_WRAPPERS = {
62    'GRPC_VERBOSITY': 'DEBUG',
63}
64
65_POLLING_STRATEGIES = {
66    'linux': ['epollex', 'epoll1', 'poll'],
67    'mac': ['poll'],
68}
69
70
71def platform_string():
72    return jobset.platform_string()
73
74
75_DEFAULT_TIMEOUT_SECONDS = 5 * 60
76_PRE_BUILD_STEP_TIMEOUT_SECONDS = 10 * 60
77
78
79def run_shell_command(cmd, env=None, cwd=None):
80    try:
81        subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
82    except subprocess.CalledProcessError as e:
83        logging.exception(
84            "Error while running command '%s'. Exit status %d. Output:\n%s",
85            e.cmd, e.returncode, e.output)
86        raise
87
88
89def max_parallel_tests_for_current_platform():
90    # Too much test parallelization has only been seen to be a problem
91    # so far on windows.
92    if jobset.platform_string() == 'windows':
93        return 64
94    return 1024
95
96
97# SimpleConfig: just compile with CONFIG=config, and run the binary to test
98class Config(object):
99
100    def __init__(self,
101                 config,
102                 environ=None,
103                 timeout_multiplier=1,
104                 tool_prefix=[],
105                 iomgr_platform='native'):
106        if environ is None:
107            environ = {}
108        self.build_config = config
109        self.environ = environ
110        self.environ['CONFIG'] = config
111        self.tool_prefix = tool_prefix
112        self.timeout_multiplier = timeout_multiplier
113        self.iomgr_platform = iomgr_platform
114
115    def job_spec(self,
116                 cmdline,
117                 timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
118                 shortname=None,
119                 environ={},
120                 cpu_cost=1.0,
121                 flaky=False):
122        """Construct a jobset.JobSpec for a test under this config
123
124       Args:
125         cmdline:      a list of strings specifying the command line the test
126                       would like to run
127    """
128        actual_environ = self.environ.copy()
129        for k, v in environ.items():
130            actual_environ[k] = v
131        if not flaky and shortname and shortname in flaky_tests:
132            flaky = True
133        if shortname in shortname_to_cpu:
134            cpu_cost = shortname_to_cpu[shortname]
135        return jobset.JobSpec(
136            cmdline=self.tool_prefix + cmdline,
137            shortname=shortname,
138            environ=actual_environ,
139            cpu_cost=cpu_cost,
140            timeout_seconds=(self.timeout_multiplier *
141                             timeout_seconds if timeout_seconds else None),
142            flake_retries=4 if flaky or args.allow_flakes else 0,
143            timeout_retries=1 if flaky or args.allow_flakes else 0)
144
145
146def get_c_tests(travis, test_lang):
147    out = []
148    platforms_str = 'ci_platforms' if travis else 'platforms'
149    with open('tools/run_tests/generated/tests.json') as f:
150        js = json.load(f)
151        return [
152            tgt for tgt in js
153            if tgt['language'] == test_lang and platform_string() in
154            tgt[platforms_str] and not (travis and tgt['flaky'])
155        ]
156
157
158def _check_compiler(compiler, supported_compilers):
159    if compiler not in supported_compilers:
160        raise Exception('Compiler %s not supported (on this platform).' %
161                        compiler)
162
163
164def _check_arch(arch, supported_archs):
165    if arch not in supported_archs:
166        raise Exception('Architecture %s not supported.' % arch)
167
168
169def _is_use_docker_child():
170    """Returns True if running running as a --use_docker child."""
171    return True if os.getenv('RUN_TESTS_COMMAND') else False
172
173
174_PythonConfigVars = collections.namedtuple('_ConfigVars', [
175    'shell',
176    'builder',
177    'builder_prefix_arguments',
178    'venv_relative_python',
179    'toolchain',
180    'runner',
181    'test_name',
182    'iomgr_platform',
183])
184
185
186def _python_config_generator(name, major, minor, bits, config_vars):
187    name += '_' + config_vars.iomgr_platform
188    return PythonConfig(
189        name, config_vars.shell + config_vars.builder +
190        config_vars.builder_prefix_arguments +
191        [_python_pattern_function(major=major, minor=minor, bits=bits)] +
192        [name] + config_vars.venv_relative_python + config_vars.toolchain,
193        config_vars.shell + config_vars.runner + [
194            os.path.join(name, config_vars.venv_relative_python[0]),
195            config_vars.test_name
196        ])
197
198
199def _pypy_config_generator(name, major, config_vars):
200    return PythonConfig(
201        name, config_vars.shell + config_vars.builder +
202        config_vars.builder_prefix_arguments +
203        [_pypy_pattern_function(major=major)] + [name] +
204        config_vars.venv_relative_python + config_vars.toolchain,
205        config_vars.shell + config_vars.runner +
206        [os.path.join(name, config_vars.venv_relative_python[0])])
207
208
209def _python_pattern_function(major, minor, bits):
210    # Bit-ness is handled by the test machine's environment
211    if os.name == "nt":
212        if bits == "64":
213            return '/c/Python{major}{minor}/python.exe'.format(major=major,
214                                                               minor=minor,
215                                                               bits=bits)
216        else:
217            return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
218                major=major, minor=minor, bits=bits)
219    else:
220        return 'python{major}.{minor}'.format(major=major, minor=minor)
221
222
223def _pypy_pattern_function(major):
224    if major == '2':
225        return 'pypy'
226    elif major == '3':
227        return 'pypy3'
228    else:
229        raise ValueError("Unknown PyPy major version")
230
231
232class CLanguage(object):
233
234    def __init__(self, make_target, test_lang):
235        self.make_target = make_target
236        self.platform = platform_string()
237        self.test_lang = test_lang
238
239    def configure(self, config, args):
240        self.config = config
241        self.args = args
242        self._make_options = []
243        self._use_cmake = True
244        if self.platform == 'windows':
245            _check_compiler(self.args.compiler, [
246                'default', 'cmake', 'cmake_vs2015', 'cmake_vs2017',
247                'cmake_vs2019'
248            ])
249            _check_arch(self.args.arch, ['default', 'x64', 'x86'])
250            if self.args.compiler == 'cmake_vs2019':
251                cmake_generator_option = 'Visual Studio 16 2019'
252            elif self.args.compiler == 'cmake_vs2017':
253                cmake_generator_option = 'Visual Studio 15 2017'
254            else:
255                cmake_generator_option = 'Visual Studio 14 2015'
256            cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
257            self._cmake_configure_extra_args = [
258                '-G', cmake_generator_option, '-A', cmake_arch_option
259            ]
260        else:
261            if self.platform == 'linux':
262                # Allow all the known architectures. _check_arch_option has already checked that we're not doing
263                # something illegal when not running under docker.
264                _check_arch(self.args.arch, ['default', 'x64', 'x86'])
265            else:
266                _check_arch(self.args.arch, ['default'])
267
268            self._docker_distro, self._cmake_configure_extra_args = self._compiler_options(
269                self.args.use_docker, self.args.compiler)
270
271            if self.args.arch == 'x86':
272                # disable boringssl asm optimizations when on x86
273                # see https://github.com/grpc/grpc/blob/b5b8578b3f8b4a9ce61ed6677e19d546e43c5c68/tools/run_tests/artifacts/artifact_targets.py#L253
274                self._cmake_configure_extra_args.append('-DOPENSSL_NO_ASM=ON')
275
276    def test_specs(self):
277        out = []
278        binaries = get_c_tests(self.args.travis, self.test_lang)
279        for target in binaries:
280            if self._use_cmake and target.get('boringssl', False):
281                # cmake doesn't build boringssl tests
282                continue
283            auto_timeout_scaling = target.get('auto_timeout_scaling', True)
284            polling_strategies = (_POLLING_STRATEGIES.get(
285                self.platform, ['all']) if target.get('uses_polling', True) else
286                                  ['none'])
287            for polling_strategy in polling_strategies:
288                env = {
289                    'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
290                        _ROOT + '/src/core/tsi/test_creds/ca.pem',
291                    'GRPC_POLL_STRATEGY':
292                        polling_strategy,
293                    'GRPC_VERBOSITY':
294                        'DEBUG'
295                }
296                resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
297                if resolver:
298                    env['GRPC_DNS_RESOLVER'] = resolver
299                shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
300                if polling_strategy in target.get('excluded_poll_engines', []):
301                    continue
302
303                timeout_scaling = 1
304                if auto_timeout_scaling:
305                    config = self.args.config
306                    if ('asan' in config or config == 'msan' or
307                            config == 'tsan' or config == 'ubsan' or
308                            config == 'helgrind' or config == 'memcheck'):
309                        # Scale overall test timeout if running under various sanitizers.
310                        # scaling value is based on historical data analysis
311                        timeout_scaling *= 3
312
313                if self.config.build_config in target['exclude_configs']:
314                    continue
315                if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
316                    continue
317                if self.platform == 'windows':
318                    binary = 'cmake/build/%s/%s.exe' % (_MSBUILD_CONFIG[
319                        self.config.build_config], target['name'])
320                else:
321                    if self._use_cmake:
322                        binary = 'cmake/build/%s' % target['name']
323                    else:
324                        binary = 'bins/%s/%s' % (self.config.build_config,
325                                                 target['name'])
326                cpu_cost = target['cpu_cost']
327                if cpu_cost == 'capacity':
328                    cpu_cost = multiprocessing.cpu_count()
329                if os.path.isfile(binary):
330                    list_test_command = None
331                    filter_test_command = None
332
333                    # these are the flag defined by gtest and benchmark framework to list
334                    # and filter test runs. We use them to split each individual test
335                    # into its own JobSpec, and thus into its own process.
336                    if 'benchmark' in target and target['benchmark']:
337                        with open(os.devnull, 'w') as fnull:
338                            tests = subprocess.check_output(
339                                [binary, '--benchmark_list_tests'],
340                                stderr=fnull)
341                        for line in tests.decode().split('\n'):
342                            test = line.strip()
343                            if not test:
344                                continue
345                            cmdline = [binary,
346                                       '--benchmark_filter=%s$' % test
347                                      ] + target['args']
348                            out.append(
349                                self.config.job_spec(
350                                    cmdline,
351                                    shortname='%s %s' %
352                                    (' '.join(cmdline), shortname_ext),
353                                    cpu_cost=cpu_cost,
354                                    timeout_seconds=target.get(
355                                        'timeout_seconds',
356                                        _DEFAULT_TIMEOUT_SECONDS) *
357                                    timeout_scaling,
358                                    environ=env))
359                    elif 'gtest' in target and target['gtest']:
360                        # here we parse the output of --gtest_list_tests to build up a complete
361                        # list of the tests contained in a binary for each test, we then
362                        # add a job to run, filtering for just that test.
363                        with open(os.devnull, 'w') as fnull:
364                            tests = subprocess.check_output(
365                                [binary, '--gtest_list_tests'], stderr=fnull)
366                        base = None
367                        for line in tests.decode().split('\n'):
368                            i = line.find('#')
369                            if i >= 0:
370                                line = line[:i]
371                            if not line:
372                                continue
373                            if line[0] != ' ':
374                                base = line.strip()
375                            else:
376                                assert base is not None
377                                assert line[1] == ' '
378                                test = base + line.strip()
379                                cmdline = [binary,
380                                           '--gtest_filter=%s' % test
381                                          ] + target['args']
382                                out.append(
383                                    self.config.job_spec(
384                                        cmdline,
385                                        shortname='%s %s' %
386                                        (' '.join(cmdline), shortname_ext),
387                                        cpu_cost=cpu_cost,
388                                        timeout_seconds=target.get(
389                                            'timeout_seconds',
390                                            _DEFAULT_TIMEOUT_SECONDS) *
391                                        timeout_scaling,
392                                        environ=env))
393                    else:
394                        cmdline = [binary] + target['args']
395                        shortname = target.get(
396                            'shortname',
397                            ' '.join(pipes.quote(arg) for arg in cmdline))
398                        shortname += shortname_ext
399                        out.append(
400                            self.config.job_spec(
401                                cmdline,
402                                shortname=shortname,
403                                cpu_cost=cpu_cost,
404                                flaky=target.get('flaky', False),
405                                timeout_seconds=target.get(
406                                    'timeout_seconds',
407                                    _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
408                                environ=env))
409                elif self.args.regex == '.*' or self.platform == 'windows':
410                    print('\nWARNING: binary not found, skipping', binary)
411        return sorted(out)
412
413    def make_targets(self):
414        if self.platform == 'windows':
415            # don't build tools on windows just yet
416            return ['buildtests_%s' % self.make_target]
417        return [
418            'buildtests_%s' % self.make_target,
419            'tools_%s' % self.make_target, 'check_epollexclusive'
420        ]
421
422    def make_options(self):
423        return self._make_options
424
425    def pre_build_steps(self):
426        if self.platform == 'windows':
427            return [['tools\\run_tests\\helper_scripts\\pre_build_cmake.bat'] +
428                    self._cmake_configure_extra_args]
429        elif self._use_cmake:
430            return [['tools/run_tests/helper_scripts/pre_build_cmake.sh'] +
431                    self._cmake_configure_extra_args]
432        else:
433            return []
434
435    def build_steps(self):
436        return []
437
438    def post_tests_steps(self):
439        if self.platform == 'windows':
440            return []
441        else:
442            return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
443
444    def makefile_name(self):
445        if self._use_cmake:
446            return 'cmake/build/Makefile'
447        else:
448            return 'Makefile'
449
450    def _clang_cmake_configure_extra_args(self, version_suffix=''):
451        return [
452            '-DCMAKE_C_COMPILER=clang%s' % version_suffix,
453            '-DCMAKE_CXX_COMPILER=clang++%s' % version_suffix,
454        ]
455
456    def _compiler_options(self, use_docker, compiler):
457        """Returns docker distro and cmake configure args to use for given compiler."""
458        if not use_docker and not _is_use_docker_child():
459            # if not running under docker, we cannot ensure the right compiler version will be used,
460            # so we only allow the non-specific choices.
461            _check_compiler(compiler, ['default', 'cmake'])
462
463        if compiler == 'gcc4.9' or compiler == 'default' or compiler == 'cmake':
464            return ('jessie', [])
465        elif compiler == 'gcc5.3':
466            return ('ubuntu1604', [])
467        elif compiler == 'gcc8.3':
468            return ('buster', [])
469        elif compiler == 'gcc8.3_openssl102':
470            return ('buster_openssl102', [
471                "-DgRPC_SSL_PROVIDER=package",
472            ])
473        elif compiler == 'gcc11':
474            return ('gcc_11', [])
475        elif compiler == 'gcc_musl':
476            return ('alpine', [])
477        elif compiler == 'clang4':
478            return ('clang_4', self._clang_cmake_configure_extra_args())
479        elif compiler == 'clang12':
480            return ('clang_12', self._clang_cmake_configure_extra_args())
481        else:
482            raise Exception('Compiler %s not supported.' % compiler)
483
484    def dockerfile_dir(self):
485        return 'tools/dockerfile/test/cxx_%s_%s' % (
486            self._docker_distro, _docker_arch_suffix(self.args.arch))
487
488    def __str__(self):
489        return self.make_target
490
491
492# This tests Node on grpc/grpc-node and will become the standard for Node testing
493class RemoteNodeLanguage(object):
494
495    def __init__(self):
496        self.platform = platform_string()
497
498    def configure(self, config, args):
499        self.config = config
500        self.args = args
501        # Note: electron ABI only depends on major and minor version, so that's all
502        # we should specify in the compiler argument
503        _check_compiler(self.args.compiler, [
504            'default', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
505            'electron1.3', 'electron1.6'
506        ])
507        if self.args.compiler == 'default':
508            self.runtime = 'node'
509            self.node_version = '8'
510        else:
511            if self.args.compiler.startswith('electron'):
512                self.runtime = 'electron'
513                self.node_version = self.args.compiler[8:]
514            else:
515                self.runtime = 'node'
516                # Take off the word "node"
517                self.node_version = self.args.compiler[4:]
518
519    # TODO: update with Windows/electron scripts when available for grpc/grpc-node
520    def test_specs(self):
521        if self.platform == 'windows':
522            return [
523                self.config.job_spec(
524                    ['tools\\run_tests\\helper_scripts\\run_node.bat'])
525            ]
526        else:
527            return [
528                self.config.job_spec(
529                    ['tools/run_tests/helper_scripts/run_grpc-node.sh'],
530                    None,
531                    environ=_FORCE_ENVIRON_FOR_WRAPPERS)
532            ]
533
534    def pre_build_steps(self):
535        return []
536
537    def make_targets(self):
538        return []
539
540    def make_options(self):
541        return []
542
543    def build_steps(self):
544        return []
545
546    def post_tests_steps(self):
547        return []
548
549    def makefile_name(self):
550        return 'Makefile'
551
552    def dockerfile_dir(self):
553        return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
554            self.args.arch)
555
556    def __str__(self):
557        return 'grpc-node'
558
559
560class Php7Language(object):
561
562    def configure(self, config, args):
563        self.config = config
564        self.args = args
565        _check_compiler(self.args.compiler, ['default'])
566        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
567
568    def test_specs(self):
569        return [
570            self.config.job_spec(['src/php/bin/run_tests.sh'],
571                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS)
572        ]
573
574    def pre_build_steps(self):
575        return []
576
577    def make_targets(self):
578        return ['static_c', 'shared_c']
579
580    def make_options(self):
581        return self._make_options
582
583    def build_steps(self):
584        return [['tools/run_tests/helper_scripts/build_php.sh']]
585
586    def post_tests_steps(self):
587        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
588
589    def makefile_name(self):
590        return 'Makefile'
591
592    def dockerfile_dir(self):
593        return 'tools/dockerfile/test/php7_debian9_%s' % _docker_arch_suffix(
594            self.args.arch)
595
596    def __str__(self):
597        return 'php7'
598
599
600class PythonConfig(
601        collections.namedtuple('PythonConfig', ['name', 'build', 'run'])):
602    """Tuple of commands (named s.t. 'what it says on the tin' applies)"""
603
604
605class PythonLanguage(object):
606
607    _TEST_SPECS_FILE = {
608        'native': ['src/python/grpcio_tests/tests/tests.json'],
609        'gevent': [
610            'src/python/grpcio_tests/tests/tests.json',
611            'src/python/grpcio_tests/tests_gevent/tests.json',
612        ],
613        'asyncio': ['src/python/grpcio_tests/tests_aio/tests.json'],
614    }
615    _TEST_FOLDER = {
616        'native': 'test',
617        'gevent': 'test_gevent',
618        'asyncio': 'test_aio',
619    }
620
621    def configure(self, config, args):
622        self.config = config
623        self.args = args
624        self.pythons = self._get_pythons(self.args)
625
626    def test_specs(self):
627        # load list of known test suites
628        tests_json = []
629        for tests_json_file_name in self._TEST_SPECS_FILE[
630                self.args.iomgr_platform]:
631            with open(tests_json_file_name) as tests_json_file:
632                tests_json.extend(json.load(tests_json_file))
633        environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
634        # TODO(https://github.com/grpc/grpc/issues/21401) Fork handlers is not
635        # designed for non-native IO manager. It has a side-effect that
636        # overrides threading settings in C-Core.
637        if args.iomgr_platform != 'native':
638            environment['GRPC_ENABLE_FORK_SUPPORT'] = '0'
639        return [
640            self.config.job_spec(
641                config.run,
642                timeout_seconds=8 * 60,
643                environ=dict(GRPC_PYTHON_TESTRUNNER_FILTER=str(suite_name),
644                             **environment),
645                shortname='%s.%s.%s' %
646                (config.name, self._TEST_FOLDER[self.args.iomgr_platform],
647                 suite_name),
648            ) for suite_name in tests_json for config in self.pythons
649        ]
650
651    def pre_build_steps(self):
652        return []
653
654    def make_targets(self):
655        return []
656
657    def make_options(self):
658        return []
659
660    def build_steps(self):
661        return [config.build for config in self.pythons]
662
663    def post_tests_steps(self):
664        if self.config.build_config != 'gcov':
665            return []
666        else:
667            return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
668
669    def makefile_name(self):
670        return 'Makefile'
671
672    def dockerfile_dir(self):
673        return 'tools/dockerfile/test/python_%s_%s' % (
674            self._python_manager_name(), _docker_arch_suffix(self.args.arch))
675
676    def _python_manager_name(self):
677        """Choose the docker image to use based on python version."""
678        if self.args.compiler in ['python3.6', 'python3.7', 'python3.8']:
679            return 'stretch_' + self.args.compiler[len('python'):]
680        elif self.args.compiler == 'python_alpine':
681            return 'alpine'
682        else:
683            return 'stretch_default'
684
685    def _get_pythons(self, args):
686        """Get python runtimes to test with, based on current platform, architecture, compiler etc."""
687        if args.arch == 'x86':
688            bits = '32'
689        else:
690            bits = '64'
691
692        if os.name == 'nt':
693            shell = ['bash']
694            builder = [
695                os.path.abspath(
696                    'tools/run_tests/helper_scripts/build_python_msys2.sh')
697            ]
698            builder_prefix_arguments = ['MINGW{}'.format(bits)]
699            venv_relative_python = ['Scripts/python.exe']
700            toolchain = ['mingw32']
701        else:
702            shell = []
703            builder = [
704                os.path.abspath(
705                    'tools/run_tests/helper_scripts/build_python.sh')
706            ]
707            builder_prefix_arguments = []
708            venv_relative_python = ['bin/python']
709            toolchain = ['unix']
710
711        # Selects the corresponding testing mode.
712        # See src/python/grpcio_tests/commands.py for implementation details.
713        if args.iomgr_platform == 'native':
714            test_command = 'test_lite'
715        elif args.iomgr_platform == 'gevent':
716            test_command = 'test_gevent'
717        elif args.iomgr_platform == 'asyncio':
718            test_command = 'test_aio'
719        else:
720            raise ValueError('Unsupported IO Manager platform: %s' %
721                             args.iomgr_platform)
722        runner = [
723            os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')
724        ]
725
726        config_vars = _PythonConfigVars(shell, builder,
727                                        builder_prefix_arguments,
728                                        venv_relative_python, toolchain, runner,
729                                        test_command, args.iomgr_platform)
730        python36_config = _python_config_generator(name='py36',
731                                                   major='3',
732                                                   minor='6',
733                                                   bits=bits,
734                                                   config_vars=config_vars)
735        python37_config = _python_config_generator(name='py37',
736                                                   major='3',
737                                                   minor='7',
738                                                   bits=bits,
739                                                   config_vars=config_vars)
740        python38_config = _python_config_generator(name='py38',
741                                                   major='3',
742                                                   minor='8',
743                                                   bits=bits,
744                                                   config_vars=config_vars)
745        python39_config = _python_config_generator(name='py39',
746                                                   major='3',
747                                                   minor='9',
748                                                   bits=bits,
749                                                   config_vars=config_vars)
750        pypy27_config = _pypy_config_generator(name='pypy',
751                                               major='2',
752                                               config_vars=config_vars)
753        pypy32_config = _pypy_config_generator(name='pypy3',
754                                               major='3',
755                                               config_vars=config_vars)
756
757        if args.iomgr_platform in ('asyncio', 'gevent'):
758            if args.compiler not in ('default', 'python3.6', 'python3.7',
759                                     'python3.8', 'python3.9'):
760                raise Exception(
761                    'Compiler %s not supported with IO Manager platform: %s' %
762                    (args.compiler, args.iomgr_platform))
763
764        if args.compiler == 'default':
765            if os.name == 'nt':
766                if args.iomgr_platform == 'gevent':
767                    # TODO(https://github.com/grpc/grpc/issues/23784) allow
768                    # gevent to run on later version once issue solved.
769                    return (python36_config,)
770                else:
771                    return (python38_config,)
772            else:
773                if args.iomgr_platform in ('asyncio', 'gevent'):
774                    return (python36_config, python38_config)
775                elif os.uname()[0] == 'Darwin':
776                    # NOTE(rbellevi): Testing takes significantly longer on
777                    # MacOS, so we restrict the number of interpreter versions
778                    # tested.
779                    return (python38_config,)
780                else:
781                    return (
782                        python37_config,
783                        python38_config,
784                    )
785        elif args.compiler == 'python3.6':
786            return (python36_config,)
787        elif args.compiler == 'python3.7':
788            return (python37_config,)
789        elif args.compiler == 'python3.8':
790            return (python38_config,)
791        elif args.compiler == 'python3.9':
792            return (python39_config,)
793        elif args.compiler == 'pypy':
794            return (pypy27_config,)
795        elif args.compiler == 'pypy3':
796            return (pypy32_config,)
797        elif args.compiler == 'python_alpine':
798            return (python38_config,)
799        elif args.compiler == 'all_the_cpythons':
800            return (
801                python36_config,
802                python37_config,
803                python38_config,
804            )
805        else:
806            raise Exception('Compiler %s not supported.' % args.compiler)
807
808    def __str__(self):
809        return 'python'
810
811
812class RubyLanguage(object):
813
814    def configure(self, config, args):
815        self.config = config
816        self.args = args
817        _check_compiler(self.args.compiler, ['default'])
818
819    def test_specs(self):
820        tests = [
821            self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'],
822                                 timeout_seconds=10 * 60,
823                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS)
824        ]
825        for test in [
826                'src/ruby/end2end/sig_handling_test.rb',
827                'src/ruby/end2end/channel_state_test.rb',
828                'src/ruby/end2end/channel_closing_test.rb',
829                'src/ruby/end2end/sig_int_during_channel_watch_test.rb',
830                'src/ruby/end2end/killed_client_thread_test.rb',
831                'src/ruby/end2end/forking_client_test.rb',
832                'src/ruby/end2end/grpc_class_init_test.rb',
833                'src/ruby/end2end/multiple_killed_watching_threads_test.rb',
834                'src/ruby/end2end/load_grpc_with_gc_stress_test.rb',
835                'src/ruby/end2end/client_memory_usage_test.rb',
836                'src/ruby/end2end/package_with_underscore_test.rb',
837                'src/ruby/end2end/graceful_sig_handling_test.rb',
838                'src/ruby/end2end/graceful_sig_stop_test.rb',
839                'src/ruby/end2end/errors_load_before_grpc_lib_test.rb',
840                'src/ruby/end2end/logger_load_before_grpc_lib_test.rb',
841                'src/ruby/end2end/status_codes_load_before_grpc_lib_test.rb',
842                'src/ruby/end2end/call_credentials_timeout_test.rb',
843                'src/ruby/end2end/call_credentials_returning_bad_metadata_doesnt_kill_background_thread_test.rb'
844        ]:
845            tests.append(
846                self.config.job_spec(['ruby', test],
847                                     shortname=test,
848                                     timeout_seconds=20 * 60,
849                                     environ=_FORCE_ENVIRON_FOR_WRAPPERS))
850        return tests
851
852    def pre_build_steps(self):
853        return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
854
855    def make_targets(self):
856        return []
857
858    def make_options(self):
859        return []
860
861    def build_steps(self):
862        return [['tools/run_tests/helper_scripts/build_ruby.sh']]
863
864    def post_tests_steps(self):
865        return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
866
867    def makefile_name(self):
868        return 'Makefile'
869
870    def dockerfile_dir(self):
871        return 'tools/dockerfile/test/ruby_buster_%s' % _docker_arch_suffix(
872            self.args.arch)
873
874    def __str__(self):
875        return 'ruby'
876
877
878class CSharpLanguage(object):
879
880    def __init__(self):
881        self.platform = platform_string()
882
883    def configure(self, config, args):
884        self.config = config
885        self.args = args
886        if self.platform == 'windows':
887            _check_compiler(self.args.compiler, ['default', 'coreclr'])
888            _check_arch(self.args.arch, ['default'])
889            self._cmake_arch_option = 'x64'
890        else:
891            _check_compiler(self.args.compiler, ['default', 'coreclr'])
892            self._docker_distro = 'buster'
893
894    def test_specs(self):
895        with open('src/csharp/tests.json') as f:
896            tests_by_assembly = json.load(f)
897
898        msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
899        nunit_args = ['--labels=All', '--noresult', '--workers=1']
900        assembly_subdir = 'bin/%s' % msbuild_config
901        assembly_extension = '.exe'
902
903        if self.args.compiler == 'coreclr':
904            assembly_subdir += '/netcoreapp2.1'
905            runtime_cmd = ['dotnet', 'exec']
906            assembly_extension = '.dll'
907        else:
908            assembly_subdir += '/net45'
909            if self.platform == 'windows':
910                runtime_cmd = []
911            elif self.platform == 'mac':
912                # mono before version 5.2 on MacOS defaults to 32bit runtime
913                runtime_cmd = ['mono', '--arch=64']
914            else:
915                runtime_cmd = ['mono']
916
917        specs = []
918        for assembly in six.iterkeys(tests_by_assembly):
919            assembly_file = 'src/csharp/%s/%s/%s%s' % (
920                assembly, assembly_subdir, assembly, assembly_extension)
921            if self.config.build_config != 'gcov' or self.platform != 'windows':
922                # normally, run each test as a separate process
923                for test in tests_by_assembly[assembly]:
924                    cmdline = runtime_cmd + [assembly_file,
925                                             '--test=%s' % test] + nunit_args
926                    specs.append(
927                        self.config.job_spec(
928                            cmdline,
929                            shortname='csharp.%s' % test,
930                            environ=_FORCE_ENVIRON_FOR_WRAPPERS))
931            else:
932                # For C# test coverage, run all tests from the same assembly at once
933                # using OpenCover.Console (only works on Windows).
934                cmdline = [
935                    'src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
936                    '-target:%s' % assembly_file, '-targetdir:src\\csharp',
937                    '-targetargs:%s' % ' '.join(nunit_args),
938                    '-filter:+[Grpc.Core]*', '-register:user',
939                    '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly
940                ]
941
942                # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
943                # to prevent problems with registering the profiler.
944                run_exclusive = 1000000
945                specs.append(
946                    self.config.job_spec(cmdline,
947                                         shortname='csharp.coverage.%s' %
948                                         assembly,
949                                         cpu_cost=run_exclusive,
950                                         environ=_FORCE_ENVIRON_FOR_WRAPPERS))
951        return specs
952
953    def pre_build_steps(self):
954        if self.platform == 'windows':
955            return [[
956                'tools\\run_tests\\helper_scripts\\pre_build_csharp.bat',
957                self._cmake_arch_option
958            ]]
959        else:
960            return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
961
962    def make_targets(self):
963        return ['grpc_csharp_ext']
964
965    def make_options(self):
966        return []
967
968    def build_steps(self):
969        if self.platform == 'windows':
970            return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
971        else:
972            return [['tools/run_tests/helper_scripts/build_csharp.sh']]
973
974    def post_tests_steps(self):
975        if self.platform == 'windows':
976            return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
977        else:
978            return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
979
980    def makefile_name(self):
981        if self.platform == 'windows':
982            return 'cmake/build/%s/Makefile' % self._cmake_arch_option
983        else:
984            # no need to set x86 specific flags as run_tests.py
985            # currently forbids x86 C# builds on both Linux and MacOS.
986            return 'cmake/build/Makefile'
987
988    def dockerfile_dir(self):
989        return 'tools/dockerfile/test/csharp_%s_%s' % (
990            self._docker_distro, _docker_arch_suffix(self.args.arch))
991
992    def __str__(self):
993        return 'csharp'
994
995
996class ObjCLanguage(object):
997
998    def configure(self, config, args):
999        self.config = config
1000        self.args = args
1001        _check_compiler(self.args.compiler, ['default'])
1002
1003    def test_specs(self):
1004        out = []
1005        out.append(
1006            self.config.job_spec(
1007                ['src/objective-c/tests/build_one_example_bazel.sh'],
1008                timeout_seconds=10 * 60,
1009                shortname='ios-buildtest-example-sample',
1010                cpu_cost=1e6,
1011                environ={
1012                    'SCHEME': 'Sample',
1013                    'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
1014                    'FRAMEWORKS': 'NO'
1015                }))
1016        # Currently not supporting compiling as frameworks in Bazel
1017        out.append(
1018            self.config.job_spec(
1019                ['src/objective-c/tests/build_one_example.sh'],
1020                timeout_seconds=20 * 60,
1021                shortname='ios-buildtest-example-sample-frameworks',
1022                cpu_cost=1e6,
1023                environ={
1024                    'SCHEME': 'Sample',
1025                    'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
1026                    'FRAMEWORKS': 'YES'
1027                }))
1028        out.append(
1029            self.config.job_spec(
1030                ['src/objective-c/tests/build_one_example.sh'],
1031                timeout_seconds=20 * 60,
1032                shortname='ios-buildtest-example-switftsample',
1033                cpu_cost=1e6,
1034                environ={
1035                    'SCHEME': 'SwiftSample',
1036                    'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'
1037                }))
1038        out.append(
1039            self.config.job_spec(
1040                ['src/objective-c/tests/build_one_example_bazel.sh'],
1041                timeout_seconds=10 * 60,
1042                shortname='ios-buildtest-example-tvOS-sample',
1043                cpu_cost=1e6,
1044                environ={
1045                    'SCHEME': 'tvOS-sample',
1046                    'EXAMPLE_PATH': 'src/objective-c/examples/tvOS-sample',
1047                    'FRAMEWORKS': 'NO'
1048                }))
1049        # Disabled due to #20258
1050        # TODO (mxyan): Reenable this test when #20258 is resolved.
1051        # out.append(
1052        #     self.config.job_spec(
1053        #         ['src/objective-c/tests/build_one_example_bazel.sh'],
1054        #         timeout_seconds=20 * 60,
1055        #         shortname='ios-buildtest-example-watchOS-sample',
1056        #         cpu_cost=1e6,
1057        #         environ={
1058        #             'SCHEME': 'watchOS-sample-WatchKit-App',
1059        #             'EXAMPLE_PATH': 'src/objective-c/examples/watchOS-sample',
1060        #             'FRAMEWORKS': 'NO'
1061        #         }))
1062        out.append(
1063            self.config.job_spec(['src/objective-c/tests/run_plugin_tests.sh'],
1064                                 timeout_seconds=60 * 60,
1065                                 shortname='ios-test-plugintest',
1066                                 cpu_cost=1e6,
1067                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
1068        out.append(
1069            self.config.job_spec(
1070                ['src/objective-c/tests/run_plugin_option_tests.sh'],
1071                timeout_seconds=60 * 60,
1072                shortname='ios-test-plugin-option-test',
1073                cpu_cost=1e6,
1074                environ=_FORCE_ENVIRON_FOR_WRAPPERS))
1075        out.append(
1076            self.config.job_spec(
1077                ['test/core/iomgr/ios/CFStreamTests/build_and_run_tests.sh'],
1078                timeout_seconds=60 * 60,
1079                shortname='ios-test-cfstream-tests',
1080                cpu_cost=1e6,
1081                environ=_FORCE_ENVIRON_FOR_WRAPPERS))
1082        # TODO: replace with run_one_test_bazel.sh when Bazel-Xcode is stable
1083        out.append(
1084            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1085                                 timeout_seconds=60 * 60,
1086                                 shortname='ios-test-unittests',
1087                                 cpu_cost=1e6,
1088                                 environ={'SCHEME': 'UnitTests'}))
1089        out.append(
1090            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1091                                 timeout_seconds=60 * 60,
1092                                 shortname='ios-test-interoptests',
1093                                 cpu_cost=1e6,
1094                                 environ={'SCHEME': 'InteropTests'}))
1095        out.append(
1096            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1097                                 timeout_seconds=60 * 60,
1098                                 shortname='ios-test-cronettests',
1099                                 cpu_cost=1e6,
1100                                 environ={'SCHEME': 'CronetTests'}))
1101        out.append(
1102            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1103                                 timeout_seconds=30 * 60,
1104                                 shortname='ios-perf-test',
1105                                 cpu_cost=1e6,
1106                                 environ={'SCHEME': 'PerfTests'}))
1107        out.append(
1108            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1109                                 timeout_seconds=30 * 60,
1110                                 shortname='ios-perf-test-posix',
1111                                 cpu_cost=1e6,
1112                                 environ={'SCHEME': 'PerfTestsPosix'}))
1113        out.append(
1114            self.config.job_spec(['test/cpp/ios/build_and_run_tests.sh'],
1115                                 timeout_seconds=60 * 60,
1116                                 shortname='ios-cpp-test-cronet',
1117                                 cpu_cost=1e6,
1118                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
1119        out.append(
1120            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1121                                 timeout_seconds=60 * 60,
1122                                 shortname='mac-test-basictests',
1123                                 cpu_cost=1e6,
1124                                 environ={
1125                                     'SCHEME': 'MacTests',
1126                                     'PLATFORM': 'macos'
1127                                 }))
1128        out.append(
1129            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1130                                 timeout_seconds=30 * 60,
1131                                 shortname='tvos-test-basictests',
1132                                 cpu_cost=1e6,
1133                                 environ={
1134                                     'SCHEME': 'TvTests',
1135                                     'PLATFORM': 'tvos'
1136                                 }))
1137
1138        return sorted(out)
1139
1140    def pre_build_steps(self):
1141        return []
1142
1143    def make_targets(self):
1144        return []
1145
1146    def make_options(self):
1147        return []
1148
1149    def build_steps(self):
1150        return []
1151
1152    def post_tests_steps(self):
1153        return []
1154
1155    def makefile_name(self):
1156        return 'Makefile'
1157
1158    def dockerfile_dir(self):
1159        return None
1160
1161    def __str__(self):
1162        return 'objc'
1163
1164
1165class Sanity(object):
1166
1167    def configure(self, config, args):
1168        self.config = config
1169        self.args = args
1170        _check_compiler(self.args.compiler, ['default'])
1171
1172    def test_specs(self):
1173        import yaml
1174        with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
1175            environ = {'TEST': 'true'}
1176            if _is_use_docker_child():
1177                environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
1178                environ['CLANG_TIDY_SKIP_DOCKER'] = 'true'
1179                # sanity tests run tools/bazel wrapper concurrently
1180                # and that can result in a download/run race in the wrapper.
1181                # under docker we already have the right version of bazel
1182                # so we can just disable the wrapper.
1183                environ['DISABLE_BAZEL_WRAPPER'] = 'true'
1184            return [
1185                self.config.job_spec(cmd['script'].split(),
1186                                     timeout_seconds=30 * 60,
1187                                     environ=environ,
1188                                     cpu_cost=cmd.get('cpu_cost', 1))
1189                for cmd in yaml.load(f)
1190            ]
1191
1192    def pre_build_steps(self):
1193        return []
1194
1195    def make_targets(self):
1196        return ['run_dep_checks']
1197
1198    def make_options(self):
1199        return []
1200
1201    def build_steps(self):
1202        return []
1203
1204    def post_tests_steps(self):
1205        return []
1206
1207    def makefile_name(self):
1208        return 'Makefile'
1209
1210    def dockerfile_dir(self):
1211        return 'tools/dockerfile/test/sanity'
1212
1213    def __str__(self):
1214        return 'sanity'
1215
1216
1217# different configurations we can run under
1218with open('tools/run_tests/generated/configs.json') as f:
1219    _CONFIGS = dict(
1220        (cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))
1221
1222_LANGUAGES = {
1223    'c++': CLanguage('cxx', 'c++'),
1224    'c': CLanguage('c', 'c'),
1225    'grpc-node': RemoteNodeLanguage(),
1226    'php7': Php7Language(),
1227    'python': PythonLanguage(),
1228    'ruby': RubyLanguage(),
1229    'csharp': CSharpLanguage(),
1230    'objc': ObjCLanguage(),
1231    'sanity': Sanity()
1232}
1233
1234_MSBUILD_CONFIG = {
1235    'dbg': 'Debug',
1236    'opt': 'Release',
1237    'gcov': 'Debug',
1238}
1239
1240
1241def _windows_arch_option(arch):
1242    """Returns msbuild cmdline option for selected architecture."""
1243    if arch == 'default' or arch == 'x86':
1244        return '/p:Platform=Win32'
1245    elif arch == 'x64':
1246        return '/p:Platform=x64'
1247    else:
1248        print('Architecture %s not supported.' % arch)
1249        sys.exit(1)
1250
1251
1252def _check_arch_option(arch):
1253    """Checks that architecture option is valid."""
1254    if platform_string() == 'windows':
1255        _windows_arch_option(arch)
1256    elif platform_string() == 'linux':
1257        # On linux, we need to be running under docker with the right architecture.
1258        runtime_arch = platform.architecture()[0]
1259        if arch == 'default':
1260            return
1261        elif runtime_arch == '64bit' and arch == 'x64':
1262            return
1263        elif runtime_arch == '32bit' and arch == 'x86':
1264            return
1265        else:
1266            print(
1267                'Architecture %s does not match current runtime architecture.' %
1268                arch)
1269            sys.exit(1)
1270    else:
1271        if args.arch != 'default':
1272            print('Architecture %s not supported on current platform.' %
1273                  args.arch)
1274            sys.exit(1)
1275
1276
1277def _docker_arch_suffix(arch):
1278    """Returns suffix to dockerfile dir to use."""
1279    if arch == 'default' or arch == 'x64':
1280        return 'x64'
1281    elif arch == 'x86':
1282        return 'x86'
1283    else:
1284        print('Architecture %s not supported with current settings.' % arch)
1285        sys.exit(1)
1286
1287
1288def runs_per_test_type(arg_str):
1289    """Auxiliary function to parse the "runs_per_test" flag.
1290
1291       Returns:
1292           A positive integer or 0, the latter indicating an infinite number of
1293           runs.
1294
1295       Raises:
1296           argparse.ArgumentTypeError: Upon invalid input.
1297    """
1298    if arg_str == 'inf':
1299        return 0
1300    try:
1301        n = int(arg_str)
1302        if n <= 0:
1303            raise ValueError
1304        return n
1305    except:
1306        msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
1307        raise argparse.ArgumentTypeError(msg)
1308
1309
1310def percent_type(arg_str):
1311    pct = float(arg_str)
1312    if pct > 100 or pct < 0:
1313        raise argparse.ArgumentTypeError(
1314            "'%f' is not a valid percentage in the [0, 100] range" % pct)
1315    return pct
1316
1317
1318# This is math.isclose in python >= 3.5
1319def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
1320    return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
1321
1322
1323# parse command line
1324argp = argparse.ArgumentParser(description='Run grpc tests.')
1325argp.add_argument('-c',
1326                  '--config',
1327                  choices=sorted(_CONFIGS.keys()),
1328                  default='opt')
1329argp.add_argument(
1330    '-n',
1331    '--runs_per_test',
1332    default=1,
1333    type=runs_per_test_type,
1334    help='A positive integer or "inf". If "inf", all tests will run in an '
1335    'infinite loop. Especially useful in combination with "-f"')
1336argp.add_argument('-r', '--regex', default='.*', type=str)
1337argp.add_argument('--regex_exclude', default='', type=str)
1338argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
1339argp.add_argument('-s', '--slowdown', default=1.0, type=float)
1340argp.add_argument('-p',
1341                  '--sample_percent',
1342                  default=100.0,
1343                  type=percent_type,
1344                  help='Run a random sample with that percentage of tests')
1345argp.add_argument('-f',
1346                  '--forever',
1347                  default=False,
1348                  action='store_const',
1349                  const=True)
1350argp.add_argument('-t',
1351                  '--travis',
1352                  default=False,
1353                  action='store_const',
1354                  const=True)
1355argp.add_argument('--newline_on_success',
1356                  default=False,
1357                  action='store_const',
1358                  const=True)
1359argp.add_argument('-l',
1360                  '--language',
1361                  choices=sorted(_LANGUAGES.keys()),
1362                  nargs='+',
1363                  required=True)
1364argp.add_argument('-S',
1365                  '--stop_on_failure',
1366                  default=False,
1367                  action='store_const',
1368                  const=True)
1369argp.add_argument('--use_docker',
1370                  default=False,
1371                  action='store_const',
1372                  const=True,
1373                  help='Run all the tests under docker. That provides ' +
1374                  'additional isolation and prevents the need to install ' +
1375                  'language specific prerequisites. Only available on Linux.')
1376argp.add_argument(
1377    '--allow_flakes',
1378    default=False,
1379    action='store_const',
1380    const=True,
1381    help=
1382    'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
1383)
1384argp.add_argument(
1385    '--arch',
1386    choices=['default', 'x86', 'x64'],
1387    default='default',
1388    help=
1389    'Selects architecture to target. For some platforms "default" is the only supported choice.'
1390)
1391argp.add_argument(
1392    '--compiler',
1393    choices=[
1394        'default',
1395        'gcc4.9',
1396        'gcc5.3',
1397        'gcc8.3',
1398        'gcc8.3_openssl102',
1399        'gcc11',
1400        'gcc_musl',
1401        'clang4',
1402        'clang12',
1403        'python2.7',
1404        'python3.5',
1405        'python3.6',
1406        'python3.7',
1407        'python3.8',
1408        'python3.9',
1409        'pypy',
1410        'pypy3',
1411        'python_alpine',
1412        'all_the_cpythons',
1413        'electron1.3',
1414        'electron1.6',
1415        'coreclr',
1416        'cmake',
1417        'cmake_vs2015',
1418        'cmake_vs2017',
1419        'cmake_vs2019',
1420    ],
1421    default='default',
1422    help=
1423    'Selects compiler to use. Allowed values depend on the platform and language.'
1424)
1425argp.add_argument('--iomgr_platform',
1426                  choices=['native', 'gevent', 'asyncio'],
1427                  default='native',
1428                  help='Selects iomgr platform to build on')
1429argp.add_argument('--build_only',
1430                  default=False,
1431                  action='store_const',
1432                  const=True,
1433                  help='Perform all the build steps but don\'t run any tests.')
1434argp.add_argument('--measure_cpu_costs',
1435                  default=False,
1436                  action='store_const',
1437                  const=True,
1438                  help='Measure the cpu costs of tests')
1439argp.add_argument(
1440    '--update_submodules',
1441    default=[],
1442    nargs='*',
1443    help=
1444    'Update some submodules before building. If any are updated, also run generate_projects. '
1445    +
1446    'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
1447)
1448argp.add_argument('-a', '--antagonists', default=0, type=int)
1449argp.add_argument('-x',
1450                  '--xml_report',
1451                  default=None,
1452                  type=str,
1453                  help='Generates a JUnit-compatible XML report')
1454argp.add_argument('--report_suite_name',
1455                  default='tests',
1456                  type=str,
1457                  help='Test suite name to use in generated JUnit XML report')
1458argp.add_argument(
1459    '--report_multi_target',
1460    default=False,
1461    const=True,
1462    action='store_const',
1463    help='Generate separate XML report for each test job (Looks better in UIs).'
1464)
1465argp.add_argument(
1466    '--quiet_success',
1467    default=False,
1468    action='store_const',
1469    const=True,
1470    help=
1471    'Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
1472    + 'Useful when running many iterations of each test (argument -n).')
1473argp.add_argument(
1474    '--force_default_poller',
1475    default=False,
1476    action='store_const',
1477    const=True,
1478    help='Don\'t try to iterate over many polling strategies when they exist')
1479argp.add_argument(
1480    '--force_use_pollers',
1481    default=None,
1482    type=str,
1483    help='Only use the specified comma-delimited list of polling engines. '
1484    'Example: --force_use_pollers epoll1,poll '
1485    ' (This flag has no effect if --force_default_poller flag is also used)')
1486argp.add_argument('--max_time',
1487                  default=-1,
1488                  type=int,
1489                  help='Maximum test runtime in seconds')
1490argp.add_argument('--bq_result_table',
1491                  default='',
1492                  type=str,
1493                  nargs='?',
1494                  help='Upload test results to a specified BQ table.')
1495args = argp.parse_args()
1496
1497flaky_tests = set()
1498shortname_to_cpu = {}
1499
1500if args.force_default_poller:
1501    _POLLING_STRATEGIES = {}
1502elif args.force_use_pollers:
1503    _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')
1504
1505jobset.measure_cpu_costs = args.measure_cpu_costs
1506
1507# update submodules if necessary
1508need_to_regenerate_projects = False
1509for spec in args.update_submodules:
1510    spec = spec.split(':', 1)
1511    if len(spec) == 1:
1512        submodule = spec[0]
1513        branch = 'master'
1514    elif len(spec) == 2:
1515        submodule = spec[0]
1516        branch = spec[1]
1517    cwd = 'third_party/%s' % submodule
1518
1519    def git(cmd, cwd=cwd):
1520        print('in %s: git %s' % (cwd, cmd))
1521        run_shell_command('git %s' % cmd, cwd=cwd)
1522
1523    git('fetch')
1524    git('checkout %s' % branch)
1525    git('pull origin %s' % branch)
1526    if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
1527        need_to_regenerate_projects = True
1528if need_to_regenerate_projects:
1529    if jobset.platform_string() == 'linux':
1530        run_shell_command('tools/buildgen/generate_projects.sh')
1531    else:
1532        print(
1533            'WARNING: may need to regenerate projects, but since we are not on')
1534        print(
1535            '         Linux this step is being skipped. Compilation MAY fail.')
1536
1537# grab config
1538run_config = _CONFIGS[args.config]
1539build_config = run_config.build_config
1540
1541if args.travis:
1542    _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}
1543
1544languages = set(_LANGUAGES[l] for l in args.language)
1545for l in languages:
1546    l.configure(run_config, args)
1547
1548language_make_options = []
1549if any(language.make_options() for language in languages):
1550    if not 'gcov' in args.config and len(languages) != 1:
1551        print(
1552            'languages with custom make options cannot be built simultaneously with other languages'
1553        )
1554        sys.exit(1)
1555    else:
1556        # Combining make options is not clean and just happens to work. It allows C & C++ to build
1557        # together, and is only used under gcov. All other configs should build languages individually.
1558        language_make_options = list(
1559            set([
1560                make_option for lang in languages
1561                for make_option in lang.make_options()
1562            ]))
1563
1564if args.use_docker:
1565    if not args.travis:
1566        print('Seen --use_docker flag, will run tests under docker.')
1567        print('')
1568        print(
1569            'IMPORTANT: The changes you are testing need to be locally committed'
1570        )
1571        print(
1572            'because only the committed changes in the current branch will be')
1573        print('copied to the docker environment.')
1574        time.sleep(5)
1575
1576    dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
1577    if len(dockerfile_dirs) > 1:
1578        print('Languages to be tested require running under different docker '
1579              'images.')
1580        sys.exit(1)
1581    else:
1582        dockerfile_dir = next(iter(dockerfile_dirs))
1583
1584    child_argv = [arg for arg in sys.argv if not arg == '--use_docker']
1585    run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(
1586        child_argv[1:])
1587
1588    env = os.environ.copy()
1589    env['RUN_TESTS_COMMAND'] = run_tests_cmd
1590    env['DOCKERFILE_DIR'] = dockerfile_dir
1591    env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
1592    if args.xml_report:
1593        env['XML_REPORT'] = args.xml_report
1594    if not args.travis:
1595        env['TTY_FLAG'] = '-t'  # enables Ctrl-C when not on Jenkins.
1596
1597    subprocess.check_call(
1598        'tools/run_tests/dockerize/build_docker_and_run_tests.sh',
1599        shell=True,
1600        env=env)
1601    sys.exit(0)
1602
1603_check_arch_option(args.arch)
1604
1605
1606def make_jobspec(cfg, targets, makefile='Makefile'):
1607    if platform_string() == 'windows':
1608        return [
1609            jobset.JobSpec([
1610                'cmake', '--build', '.', '--target',
1611                '%s' % target, '--config', _MSBUILD_CONFIG[cfg]
1612            ],
1613                           cwd=os.path.dirname(makefile),
1614                           timeout_seconds=None) for target in targets
1615        ]
1616    else:
1617        if targets and makefile.startswith('cmake/build/'):
1618            # With cmake, we've passed all the build configuration in the pre-build step already
1619            return [
1620                jobset.JobSpec(
1621                    [os.getenv('MAKE', 'make'), '-j',
1622                     '%d' % args.jobs] + targets,
1623                    cwd='cmake/build',
1624                    timeout_seconds=None)
1625            ]
1626        if targets:
1627            return [
1628                jobset.JobSpec(
1629                    [
1630                        os.getenv('MAKE', 'make'), '-f', makefile, '-j',
1631                        '%d' % args.jobs,
1632                        'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' %
1633                        args.slowdown,
1634                        'CONFIG=%s' % cfg, 'Q='
1635                    ] + language_make_options +
1636                    ([] if not args.travis else ['JENKINS_BUILD=1']) + targets,
1637                    timeout_seconds=None)
1638            ]
1639        else:
1640            return []
1641
1642
1643make_targets = {}
1644for l in languages:
1645    makefile = l.makefile_name()
1646    make_targets[makefile] = make_targets.get(makefile, set()).union(
1647        set(l.make_targets()))
1648
1649
1650def build_step_environ(cfg):
1651    environ = {'CONFIG': cfg}
1652    msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
1653    if msbuild_cfg:
1654        environ['MSBUILD_CONFIG'] = msbuild_cfg
1655    return environ
1656
1657
1658build_steps = list(
1659    set(
1660        jobset.JobSpec(cmdline,
1661                       environ=build_step_environ(build_config),
1662                       timeout_seconds=_PRE_BUILD_STEP_TIMEOUT_SECONDS,
1663                       flake_retries=2)
1664        for l in languages
1665        for cmdline in l.pre_build_steps()))
1666if make_targets:
1667    make_commands = itertools.chain.from_iterable(
1668        make_jobspec(build_config, list(targets), makefile)
1669        for (makefile, targets) in make_targets.items())
1670    build_steps.extend(set(make_commands))
1671build_steps.extend(
1672    set(
1673        jobset.JobSpec(cmdline,
1674                       environ=build_step_environ(build_config),
1675                       timeout_seconds=None)
1676        for l in languages
1677        for cmdline in l.build_steps()))
1678
1679post_tests_steps = list(
1680    set(
1681        jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
1682        for l in languages
1683        for cmdline in l.post_tests_steps()))
1684runs_per_test = args.runs_per_test
1685forever = args.forever
1686
1687
1688def _shut_down_legacy_server(legacy_server_port):
1689    try:
1690        version = int(
1691            urllib.request.urlopen('http://localhost:%d/version_number' %
1692                                   legacy_server_port,
1693                                   timeout=10).read())
1694    except:
1695        pass
1696    else:
1697        urllib.request.urlopen('http://localhost:%d/quitquitquit' %
1698                               legacy_server_port).read()
1699
1700
1701def _calculate_num_runs_failures(list_of_results):
1702    """Calculate number of runs and failures for a particular test.
1703
1704  Args:
1705    list_of_results: (List) of JobResult object.
1706  Returns:
1707    A tuple of total number of runs and failures.
1708  """
1709    num_runs = len(list_of_results)  # By default, there is 1 run per JobResult.
1710    num_failures = 0
1711    for jobresult in list_of_results:
1712        if jobresult.retries > 0:
1713            num_runs += jobresult.retries
1714        if jobresult.num_failures > 0:
1715            num_failures += jobresult.num_failures
1716    return num_runs, num_failures
1717
1718
1719# _build_and_run results
1720class BuildAndRunError(object):
1721
1722    BUILD = object()
1723    TEST = object()
1724    POST_TEST = object()
1725
1726
1727def _has_epollexclusive():
1728    binary = 'bins/%s/check_epollexclusive' % args.config
1729    if not os.path.exists(binary):
1730        return False
1731    try:
1732        subprocess.check_call(binary)
1733        return True
1734    except subprocess.CalledProcessError as e:
1735        return False
1736    except OSError as e:
1737        # For languages other than C and Windows the binary won't exist
1738        return False
1739
1740
1741# returns a list of things that failed (or an empty list on success)
1742def _build_and_run(check_cancelled,
1743                   newline_on_success,
1744                   xml_report=None,
1745                   build_only=False):
1746    """Do one pass of building & running tests."""
1747    # build latest sequentially
1748    num_failures, resultset = jobset.run(build_steps,
1749                                         maxjobs=1,
1750                                         stop_on_failure=True,
1751                                         newline_on_success=newline_on_success,
1752                                         travis=args.travis)
1753    if num_failures:
1754        return [BuildAndRunError.BUILD]
1755
1756    if build_only:
1757        if xml_report:
1758            report_utils.render_junit_xml_report(
1759                resultset, xml_report, suite_name=args.report_suite_name)
1760        return []
1761
1762    if not args.travis and not _has_epollexclusive() and platform_string(
1763    ) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[
1764            platform_string()]:
1765        print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
1766        _POLLING_STRATEGIES[platform_string()].remove('epollex')
1767
1768    # start antagonists
1769    antagonists = [
1770        subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
1771        for _ in range(0, args.antagonists)
1772    ]
1773    start_port_server.start_port_server()
1774    resultset = None
1775    num_test_failures = 0
1776    try:
1777        infinite_runs = runs_per_test == 0
1778        one_run = set(spec for language in languages
1779                      for spec in language.test_specs()
1780                      if (re.search(args.regex, spec.shortname) and
1781                          (args.regex_exclude == '' or
1782                           not re.search(args.regex_exclude, spec.shortname))))
1783        # When running on travis, we want out test runs to be as similar as possible
1784        # for reproducibility purposes.
1785        if args.travis and args.max_time <= 0:
1786            massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
1787        else:
1788            # whereas otherwise, we want to shuffle things up to give all tests a
1789            # chance to run.
1790            massaged_one_run = list(
1791                one_run)  # random.sample needs an indexable seq.
1792            num_jobs = len(massaged_one_run)
1793            # for a random sample, get as many as indicated by the 'sample_percent'
1794            # argument. By default this arg is 100, resulting in a shuffle of all
1795            # jobs.
1796            sample_size = int(num_jobs * args.sample_percent / 100.0)
1797            massaged_one_run = random.sample(massaged_one_run, sample_size)
1798            if not isclose(args.sample_percent, 100.0):
1799                assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
1800                print("Running %d tests out of %d (~%d%%)" %
1801                      (sample_size, num_jobs, args.sample_percent))
1802        if infinite_runs:
1803            assert len(massaged_one_run
1804                      ) > 0, 'Must have at least one test for a -n inf run'
1805        runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
1806                         else itertools.repeat(massaged_one_run, runs_per_test))
1807        all_runs = itertools.chain.from_iterable(runs_sequence)
1808
1809        if args.quiet_success:
1810            jobset.message(
1811                'START',
1812                'Running tests quietly, only failing tests will be reported',
1813                do_newline=True)
1814        num_test_failures, resultset = jobset.run(
1815            all_runs,
1816            check_cancelled,
1817            newline_on_success=newline_on_success,
1818            travis=args.travis,
1819            maxjobs=args.jobs,
1820            maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
1821            stop_on_failure=args.stop_on_failure,
1822            quiet_success=args.quiet_success,
1823            max_time=args.max_time)
1824        if resultset:
1825            for k, v in sorted(resultset.items()):
1826                num_runs, num_failures = _calculate_num_runs_failures(v)
1827                if num_failures > 0:
1828                    if num_failures == num_runs:  # what about infinite_runs???
1829                        jobset.message('FAILED', k, do_newline=True)
1830                    else:
1831                        jobset.message('FLAKE',
1832                                       '%s [%d/%d runs flaked]' %
1833                                       (k, num_failures, num_runs),
1834                                       do_newline=True)
1835    finally:
1836        for antagonist in antagonists:
1837            antagonist.kill()
1838        if args.bq_result_table and resultset:
1839            upload_extra_fields = {
1840                'compiler': args.compiler,
1841                'config': args.config,
1842                'iomgr_platform': args.iomgr_platform,
1843                'language': args.language[
1844                    0
1845                ],  # args.language is a list but will always have one element when uploading to BQ is enabled.
1846                'platform': platform_string()
1847            }
1848            try:
1849                upload_results_to_bq(resultset, args.bq_result_table,
1850                                     upload_extra_fields)
1851            except NameError as e:
1852                logging.warning(
1853                    e)  # It's fine to ignore since this is not critical
1854        if xml_report and resultset:
1855            report_utils.render_junit_xml_report(
1856                resultset,
1857                xml_report,
1858                suite_name=args.report_suite_name,
1859                multi_target=args.report_multi_target)
1860
1861    number_failures, _ = jobset.run(post_tests_steps,
1862                                    maxjobs=1,
1863                                    stop_on_failure=False,
1864                                    newline_on_success=newline_on_success,
1865                                    travis=args.travis)
1866
1867    out = []
1868    if number_failures:
1869        out.append(BuildAndRunError.POST_TEST)
1870    if num_test_failures:
1871        out.append(BuildAndRunError.TEST)
1872
1873    return out
1874
1875
1876if forever:
1877    success = True
1878    while True:
1879        dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
1880        initial_time = dw.most_recent_change()
1881        have_files_changed = lambda: dw.most_recent_change() != initial_time
1882        previous_success = success
1883        errors = _build_and_run(check_cancelled=have_files_changed,
1884                                newline_on_success=False,
1885                                build_only=args.build_only) == 0
1886        if not previous_success and not errors:
1887            jobset.message('SUCCESS',
1888                           'All tests are now passing properly',
1889                           do_newline=True)
1890        jobset.message('IDLE', 'No change detected')
1891        while not have_files_changed():
1892            time.sleep(1)
1893else:
1894    errors = _build_and_run(check_cancelled=lambda: False,
1895                            newline_on_success=args.newline_on_success,
1896                            xml_report=args.xml_report,
1897                            build_only=args.build_only)
1898    if not errors:
1899        jobset.message('SUCCESS', 'All tests passed', do_newline=True)
1900    else:
1901        jobset.message('FAILED', 'Some tests failed', do_newline=True)
1902    exit_code = 0
1903    if BuildAndRunError.BUILD in errors:
1904        exit_code |= 1
1905    if BuildAndRunError.TEST in errors:
1906        exit_code |= 2
1907    if BuildAndRunError.POST_TEST in errors:
1908        exit_code |= 4
1909    sys.exit(exit_code)
1910