# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# for py2/py3 compatibility
from __future__ import print_function
from functools import reduce

from collections import OrderedDict
import json
import multiprocessing
import optparse
import os
import shlex
import sys
import traceback


# Add testrunner to the path.
sys.path.insert(
    0,
    os.path.dirname(
        os.path.dirname(os.path.abspath(__file__))))


from testrunner.local import command
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.test_config import TestConfig
from testrunner.testproc import progress
from testrunner.testproc.rerun import RerunProc
from testrunner.testproc.shard import ShardProc
from testrunner.testproc.sigproc import SignalProc
from testrunner.testproc.timeout import TimeoutProc


BASE_DIR = (
    os.path.dirname(
        os.path.dirname(
            os.path.dirname(
                os.path.abspath(__file__)))))

DEFAULT_OUT_GN = 'out.gn'

# Map of test name synonyms to lists of test suites. Should be ordered by
# expected runtimes (suites with slow test cases first). These groups are
# invoked in separate steps on the bots.
# The mapping from names used here to GN targets (which must stay in sync)
# is defined in infra/mb/gn_isolate_map.pyl.
TEST_MAP = {
  # This needs to stay in sync with group("v8_bot_default") in test/BUILD.gn.
  "bot_default": [
    "debugger",
    "mjsunit",
    "cctest",
    "wasm-spec-tests",
    "inspector",
    "webkit",
    "mkgrokdump",
    "wasm-js",
    "fuzzer",
    "message",
    "intl",
    "unittests",
    "wasm-api-tests",
  ],
  # This needs to stay in sync with group("v8_default") in test/BUILD.gn.
  "default": [
    "debugger",
    "mjsunit",
    "cctest",
    "wasm-spec-tests",
    "inspector",
    "mkgrokdump",
    "wasm-js",
    "fuzzer",
    "message",
    "intl",
    "unittests",
    "wasm-api-tests",
  ],
  # This needs to stay in sync with group("v8_d8_default") in test/BUILD.gn.
  "d8_default": [
    "debugger",
    "mjsunit",
    "webkit",
    "message",
    "intl",
  ],
  # This needs to stay in sync with "v8_optimize_for_size" in test/BUILD.gn.
  "optimize_for_size": [
    "debugger",
    "mjsunit",
    "cctest",
    "inspector",
    "webkit",
    "intl",
  ],
  "unittests": [
    "unittests",
  ],
}

# Increase the timeout for these:
SLOW_ARCHS = [
  "arm",
  "arm64",
  "mips",
  "mipsel",
  "mips64",
  "mips64el",
  "s390",
  "s390x",
]


class ModeConfig(object):
  def __init__(self, flags, timeout_scalefactor, status_mode, execution_mode):
    self.flags = flags
    self.timeout_scalefactor = timeout_scalefactor
    self.status_mode = status_mode
    self.execution_mode = execution_mode


DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap"]
RELEASE_FLAGS = ["--nohard-abort"]
MODES = {
  "debug": ModeConfig(
    flags=DEBUG_FLAGS,
    timeout_scalefactor=4,
    status_mode="debug",
    execution_mode="debug",
  ),
  "optdebug": ModeConfig(
    flags=DEBUG_FLAGS,
    timeout_scalefactor=4,
    status_mode="debug",
    execution_mode="debug",
  ),
  "release": ModeConfig(
    flags=RELEASE_FLAGS,
    timeout_scalefactor=1,
    status_mode="release",
    execution_mode="release",
  ),
  # Normal trybot release configuration. There, dchecks are always on which
  # implies debug is set. Hence, the status file needs to assume debug-like
  # behavior/timeouts.
  "tryrelease": ModeConfig(
    flags=RELEASE_FLAGS,
    timeout_scalefactor=1,
    status_mode="debug",
    execution_mode="release",
  ),
  # This mode requires v8 to be compiled with dchecks and slow dchecks.
  "slowrelease": ModeConfig(
    flags=RELEASE_FLAGS + ["--enable-slow-asserts"],
    timeout_scalefactor=2,
    status_mode="debug",
    execution_mode="release",
  ),
}
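
# Illustrative example (comments only, not executed) of how a ModeConfig is
# meant to be read; 'tryrelease' is the entry defined above:
#
#   mode = MODES['tryrelease']
#   mode.execution_mode   # 'release' -- run release binaries and flags
#   mode.status_mode      # 'debug'   -- but use debug status-file expectations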

PROGRESS_INDICATORS = {
  'verbose': progress.VerboseProgressIndicator,
  'ci': progress.CIProgressIndicator,
  'dots': progress.DotsProgressIndicator,
  'color': progress.ColorProgressIndicator,
  'mono': progress.MonochromeProgressIndicator,
}


class TestRunnerError(Exception):
  pass


class BuildConfig(object):
  def __init__(self, build_config):
    # In V8 land, GN's x86 is called ia32.
    if build_config['v8_target_cpu'] == 'x86':
      self.arch = 'ia32'
    else:
      self.arch = build_config['v8_target_cpu']

    self.asan = build_config['is_asan']
    self.cfi_vptr = build_config['is_cfi']
    self.dcheck_always_on = build_config['dcheck_always_on']
    self.gcov_coverage = build_config['is_gcov_coverage']
    self.is_android = build_config['is_android']
    self.is_clang = build_config['is_clang']
    self.is_debug = build_config['is_debug']
    self.is_full_debug = build_config['is_full_debug']
    self.msan = build_config['is_msan']
    self.no_i18n = not build_config['v8_enable_i18n_support']
    self.predictable = build_config['v8_enable_verify_predictable']
    self.tsan = build_config['is_tsan']
    # TODO(machenbach): We only have ubsan not ubsan_vptr.
    self.ubsan_vptr = build_config['is_ubsan_vptr']
    self.verify_csa = build_config['v8_enable_verify_csa']
    self.lite_mode = build_config['v8_enable_lite_mode']
    self.pointer_compression = build_config['v8_enable_pointer_compression']
    # Export only for MIPS target
    if self.arch in ['mips', 'mipsel', 'mips64', 'mips64el']:
      self.mips_arch_variant = build_config['mips_arch_variant']
      self.mips_use_msa = build_config['mips_use_msa']

  @property
  def use_sanitizer(self):
    return (self.asan or self.cfi_vptr or self.msan or self.tsan or
            self.ubsan_vptr)

  def __str__(self):
    detected_options = []

    if self.asan:
      detected_options.append('asan')
    if self.cfi_vptr:
      detected_options.append('cfi_vptr')
    if self.dcheck_always_on:
      detected_options.append('dcheck_always_on')
    if self.gcov_coverage:
      detected_options.append('gcov_coverage')
    if self.msan:
      detected_options.append('msan')
    if self.no_i18n:
      detected_options.append('no_i18n')
    if self.predictable:
      detected_options.append('predictable')
    if self.tsan:
      detected_options.append('tsan')
    if self.ubsan_vptr:
      detected_options.append('ubsan_vptr')
    if self.verify_csa:
      detected_options.append('verify_csa')
    if self.lite_mode:
      detected_options.append('lite_mode')
    if self.pointer_compression:
      detected_options.append('pointer_compression')

    return '\n'.join(detected_options)


class BaseTestRunner(object):
  def __init__(self, basedir=None):
    self.basedir = basedir or BASE_DIR
    self.outdir = None
    self.build_config = None
    self.mode_name = None
    self.mode_options = None
    self.target_os = None

  @property
  def framework_name(self):
    """String name of the base-runner subclass, used in test results."""
    raise NotImplementedError()
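
  # Illustrative sketch of a hypothetical subclass (not part of this module):
  # concrete runners override framework_name and _do_execute, e.g.:
  #
  #   class MyTestRunner(BaseTestRunner):
  #     @property
  #     def framework_name(self):
  #       return 'my_runner'
  #
  #     def _do_execute(self, tests, args, options):
  #       ...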

  def execute(self, sys_args=None):
    if sys_args is None:  # pragma: no cover
      sys_args = sys.argv[1:]
    try:
      parser = self._create_parser()
      options, args = self._parse_args(parser, sys_args)
      if options.swarming:
        # Swarming doesn't print how isolated commands are called. Let's make
        # this less cryptic by printing it ourselves.
        print(' '.join(sys.argv))

      self._load_build_config(options)
      command.setup(self.target_os, options.device)

      try:
        self._process_default_options(options)
        self._process_options(options)
      except TestRunnerError:
        parser.print_help()
        raise

      args = self._parse_test_args(args)
      tests = self._load_testsuite_generators(args, options)
      self._setup_env()
      print(">>> Running tests for %s.%s" % (self.build_config.arch,
                                             self.mode_name))
      exit_code = self._do_execute(tests, args, options)
      if exit_code == utils.EXIT_CODE_FAILURES and options.json_test_results:
        print("Force exit code 0 after failures. Json test results file "
              "generated with failure information.")
        exit_code = utils.EXIT_CODE_PASS
      return exit_code
    except TestRunnerError:
      traceback.print_exc()
      return utils.EXIT_CODE_INTERNAL_ERROR
    except KeyboardInterrupt:
      return utils.EXIT_CODE_INTERRUPTED
    except Exception:
      traceback.print_exc()
      return utils.EXIT_CODE_INTERNAL_ERROR
    finally:
      command.tear_down()

  def _create_parser(self):
    parser = optparse.OptionParser()
    parser.usage = '%prog [options] [tests]'
    parser.description = """TESTS: %s""" % (TEST_MAP["default"])
    self._add_parser_default_options(parser)
    self._add_parser_options(parser)
    return parser
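
  # Illustrative, hypothetical invocation using the options added below:
  #
  #   tools/run-tests.py --outdir=out/x64.release --progress=verbose mjsunit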

  def _add_parser_default_options(self, parser):
    parser.add_option("--gn", help="Scan out.gn for the last built"
                      " configuration",
                      default=False, action="store_true")
    parser.add_option("--outdir", help="Base directory with compile output",
                      default="out")
    parser.add_option("--arch",
                      help="The architecture to run tests for")
    parser.add_option("-m", "--mode",
                      help="The test mode in which to run (uppercase for builds"
                           " in CI): %s" % MODES.keys())
    parser.add_option("--shell-dir", help="DEPRECATED! Executables from build "
                      "directory will be used")
    parser.add_option("--test-root", help="Root directory of the test suites",
                      default=os.path.join(self.basedir, 'test'))
    parser.add_option("--total-timeout-sec", default=0, type="int",
                      help="How long the fuzzer should run, in seconds")
    parser.add_option("--swarming", default=False, action="store_true",
                      help="Indicates running test driver on swarming.")

    parser.add_option("-j", help="The number of parallel tasks to run",
                      default=0, type=int)
    parser.add_option("-d", "--device",
                      help="The device ID to run Android tests on. If not "
                           "given it will be autodetected.")

    # Shard
    parser.add_option("--shard-count", default=1, type=int,
                      help="Split tests into this number of shards")
    parser.add_option("--shard-run", default=1, type=int,
                      help="Run this shard from the split up tests.")

    # Progress
    parser.add_option("-p", "--progress",
                      choices=list(PROGRESS_INDICATORS.keys()), default="mono",
                      help="The style of progress indicator (verbose, dots, "
                           "color, mono)")
    parser.add_option("--json-test-results",
                      help="Path to a file for storing json results.")
    parser.add_option('--slow-tests-cutoff', type="int", default=100,
                      help='Collect N slowest tests')
    parser.add_option("--exit-after-n-failures", type="int", default=100,
                      help="Exit after the first N failures instead of "
                           "running all tests. Pass 0 to disable this feature.")
    parser.add_option("--ci-test-completion",
                      help="Path to a file for logging test completion in the "
                           "context of CI progress indicator. Ignored if "
                           "progress indicator is other than 'ci'.")

    # Rerun
    parser.add_option("--rerun-failures-count", default=0, type=int,
                      help="Number of times to rerun each failing test case. "
                           "Very slow tests will be rerun only once.")
    parser.add_option("--rerun-failures-max", default=100, type=int,
                      help="Maximum number of failing test cases to rerun")

    # Test config
    parser.add_option("--command-prefix", default="",
                      help="Prepended to each shell command used to run a test")
    parser.add_option("--extra-flags", action="append", default=[],
                      help="Additional flags to pass to each test command")
    parser.add_option("--isolates", action="store_true", default=False,
                      help="Whether to test isolates")
    parser.add_option("--no-harness", "--noharness",
                      default=False, action="store_true",
                      help="Run without the test harness of a given suite")
    parser.add_option("--random-seed", default=0, type=int,
                      help="Default seed for initializing the random generator")
    parser.add_option("--run-skipped", help="Also run skipped tests.",
                      default=False, action="store_true")
    parser.add_option("-t", "--timeout", default=60, type=int,
                      help="Timeout for a single test in seconds")
    parser.add_option("-v", "--verbose", default=False, action="store_true",
                      help="Verbose output")
    parser.add_option('--regenerate-expected-files', default=False,
                      action='store_true',
                      help='Regenerate expected files')

    # TODO(machenbach): Temporary options for rolling out new test runner
    # features.
    parser.add_option("--mastername", default='',
                      help="Mastername property from infrastructure. Not "
                           "setting this option indicates manual usage.")
    parser.add_option("--buildername", default='',
                      help="Buildername property from infrastructure. Not "
                           "setting this option indicates manual usage.")

  def _add_parser_options(self, parser):
    pass

  def _parse_args(self, parser, sys_args):
    options, args = parser.parse_args(sys_args)

    if any(map(lambda v: v and ',' in v,
               [options.arch, options.mode])):  # pragma: no cover
      print('Multiple arch/mode values are deprecated')
      raise TestRunnerError()

    return options, args

  def _load_build_config(self, options):
    for outdir in self._possible_outdirs(options):
      try:
        self.build_config = self._do_load_build_config(outdir, options.verbose)
      except TestRunnerError:
        pass

    if not self.build_config:  # pragma: no cover
      print('Failed to load build config')
      raise TestRunnerError()

    print('Build found: %s' % self.outdir)
    if str(self.build_config):
      print('>>> Autodetected:')
      print(self.build_config)

    # Represents the OS the tests are run on. Same as the host OS, except for
    # Android, which is determined from the build output.
    if self.build_config.is_android:
      self.target_os = 'android'
    else:
      self.target_os = utils.GuessOS()

  # Returns possible build paths in order:
  # gn
  # outdir
  # outdir/arch.mode
  # Each path is provided in two versions: <path> and <path>/mode for bots.
  def _possible_outdirs(self, options):
    def outdirs():
      if options.gn:
        yield self._get_gn_outdir()
        return

      yield options.outdir
      if options.arch and options.mode:
        yield os.path.join(options.outdir,
                           '%s.%s' % (options.arch, options.mode))

    for outdir in outdirs():
      yield os.path.join(self.basedir, outdir)

      # bot option
      if options.mode:
        yield os.path.join(self.basedir, outdir, options.mode)
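
  # Illustrative example for _possible_outdirs above: with --outdir=out,
  # --arch=x64 and --mode=release, the candidates are (relative to basedir)
  # out, out/release, out/x64.release and out/x64.release/release.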

  def _get_gn_outdir(self):
    gn_out_dir = os.path.join(self.basedir, DEFAULT_OUT_GN)
    latest_timestamp = -1
    latest_config = None
    for gn_config in os.listdir(gn_out_dir):
      gn_config_dir = os.path.join(gn_out_dir, gn_config)
      if not os.path.isdir(gn_config_dir):
        continue
      if os.path.getmtime(gn_config_dir) > latest_timestamp:
        latest_timestamp = os.path.getmtime(gn_config_dir)
        latest_config = gn_config
    if latest_config:
      print(">>> Latest GN build found: %s" % latest_config)
      return os.path.join(DEFAULT_OUT_GN, latest_config)

  def _do_load_build_config(self, outdir, verbose=False):
    build_config_path = os.path.join(outdir, "v8_build_config.json")
    if not os.path.exists(build_config_path):
      if verbose:
        print("Didn't find build config: %s" % build_config_path)
      raise TestRunnerError()

    with open(build_config_path) as f:
      try:
        build_config_json = json.load(f)
      except Exception:  # pragma: no cover
        print("%s exists but contains invalid json. Is your build up-to-date?"
              % build_config_path)
        raise TestRunnerError()

    # In auto-detect mode the outdir is always where we found the build config.
    # This ensures that we'll also take the build products from there.
    self.outdir = os.path.dirname(build_config_path)

    return BuildConfig(build_config_json)
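
  # For reference, an abbreviated v8_build_config.json as consumed by
  # BuildConfig above (key names as read there, values hypothetical):
  #
  #   {
  #     "v8_target_cpu": "x64",
  #     "is_debug": false,
  #     "is_asan": false,
  #     "v8_enable_i18n_support": true,
  #     ...
  #   }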

  def _process_default_options(self, options):
    # The mode is not used for any further path magic, so transform the bot
    # mode here to match the value derived from the build config.
    if options.mode:
      options.mode = self._bot_to_v8_mode(options.mode)

    build_config_mode = 'debug' if self.build_config.is_debug else 'release'
    if options.mode:
      if options.mode not in MODES:  # pragma: no cover
        print('%s mode is invalid' % options.mode)
        raise TestRunnerError()
      if MODES[options.mode].execution_mode != build_config_mode:
        print('execution mode (%s) for %s is inconsistent with build config '
              '(%s)' % (
                  MODES[options.mode].execution_mode,
                  options.mode,
                  build_config_mode))
        raise TestRunnerError()

      self.mode_name = options.mode
    else:
      self.mode_name = build_config_mode

    self.mode_options = MODES[self.mode_name]

    if options.arch and options.arch != self.build_config.arch:
      print('--arch value (%s) inconsistent with build config (%s).' % (
            options.arch, self.build_config.arch))
      raise TestRunnerError()

    if options.shell_dir:  # pragma: no cover
      print('Warning: --shell-dir is deprecated. Searching for executables in '
            'build directory (%s) instead.' % self.outdir)

    if options.j == 0:
      if self.build_config.is_android:
        # Adb isn't happy about multi-processed file pushing.
        options.j = 1
      else:
        options.j = multiprocessing.cpu_count()

    options.command_prefix = shlex.split(options.command_prefix)
    options.extra_flags = sum(map(shlex.split, options.extra_flags), [])

  def _bot_to_v8_mode(self, config):
    """Convert build configs from bots to configs understood by the v8 runner.

    V8 configs are always lower case and without the additional _x64 suffix
    for 64 bit builds on windows with ninja.
    """
    mode = config[:-4] if config.endswith('_x64') else config
    return mode.lower()
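
  # Illustrative examples for _bot_to_v8_mode above (not executed):
  #
  #   self._bot_to_v8_mode('Release_x64')  # -> 'release'
  #   self._bot_to_v8_mode('Debug')        # -> 'debug'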

  def _process_options(self, options):
    pass

  def _setup_env(self):
    # Use the v8 root as cwd as some test cases use "load" with relative paths.
    os.chdir(self.basedir)

    # Many tests assume an English interface.
    os.environ['LANG'] = 'en_US.UTF-8'

    symbolizer_option = self._get_external_symbolizer_option()

    if self.build_config.asan:
      asan_options = [
        symbolizer_option,
        'allow_user_segv_handler=1',
        'allocator_may_return_null=1',
      ]
      if utils.GuessOS() not in ['macos', 'windows']:
        # LSAN is not available on mac and windows.
        asan_options.append('detect_leaks=1')
      else:
        asan_options.append('detect_leaks=0')
      if utils.GuessOS() == 'windows':
        # https://crbug.com/967663
        asan_options.append('detect_stack_use_after_return=0')
      os.environ['ASAN_OPTIONS'] = ":".join(asan_options)

    if self.build_config.cfi_vptr:
      os.environ['UBSAN_OPTIONS'] = ":".join([
        'print_stacktrace=1',
        'print_summary=1',
        'symbolize=1',
        symbolizer_option,
      ])

    if self.build_config.ubsan_vptr:
      os.environ['UBSAN_OPTIONS'] = ":".join([
        'print_stacktrace=1',
        symbolizer_option,
      ])

    if self.build_config.msan:
      os.environ['MSAN_OPTIONS'] = symbolizer_option

    if self.build_config.tsan:
      suppressions_file = os.path.join(
          self.basedir,
          'tools',
          'sanitizers',
          'tsan_suppressions.txt')
      os.environ['TSAN_OPTIONS'] = " ".join([
        symbolizer_option,
        'suppressions=%s' % suppressions_file,
        'exit_code=0',
        'report_thread_leaks=0',
        'history_size=7',
        'report_destroy_locked=0',
      ])

  def _get_external_symbolizer_option(self):
    external_symbolizer_path = os.path.join(
        self.basedir,
        'third_party',
        'llvm-build',
        'Release+Asserts',
        'bin',
        'llvm-symbolizer',
    )

    if utils.IsWindows():
      # Quote, because sanitizers might confuse the colon with an option
      # separator.
      external_symbolizer_path = '"%s.exe"' % external_symbolizer_path

    return 'external_symbolizer_path=%s' % external_symbolizer_path

  def _parse_test_args(self, args):
    if not args:
      args = self._get_default_suite_names()

    # Expand arguments with grouped tests. The args should reflect the list
    # of suites as otherwise filters would break.
    def expand_test_group(name):
      return TEST_MAP.get(name, [name])

    return reduce(list.__add__, map(expand_test_group, args), [])
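
  # Illustrative example for _parse_test_args above: group names from TEST_MAP
  # are expanded in place, so
  #
  #   ['d8_default', 'cctest/test-api']
  #
  # becomes
  #
  #   ['debugger', 'mjsunit', 'webkit', 'message', 'intl', 'cctest/test-api']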

  def _args_to_suite_names(self, args, test_root):
    # Match the given arguments against the suites available under the test
    # root; unknown names are dropped.
    all_names = set(utils.GetSuitePaths(test_root))
    args_names = OrderedDict(
        [(arg.split('/')[0], None) for arg in args])  # used as an ordered set
    return [name for name in args_names if name in all_names]

  def _get_default_suite_names(self):
    return []

  def _load_testsuite_generators(self, args, options):
    names = self._args_to_suite_names(args, options.test_root)
    test_config = self._create_test_config(options)
    variables = self._get_statusfile_variables(options)

    # Head generator with no elements
    test_chain = testsuite.TestGenerator(0, [], [])
    for name in names:
      if options.verbose:
        print('>>> Loading test suite: %s' % name)
      suite = testsuite.TestSuite.Load(
          os.path.join(options.test_root, name), test_config,
          self.framework_name)

      if self._is_testsuite_supported(suite, options):
        tests = suite.load_tests_from_disk(variables)
        test_chain.merge(tests)

    return test_chain

  def _is_testsuite_supported(self, suite, options):
    """A predicate that can be overridden to filter out unsupported TestSuite
    instances (see NumFuzzer for usage)."""
    return True

  def _get_statusfile_variables(self, options):
    simd_mips = (
      self.build_config.arch in ['mipsel', 'mips', 'mips64', 'mips64el'] and
      self.build_config.mips_arch_variant == "r6" and
      self.build_config.mips_use_msa)

    mips_arch_variant = (
      self.build_config.arch in ['mipsel', 'mips', 'mips64', 'mips64el'] and
      self.build_config.mips_arch_variant)

    # TODO(machenbach): In GN we can derive simulator run from
    # target_arch != v8_target_arch in the dumped build config.
    return {
      "arch": self.build_config.arch,
      "asan": self.build_config.asan,
      "byteorder": sys.byteorder,
      "dcheck_always_on": self.build_config.dcheck_always_on,
      "deopt_fuzzer": False,
      "endurance_fuzzer": False,
      "gc_fuzzer": False,
      "gc_stress": False,
      "gcov_coverage": self.build_config.gcov_coverage,
      "isolates": options.isolates,
      "is_clang": self.build_config.is_clang,
      "is_full_debug": self.build_config.is_full_debug,
      "mips_arch_variant": mips_arch_variant,
      "mode": self.mode_options.status_mode
              if not self.build_config.dcheck_always_on
              else "debug",
      "msan": self.build_config.msan,
      "no_harness": options.no_harness,
      "no_i18n": self.build_config.no_i18n,
      "novfp3": False,
      "optimize_for_size": "--optimize-for-size" in options.extra_flags,
      "predictable": self.build_config.predictable,
      "simd_mips": simd_mips,
      "simulator_run": False,
      "system": self.target_os,
      "tsan": self.build_config.tsan,
      "ubsan_vptr": self.build_config.ubsan_vptr,
      "verify_csa": self.build_config.verify_csa,
      "lite_mode": self.build_config.lite_mode,
      "pointer_compression": self.build_config.pointer_compression,
    }

  def _runner_flags(self):
    """Extra default flags specific to the test runner implementation."""
    return []

  def _create_test_config(self, options):
    timeout = options.timeout * self._timeout_scalefactor(options)
    return TestConfig(
        command_prefix=options.command_prefix,
        extra_flags=options.extra_flags,
        isolates=options.isolates,
        mode_flags=self.mode_options.flags + self._runner_flags(),
        no_harness=options.no_harness,
        noi18n=self.build_config.no_i18n,
        random_seed=options.random_seed,
        run_skipped=options.run_skipped,
        shell_dir=self.outdir,
        timeout=timeout,
        verbose=options.verbose,
        regenerate_expected_files=options.regenerate_expected_files,
    )
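
  # Illustrative numbers for _timeout_scalefactor below: a debug build
  # (scale factor 4) on arm (x4.5) turns the default --timeout=60 into
  # 60 * 4 * 4.5 = 1080 seconds per test.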

  def _timeout_scalefactor(self, options):
    """Increases timeout for slow build configurations."""
    factor = self.mode_options.timeout_scalefactor
    if self.build_config.arch in SLOW_ARCHS:
      factor *= 4.5
    if self.build_config.lite_mode:
      factor *= 2
    if self.build_config.predictable:
      factor *= 4
    if self.build_config.use_sanitizer:
      factor *= 1.5
    if self.build_config.is_full_debug:
      factor *= 4

    return factor

  # TODO(majeski): remove options & args parameters
  def _do_execute(self, suites, args, options):
    raise NotImplementedError()

  def _prepare_procs(self, procs):
    # list() because filter() returns a lazy iterator on Python 3.
    procs = list(filter(None, procs))
    for i in range(0, len(procs) - 1):
      procs[i].connect_to(procs[i + 1])
    procs[0].setup()

  def _create_shard_proc(self, options):
    myid, count = self._get_shard_info(options)
    if count == 1:
      return None
    return ShardProc(myid - 1, count)
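
  # Illustrative example for _get_shard_info below: with GTEST_TOTAL_SHARDS=4
  # and GTEST_SHARD_INDEX=1 in the environment (as set by swarming), the
  # returned pair is (2, 4); the same result comes from passing
  # --shard-count=4 --shard-run=2 on the command line.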
792 print("shard-run not a valid number, should be in [1:shard-count]") 793 print("defaulting back to running all tests") 794 return 1, 1 795 796 return shard_run, shard_count 797 798 def _create_progress_indicators(self, test_count, options): 799 procs = [PROGRESS_INDICATORS[options.progress]()] 800 if options.json_test_results: 801 procs.append(progress.JsonTestProgressIndicator( 802 self.framework_name, 803 self.build_config.arch, 804 self.mode_options.execution_mode)) 805 806 for proc in procs: 807 proc.configure(options) 808 809 for proc in procs: 810 try: 811 proc.set_test_count(test_count) 812 except AttributeError: 813 pass 814 815 return procs 816 817 def _create_result_tracker(self, options): 818 return progress.ResultsTracker(options.exit_after_n_failures) 819 820 def _create_timeout_proc(self, options): 821 if not options.total_timeout_sec: 822 return None 823 return TimeoutProc(options.total_timeout_sec) 824 825 def _create_signal_proc(self): 826 return SignalProc() 827 828 def _create_rerun_proc(self, options): 829 if not options.rerun_failures_count: 830 return None 831 return RerunProc(options.rerun_failures_count, 832 options.rerun_failures_max) 833