#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from __future__ import print_function, unicode_literals

import math
import os
import platform
import posixpath
import shlex
import subprocess
import sys
import traceback


# Python 2 compatibility: prompt with raw_input instead of input.
read_input = input
if sys.version_info.major == 2:
    read_input = raw_input


def add_tests_dir_to_path():
    from os.path import dirname, exists, join, realpath

    js_src_dir = dirname(dirname(realpath(sys.argv[0])))
    assert exists(join(js_src_dir, "jsapi.h"))
    sys.path.insert(0, join(js_src_dir, "tests"))


add_tests_dir_to_path()

from lib import jittests
from lib.tests import (
    get_jitflags,
    valid_jitflags,
    get_cpu_count,
    get_environment_overlay,
    change_env,
)
from lib.tempfile import TemporaryDirectory


# Resolve `name` to an absolute path: as given if it already contains a
# path separator, otherwise via the first match on $PATH. Falls back to
# returning `name` unchanged if no match is found.
def which(name):
    if name.find(os.path.sep) != -1:
        return os.path.abspath(name)

    for path in os.environ["PATH"].split(os.pathsep):
        full = os.path.join(path, name)
        if os.path.exists(full):
            return os.path.abspath(full)

    return name


# Present a numbered menu of jobs and return the one the user picks.
def choose_item(jobs, max_items, display):
    job_count = len(jobs)

    # Don't present a choice if there are too many tests
    if job_count > max_items:
        raise Exception("Too many jobs.")

    for i, job in enumerate(jobs, 1):
        print("{}) {}".format(i, display(job)))

    item = read_input("Which one:\n")
    try:
        item = int(item)
        if item > job_count or item < 1:
            raise Exception("Input isn't between 1 and {}".format(job_count))
    except ValueError:
        raise Exception("Unrecognized input")

    return jobs[item - 1]


def main(argv):
    # The [TESTS] optional arguments are paths of test files relative
    # to the jit-test/tests directory.
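    # For example (test path is illustrative):
    #   jit_test.py <js-shell> basic/testAddition.js
    # would run only jit-test/tests/basic/testAddition.js.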
    import argparse

    op = argparse.ArgumentParser(description="Run jit-test JS shell tests")
    op.add_argument(
        "-s",
        "--show-cmd",
        dest="show_cmd",
        action="store_true",
        help="show js shell command run",
    )
    op.add_argument(
        "-f",
        "--show-failed-cmd",
        dest="show_failed",
        action="store_true",
        help="show command lines of failed tests",
    )
    op.add_argument(
        "-o",
        "--show-output",
        dest="show_output",
        action="store_true",
        help="show output from js shell",
    )
    op.add_argument(
        "-F",
        "--failed-only",
        dest="failed_only",
        action="store_true",
        help="if --show-output is given, only print output for failed tests",
    )
    op.add_argument(
        "--no-show-failed",
        dest="no_show_failed",
        action="store_true",
        help="don't print output for failed tests (no-op with --show-output)",
    )
    op.add_argument(
        "-x",
        "--exclude",
        dest="exclude",
        default=[],
        action="append",
        help="exclude given test dir or path",
    )
    op.add_argument(
        "--exclude-from",
        dest="exclude_from",
        type=str,
        help="exclude each test dir or path in FILE",
    )
    op.add_argument(
        "--slow",
        dest="run_slow",
        action="store_true",
        help="also run tests marked as slow",
    )
    op.add_argument(
        "--no-slow",
        dest="run_slow",
        action="store_false",
        help="do not run tests marked as slow (the default)",
    )
    op.add_argument(
        "-t",
        "--timeout",
        dest="timeout",
        type=float,
        default=150.0,
        help="set test timeout in seconds",
    )
    op.add_argument(
        "--no-progress",
        dest="hide_progress",
        action="store_true",
        help="hide progress bar",
    )
    op.add_argument(
        "--tinderbox",
        dest="format",
        action="store_const",
        const="automation",
        help="Use automation-parseable output format",
    )
    op.add_argument(
        "--format",
        dest="format",
        default="none",
        choices=("automation", "none"),
        help="Output format (default %(default)s).",
    )
    op.add_argument(
        "--args",
        dest="shell_args",
        metavar="ARGS",
        default="",
        help="extra args to pass to the JS shell",
    )
    op.add_argument(
        "--feature-args",
        dest="feature_args",
        metavar="ARGS",
        default="",
        help="even more args to pass to the JS shell "
        "(for compatibility with jstests.py)",
    )
    op.add_argument(
        "-w",
        "--write-failures",
        dest="write_failures",
        metavar="FILE",
        help="Write a list of failed tests to [FILE]",
    )
    op.add_argument(
        "-C",
        "--check-output",
        action="store_true",
        dest="check_output",
        help="Run tests to check output for different jit-flags",
    )
    op.add_argument(
        "-r",
        "--read-tests",
        dest="read_tests",
        metavar="FILE",
        help="Run test files listed in [FILE]",
    )
    op.add_argument(
        "-R",
        "--retest",
        dest="retest",
        metavar="FILE",
        help="Retest using test list file [FILE]",
    )
    op.add_argument(
        "-g",
        "--debug",
        action="store_const",
        const="gdb",
        dest="debugger",
        help="Run a single test under the gdb debugger",
    )
    op.add_argument(
        "-G",
        "--debug-rr",
        action="store_const",
        const="rr",
        dest="debugger",
        help="Run a single test under the rr debugger",
    )
    op.add_argument(
        "--debugger", type=str, help="Run a single test under the specified debugger"
    )
    op.add_argument(
        "--valgrind",
        dest="valgrind",
action="store_true", 237 help="Enable the |valgrind| flag, if valgrind is in $PATH.", 238 ) 239 op.add_argument( 240 "--unusable-error-status", 241 action="store_true", 242 help="Ignore incorrect exit status on tests that should return nonzero.", 243 ) 244 op.add_argument( 245 "--valgrind-all", 246 dest="valgrind_all", 247 action="store_true", 248 help="Run all tests with valgrind, if valgrind is in $PATH.", 249 ) 250 op.add_argument( 251 "--avoid-stdio", 252 dest="avoid_stdio", 253 action="store_true", 254 help="Use js-shell file indirection instead of piping stdio.", 255 ) 256 op.add_argument( 257 "--write-failure-output", 258 dest="write_failure_output", 259 action="store_true", 260 help="With --write-failures=FILE, additionally write the" 261 " output of failed tests to [FILE]", 262 ) 263 op.add_argument( 264 "--jitflags", 265 dest="jitflags", 266 default="none", 267 choices=valid_jitflags(), 268 help="IonMonkey option combinations (default %(default)s).", 269 ) 270 op.add_argument( 271 "--ion", 272 dest="jitflags", 273 action="store_const", 274 const="ion", 275 help="Run tests once with --ion-eager and once with" 276 " --baseline-eager (equivalent to --jitflags=ion)", 277 ) 278 op.add_argument( 279 "--no-xdr", 280 dest="use_xdr", 281 action="store_false", 282 help="Whether to disable caching of self-hosted parsed content in XDR format.", 283 ) 284 op.add_argument( 285 "--tbpl", 286 dest="jitflags", 287 action="store_const", 288 const="all", 289 help="Run tests with all IonMonkey option combinations" 290 " (equivalent to --jitflags=all)", 291 ) 292 op.add_argument( 293 "-j", 294 "--worker-count", 295 dest="max_jobs", 296 type=int, 297 default=max(1, get_cpu_count()), 298 help="Number of tests to run in parallel (default %(default)s).", 299 ) 300 op.add_argument( 301 "--remote", action="store_true", help="Run tests on a remote device" 302 ) 303 op.add_argument( 304 "--deviceIP", 305 action="store", 306 type=str, 307 dest="device_ip", 308 help="IP address of remote device to test", 309 ) 310 op.add_argument( 311 "--devicePort", 312 action="store", 313 type=int, 314 dest="device_port", 315 default=20701, 316 help="port of remote device to test", 317 ) 318 op.add_argument( 319 "--deviceSerial", 320 action="store", 321 type=str, 322 dest="device_serial", 323 default=None, 324 help="ADB device serial number of remote device to test", 325 ) 326 op.add_argument( 327 "--remoteTestRoot", 328 dest="remote_test_root", 329 action="store", 330 type=str, 331 default="/data/local/tmp/test_root", 332 help="The remote directory to use as test root" " (e.g. %(default)s)", 333 ) 334 op.add_argument( 335 "--localLib", 336 dest="local_lib", 337 action="store", 338 type=str, 339 help="The location of libraries to push -- preferably" " stripped", 340 ) 341 op.add_argument( 342 "--repeat", type=int, default=1, help="Repeat tests the given number of times." 343 ) 344 op.add_argument("--this-chunk", type=int, default=1, help="The test chunk to run.") 345 op.add_argument( 346 "--total-chunks", type=int, default=1, help="The total number of test chunks." 
    )
    op.add_argument(
        "--ignore-timeouts",
        dest="ignore_timeouts",
        metavar="FILE",
        help="Ignore timeouts of tests listed in [FILE]",
    )
    op.add_argument(
        "--retry-remote-timeouts",
        dest="timeout_retry",
        type=int,
        default=1,
        help="Number of times to retry a timeout on remote devices",
    )
    op.add_argument(
        "--test-reflect-stringify",
        dest="test_reflect_stringify",
        help="instead of running tests, use them to test the "
        "Reflect.stringify code in specified file",
    )
    # --enable-webrender is ignored as it is not relevant for JIT
    # tests, but is required for harness compatibility.
    op.add_argument(
        "--enable-webrender",
        action="store_true",
        dest="enable_webrender",
        default=False,
        help=argparse.SUPPRESS,
    )
    op.add_argument("js_shell", metavar="JS_SHELL", help="JS shell to run tests with")
    op.add_argument(
        "-z", "--gc-zeal", help="GC zeal mode to use when running the shell"
    )

    options, test_args = op.parse_known_args(argv)
    js_shell = which(options.js_shell)
    test_environment = get_environment_overlay(js_shell, options.gc_zeal)

    # On Windows, accept a bare shell name if an executable .exe variant
    # of it exists; otherwise report the shell as not executable.
    if not (os.path.isfile(js_shell) and os.access(js_shell, os.X_OK)):
        if (
            platform.system() != "Windows"
            or os.path.isfile(js_shell)
            or not os.path.isfile(js_shell + ".exe")
            or not os.access(js_shell + ".exe", os.X_OK)
        ):
            op.error("shell is not executable: " + js_shell)

    if jittests.stdio_might_be_broken():
        # Prefer erring on the side of caution and not using stdio if
        # it might be broken on this platform. The file-redirect
        # fallback should work on any platform, so at worst by
        # guessing wrong we might have slowed down the tests a bit.
        #
        # XXX technically we could check for broken stdio, but it
        # really seems like overkill.
        options.avoid_stdio = True

    if options.retest:
        options.read_tests = options.retest
        options.write_failures = options.retest

    test_list = []
    read_all = True

    if test_args:
        read_all = False
        for arg in test_args:
            test_list += jittests.find_tests(arg)

    if options.read_tests:
        read_all = False
        try:
            with open(options.read_tests) as f:
                for line in f:
                    test_list.append(
                        os.path.join(jittests.TEST_DIR, line.strip("\n"))
                    )
        except IOError:
            if options.retest:
                read_all = True
            else:
                sys.stderr.write(
                    "Exception thrown trying to read test file"
                    " '{}'\n".format(options.read_tests)
                )
                traceback.print_exc()
                sys.stderr.write("---\n")

    if read_all:
        test_list = jittests.find_tests()

    if options.exclude_from:
        with open(options.exclude_from) as fh:
            for line in fh:
                line_exclude = line.strip()
                if not line_exclude.startswith("#") and len(line_exclude):
                    options.exclude.append(line_exclude)

    if options.exclude:
        exclude_list = []
        for exclude in options.exclude:
            exclude_list += jittests.find_tests(exclude)
        test_list = [test for test in test_list if test not in set(exclude_list)]

    if not test_list:
        print("No tests found matching command line arguments.", file=sys.stderr)
        sys.exit(0)

    test_list = [jittests.JitTest.from_file(_, options) for _ in test_list]

    if not options.run_slow:
        test_list = [_ for _ in test_list if not _.slow]

    if options.test_reflect_stringify is not None:
        for test in test_list:
            test.test_reflect_stringify = options.test_reflect_stringify

    # If chunking is enabled, determine which tests are part of this chunk.
    # This code was adapted from testing/mochitest/runtestsremote.py.
    # For example, 10 tests split into --total-chunks=3 gives
    # ceil(10 / 3) = 4 tests per chunk: chunk 1 runs [0:4], chunk 2 runs
    # [4:8], and chunk 3 runs the remaining [8:12].
    if options.total_chunks > 1:
        total_tests = len(test_list)
        tests_per_chunk = math.ceil(total_tests / float(options.total_chunks))
        start = int(round((options.this_chunk - 1) * tests_per_chunk))
        end = int(round(options.this_chunk * tests_per_chunk))
        test_list = test_list[start:end]

    if not test_list:
        print(
            "No tests found matching command line arguments after filtering.",
            file=sys.stderr,
        )
        sys.exit(0)

    # The full test list is ready. Now create copies for each JIT configuration.
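    # get_jitflags expands the --jitflags name into a list of flag sets;
    # per the --ion help text above, "ion" yields one run with --ion-eager
    # and one with --baseline-eager.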
    test_flags = get_jitflags(options.jitflags)

    test_list = [_ for test in test_list for _ in test.copy_variants(test_flags)]

    job_list = (test for test in test_list)
    job_count = len(test_list)

    if options.repeat:

        # Yield the original test objects on the first pass and fresh
        # copies on subsequent passes, so repeated runs don't share state.
        def repeat_copy(job_list_generator, repeat):
            job_list = list(job_list_generator)
            for i in range(repeat):
                for test in job_list:
                    if i == 0:
                        yield test
                    else:
                        yield test.copy()

        job_list = repeat_copy(job_list, options.repeat)
        job_count *= options.repeat

    if options.ignore_timeouts:
        try:
            with open(options.ignore_timeouts) as f:
                ignore = set()
                for line in f:
                    path = line.strip("\n")
                    ignore.add(path)
                options.ignore_timeouts = ignore
        except IOError:
            sys.exit("Error reading file: " + options.ignore_timeouts)
    else:
        options.ignore_timeouts = set()

    prefix = (
        [js_shell] + shlex.split(options.shell_args) + shlex.split(options.feature_args)
    )
    prologue = os.path.join(jittests.LIB_DIR, "prologue.js")
    if options.remote:
        prologue = posixpath.join(options.remote_test_root, "lib", "prologue.js")

    prefix += ["-f", prologue]

    if options.debugger:
        if job_count > 1:
            print(
                "Multiple tests match command line arguments,"
                " debugger can only run one"
            )
            jobs = list(job_list)

            def display_job(job):
                flags = ""
                if len(job.jitflags) != 0:
                    flags = "({})".format(" ".join(job.jitflags))
                return "{} {}".format(job.path, flags)

            try:
                tc = choose_item(jobs, max_items=50, display=display_job)
            except Exception as e:
                sys.exit(str(e))
        else:
            tc = next(job_list)

        if options.debugger == "gdb":
            debug_cmd = ["gdb", "--args"]
        elif options.debugger == "lldb":
            debug_cmd = ["lldb", "--"]
        elif options.debugger == "rr":
            debug_cmd = ["rr", "record"]
        else:
            debug_cmd = options.debugger.split()

        with change_env(test_environment):
            with TemporaryDirectory() as tempdir:
                if options.debugger == "rr":
                    # Record the test run, then replay it interactively.
                    subprocess.call(
                        debug_cmd
                        + tc.command(
                            prefix, jittests.LIB_DIR, jittests.MODULE_DIR, tempdir
                        )
                    )
                    os.execvp("rr", ["rr", "replay"])
                else:
                    os.execvp(
                        debug_cmd[0],
                        debug_cmd
                        + tc.command(
                            prefix, jittests.LIB_DIR, jittests.MODULE_DIR, tempdir
                        ),
                    )
        sys.exit()

    try:
        ok = None
        if options.remote:
            ok = jittests.run_tests(job_list, job_count, prefix, options, remote=True)
        else:
            with change_env(test_environment):
                ok = jittests.run_tests(job_list, job_count, prefix, options)
        if not ok:
            sys.exit(2)
    except OSError:
        if not os.path.exists(prefix[0]):
            print(
                "JS shell argument: file does not exist: '{}'".format(prefix[0]),
                file=sys.stderr,
            )
            sys.exit(1)
        else:
            raise


if __name__ == "__main__":
    main(sys.argv[1:])
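
# Example invocations (shell path and test names are illustrative):
#   jit_test.py dist/bin/js                       run the full suite
#   jit_test.py -j 8 --jitflags=all dist/bin/js   every JIT flag combination
#   jit_test.py -g dist/bin/js basic              pick one matching test, run under gdb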