# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
# Copyright (C) 2011 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import json
import os
import re
import StringIO
import sys
import unittest

from blinkpy.common import exit_codes
from blinkpy.common import path_finder
from blinkpy.common.host import Host
from blinkpy.common.host_mock import MockHost
from blinkpy.common.path_finder import WEB_TESTS_LAST_COMPONENT
from blinkpy.common.system.path import abspath_to_uri
from blinkpy.common.system.system_host import SystemHost

from blinkpy.web_tests import run_web_tests
from blinkpy.web_tests.models import test_expectations
from blinkpy.web_tests.models import test_failures
from blinkpy.web_tests.models.typ_types import ResultType
from blinkpy.web_tests.port import test
from blinkpy.web_tests.views.printing import Printer

_MOCK_ROOT = os.path.join(
    path_finder.get_chromium_src_dir(), 'third_party', 'pymock')
sys.path.insert(0, _MOCK_ROOT)
import mock  # pylint: disable=wrong-import-position


def parse_args(extra_args=None, tests_included=False):
    extra_args = extra_args or []
    args = []
    if not '--platform' in extra_args:
        args.extend(['--platform', 'test'])

    # Only add the default job count if the caller did not specify one.
    if not {'--jobs', '-j', '--child-processes'}.intersection(set(extra_args)):
        args.extend(['--jobs', '1'])
    args.extend(extra_args)
    if not tests_included:
        # We use the glob to test that globbing works.
        args.extend(['passes',
                     'http/tests',
                     'websocket/tests',
                     'failures/expected/*'])
    return run_web_tests.parse_args(args)


def passing_run(extra_args=None, port_obj=None, tests_included=False, host=None, shared_port=True):
    options, parsed_args = parse_args(extra_args, tests_included)
    if not port_obj:
        host = host or MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj

    printer = Printer(host, options, StringIO.StringIO())
    run_details = run_web_tests.run(port_obj, options, parsed_args, printer)
    return run_details.exit_code == 0


def logging_run(extra_args=None, port_obj=None, tests_included=False, host=None, shared_port=True):
    options, parsed_args = parse_args(extra_args=extra_args,
                                      tests_included=tests_included)
    host = host or MockHost()
    if not port_obj:
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    run_details, output = run_and_capture(port_obj, options, parsed_args, shared_port)
    return (run_details, output, host.user)


def run_and_capture(port_obj, options, parsed_args, shared_port=True):
    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
    logging_stream = StringIO.StringIO()
    printer = Printer(port_obj.host, options, logging_stream)
    run_details = run_web_tests.run(port_obj, options, parsed_args, printer)
    return (run_details, logging_stream)


def get_tests_run(args, host=None, port_obj=None):
    results = get_test_results(args, host=host, port_obj=port_obj)
    return [result.test_name for result in results]


def get_test_batches(args, host=None):
    results = get_test_results(args, host)
    batches = []
    batch = []
    current_pid = None
    for result in results:
        if batch and result.pid != current_pid:
            batches.append(batch)
            batch = []
        batch.append(result.test_name)
    if batch:
        batches.append(batch)
    return batches


def get_test_results(args, host=None, port_obj=None):
    options, parsed_args = parse_args(args, tests_included=True)

    host = host or MockHost()
    port_obj = port_obj or host.port_factory.get(port_name=options.platform, options=options)

    printer = Printer(host, options, StringIO.StringIO())
    run_details = run_web_tests.run(port_obj, options, parsed_args, printer)

    all_results = []
    if run_details.initial_results:
        all_results.extend(run_details.initial_results.all_results)

    for retry_results in run_details.all_retry_results:
        all_results.extend(retry_results.all_results)
    return all_results


def parse_full_results(full_results_text):
    json_to_eval = full_results_text.replace('ADD_RESULTS(', '').replace(');', '')
    compressed_results = json.loads(json_to_eval)
    return compressed_results


class StreamTestingMixin(object):

    def assert_contains(self, stream, string):
        self.assertIn(string, stream.getvalue())

    def assert_not_empty(self, stream):
        self.assertTrue(stream.getvalue())


class RunTest(unittest.TestCase, StreamTestingMixin):

    def setUp(self):
        # A real PlatformInfo object is used here instead of a
        # MockPlatformInfo because we need to actually check for
        # Windows and Mac to skip some tests.
        self._platform = SystemHost().platform

    def test_basic(self):
        options, args = parse_args(
            extra_args=['--json-failing-test-results', '/tmp/json_failing_test_results.json'],
            tests_included=True)
        logging_stream = StringIO.StringIO()
        host = MockHost()
        port_obj = host.port_factory.get(options.platform, options)
        printer = Printer(host, options, logging_stream)
        details = run_web_tests.run(port_obj, options, args, printer)

        # These numbers will need to be updated whenever we add new tests.
        self.assertEqual(details.initial_results.total, test.TOTAL_TESTS)
        self.assertEqual(details.initial_results.expected_skips, test.TOTAL_SKIPS)
        self.assertEqual(
            len(details.initial_results.unexpected_results_by_name),
            test.UNEXPECTED_PASSES + test.UNEXPECTED_FAILURES)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES)
        self.assertEqual(details.all_retry_results[0].total, test.UNEXPECTED_FAILURES)

        expected_tests = (
            details.initial_results.total
            - details.initial_results.expected_skips
            - len(details.initial_results.unexpected_results_by_name))
        expected_summary_str = ''
        if details.initial_results.expected_failures > 0:
            expected_summary_str = " (%d passed, %d didn't)" % (
                expected_tests - details.initial_results.expected_failures,
                details.initial_results.expected_failures)
        one_line_summary = "%d tests ran as expected%s, %d didn't:\n" % (
            expected_tests,
            expected_summary_str,
            len(details.initial_results.unexpected_results_by_name))
        self.assertIn(one_line_summary, logging_stream.buflist)

        # Ensure the results were summarized properly.
        self.assertEqual(details.summarized_failing_results['num_regressions'], details.exit_code)

        # Ensure the results were written out and displayed.
        failing_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json')
        json_to_eval = failing_results_text.replace('ADD_RESULTS(', '').replace(');', '')
        self.assertEqual(json.loads(json_to_eval), details.summarized_failing_results)

        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertEqual(json.loads(full_results_text), details.summarized_full_results)

        self.assertEqual(host.user.opened_urls, [abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_max_locked_shards(self):
        # Tests for the default of using one locked shard even in the case of
        # more than one child process.
        _, regular_output, _ = logging_run(['--debug-rwt-logging', '--jobs', '2'], shared_port=False)
        self.assertTrue(any('1 locked' in line for line in regular_output.buflist))

    def test_child_processes_2(self):
        _, regular_output, _ = logging_run(
            ['--debug-rwt-logging', '--jobs', '2'], shared_port=False)
        self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist]))

    def test_child_processes_min(self):
        _, regular_output, _ = logging_run(
            ['--debug-rwt-logging', '--jobs', '2', '-i', 'passes/virtual_passes', 'passes'],
            tests_included=True, shared_port=False)
        self.assertTrue(any(['Running 1 ' in line for line in regular_output.buflist]))

    def test_dryrun(self):
        tests_run = get_tests_run(['--dry-run'])
        self.assertEqual(tests_run, [])

        tests_run = get_tests_run(['-n'])
        self.assertEqual(tests_run, [])

    def test_enable_sanitizer(self):
        self.assertTrue(passing_run(['--enable-sanitizer', '--order', 'natural', 'failures/expected/text.html']))

    def test_exception_raised(self):
        # Exceptions raised by a worker are treated differently depending on
        # whether they are in-process or out. Inline exceptions work as normal,
        # which allows us to get the full stack trace and traceback from the
        # worker. The downside to this is that it could be any error, but this
        # is actually useful in testing.
        #
        # Exceptions raised in a separate process are re-packaged into
        # WorkerExceptions (a subclass of BaseException), which have a string
        # capture of the stack that can be printed, but don't display properly
        # in the unit test exception handlers.
        with self.assertRaises(BaseException):
            logging_run(['failures/expected/exception.html', '--jobs', '1'], tests_included=True)

        with self.assertRaises(BaseException):
            logging_run(
                ['--jobs', '2', '--skipped=ignore', 'failures/expected/exception.html', 'passes/text.html'],
                tests_included=True,
                shared_port=False)

    def test_device_failure(self):
        # Test that we handle a device going offline during a test properly.
        details, regular_output, _ = logging_run(['failures/expected/device_failure.html'], tests_included=True)
        self.assertEqual(details.exit_code, 0)
        self.assertTrue('worker/0 has failed' in regular_output.getvalue())

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        details, _, _ = logging_run(['failures/expected/keyboard.html', '--jobs', '1'], tests_included=True)
        self.assertEqual(details.exit_code, exit_codes.INTERRUPTED_EXIT_STATUS)

        _, regular_output, _ = logging_run(
            ['failures/expected/keyboard.html', 'passes/text.html', '--jobs', '2', '--skipped=ignore'],
            tests_included=True, shared_port=False)
        self.assertTrue(any(['Interrupted, exiting' in line for line in regular_output.buflist]))

    def test_no_tests_found(self):
        details, err, _ = logging_run(['resources'], tests_included=True)
        self.assertEqual(details.exit_code, exit_codes.NO_TESTS_EXIT_STATUS)
        self.assert_contains(err, 'No tests to run.\n')

    def test_no_tests_found_2(self):
        details, err, _ = logging_run(['foo'], tests_included=True)
        self.assertEqual(details.exit_code, exit_codes.NO_TESTS_EXIT_STATUS)
        self.assert_contains(err, 'No tests to run.\n')

    def test_no_tests_found_3(self):
        details, err, _ = logging_run(['--shard-index', '4', '--total-shards', '400', 'foo/bar.html'], tests_included=True)
        self.assertEqual(details.exit_code, exit_codes.NO_TESTS_EXIT_STATUS)
        self.assert_contains(err, 'No tests to run.\n')

    def test_no_tests_found_with_ok_flag(self):
        details, err, _ = logging_run(
            ['resources', '--zero-tests-executed-ok'], tests_included=True)
        self.assertEqual(details.exit_code, exit_codes.OK_EXIT_STATUS)
        self.assert_contains(err, 'No tests to run.\n')

    def test_no_tests_found_with_ok_flag_shards(self):
        details, err, _ = logging_run(
            ['--shard-index', '4', '--total-shards', '40', 'foo/bar.html', '--zero-tests-executed-ok'], tests_included=True)
        self.assertEqual(details.exit_code, exit_codes.OK_EXIT_STATUS)
        self.assert_contains(err, 'No tests to run.\n')

    def test_natural_order(self):
        tests_to_run = [
            'passes/audio.html',
            'failures/expected/text.html',
            'failures/unexpected/missing_text.html',
            'passes/args.html'
        ]
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual([
            'failures/expected/text.html',
            'failures/unexpected/missing_text.html',
            'passes/args.html',
            'passes/audio.html'
        ], tests_run)

    def test_natural_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['passes/args.html', 'passes/args.html', 'passes/audio.html', 'passes/audio.html'], tests_run)

    def test_random_order(self):
        tests_to_run = [
            'passes/audio.html',
            'failures/expected/text.html',
            'failures/unexpected/missing_text.html',
            'passes/args.html'
        ]
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(sorted(tests_to_run), sorted(tests_run))

    def test_random_order_with_seed(self):
        tests_to_run = [
            'failures/expected/text.html',
            'failures/unexpected/missing_text.html',
            'passes/args.html',
            'passes/audio.html',
        ]
        tests_run = get_tests_run(['--order=random', '--seed=5'] + sorted(tests_to_run))
        expected_order = [
            'failures/expected/text.html',
            'failures/unexpected/missing_text.html',
            'passes/audio.html',
            'passes/args.html',
        ]

        self.assertEqual(tests_run, expected_order)

    def test_random_order_with_timestamp_seed(self):
        tests_to_run = sorted([
            'failures/unexpected/missing_text.html',
            'failures/expected/text.html',
            'passes/args.html',
            'passes/audio.html',
        ])

        run_1 = get_tests_run(['--order=random'] + tests_to_run, host=MockHost(time_return_val=10))
        run_2 = get_tests_run(['--order=random'] + tests_to_run, host=MockHost(time_return_val=10))
        self.assertEqual(run_1, run_2)

        run_3 = get_tests_run(['--order=random'] + tests_to_run, host=MockHost(time_return_val=20))
        self.assertNotEqual(run_1, run_3)

    def test_random_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(tests_run.count('passes/audio.html'), 2)
        self.assertEqual(tests_run.count('passes/args.html'), 2)

    def test_no_order(self):
        tests_to_run = [
            'passes/audio.html',
            'failures/expected/text.html',
            'failures/unexpected/missing_text.html',
            'passes/args.html'
        ]
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_with_directory_entries_in_natural_order(self):
        tests_to_run = ['http/tests/ssl', 'perf/foo', 'http/tests/passes']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_run, ['http/tests/ssl/text.html', 'perf/foo/test.html',
                                     'http/tests/passes/image.html', 'http/tests/passes/text.html'])

    def test_repeat_each(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--repeat-each', '2', '--order', 'natural'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])

    def test_gtest_repeat(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--gtest_repeat', '2', '--order', 'natural'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])

    def test_gtest_repeat_overrides_iterations(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--iterations', '4', '--gtest_repeat', '2', '--order', 'natural'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])

    def test_ignore_flag(self):
        # Note that passes/image.html is expected to be run since we specified it directly.
        tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'])
        self.assertNotIn('passes/text.html', tests_run)
        self.assertIn('passes/image.html', tests_run)

    def test_skipped_flag(self):
        tests_run = get_tests_run(['passes'])
        self.assertNotIn('passes/skipped/skip.html', tests_run)
        num_tests_run_by_default = len(tests_run)

        # Check that nothing changes when we specify skipped=default.
        self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'])),
                         num_tests_run_by_default)

        # Now check that we run one more test (the skipped one).
        tests_run = get_tests_run(['--skipped=ignore', 'passes'])
        self.assertIn('passes/skipped/skip.html', tests_run)
        self.assertEqual(len(tests_run), num_tests_run_by_default + 1)

        # Now check that we only run the skipped test.
        self.assertEqual(get_tests_run(['--skipped=only', 'passes']), ['passes/skipped/skip.html'])

        # Now check that we don't run anything.
        self.assertEqual(get_tests_run(['--skipped=always', 'passes/skipped/skip.html']), [])

    def test_isolated_script_test_also_run_disabled_tests(self):
        self.assertEqual(
            sorted(get_tests_run(['--isolated-script-test-also-run-disabled-tests', 'passes'])),
            sorted(get_tests_run(['--skipped=ignore', 'passes']))
        )

    def test_gtest_also_run_disabled_tests(self):
        self.assertEqual(
            sorted(get_tests_run(['--gtest_also_run_disabled_tests', 'passes'])),
            sorted(get_tests_run(['--skipped=ignore', 'passes']))
        )

    def test_iterations(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--iterations', '2', '--order', 'natural'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])

    def test_repeat_each_iterations_num_tests(self):
        # The total number of tests should be:
        # number_of_tests * repeat_each * iterations
        host = MockHost()
        _, err, _ = logging_run(
            ['--iterations', '2', '--repeat-each', '4', '--debug-rwt-logging', 'passes/text.html', 'failures/expected/text.html'],
            tests_included=True, host=host)
        self.assert_contains(err, "All 16 tests ran as expected (8 passed, 8 didn't).\n")

    def test_skip_failing_tests(self):
        # This tests that we skip both known failing and known flaky tests. Because there are
        # no known flaky tests in the default test_expectations, we add additional expectations.
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', '# results: [ Failure Pass ]\npasses/image.html [ Failure Pass ]\n')

        batches = get_test_batches(['--skip-failing-tests', '--additional-expectations', '/tmp/overrides.txt'], host=host)
        has_passes_text = False
        for batch in batches:
            self.assertNotIn('failures/expected/text.html', batch)
            self.assertNotIn('passes/image.html', batch)
            has_passes_text = has_passes_text or ('passes/text.html' in batch)
        self.assertTrue(has_passes_text)

    def test_single_file(self):
        tests_run = get_tests_run(['passes/text.html'])
        self.assertEqual(tests_run, ['passes/text.html'])

    def test_single_file_with_prefix(self):
        tests_run = get_tests_run([WEB_TESTS_LAST_COMPONENT + '/passes/text.html'])
        self.assertEqual(['passes/text.html'], tests_run)

    def test_no_flag_specific_files_json_results(self):
        host = MockHost()
        port = host.port_factory.get('test-win-win7')
        host.filesystem.write_text_file(
            '/tmp/overrides.txt', '# results: [ Timeout ]\nfailures/expected/text.html [ Timeout ]')
        self.assertTrue(logging_run(
            ['--order', 'natural', 'failures/expected/text.html', '--num-retries', '1',
             '--additional-driver-flag', '--composite-after-paint', '--additional-expectations',
             '/tmp/overrides.txt'],
            tests_included=True, host=host))
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        test_results = results['tests']['failures']['expected']['text.html']
        self.assertNotIn('flag_name', results)
        self.assertNotIn('flag_expectations', test_results)
        self.assertNotIn('base_expectations', test_results)

    def test_no_flag_expectations_found_json_results(self):
        host = MockHost()
        port = host.port_factory.get('test-win-win7')
        flag_exp_path = host.filesystem.join(
            port.web_tests_dir(), 'FlagExpectations', 'composite-after-paint')
        host.filesystem.write_text_file(
            '/tmp/overrides.txt', '# results: [ Timeout ]\nfailures/expected/text.html [ Timeout ]')
        host.filesystem.write_text_file(flag_exp_path, '')
        self.assertTrue(logging_run(
            ['--order', 'natural', 'failures/expected/text.html', '--num-retries', '1',
             '--additional-driver-flag', '--composite-after-paint', '--additional-expectations',
             '/tmp/overrides.txt'],
            tests_included=True, host=host))
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        test_results = results['tests']['failures']['expected']['text.html']
        self.assertEqual(results['flag_name'], '/composite-after-paint')
        self.assertNotIn('flag_expectations', test_results)
        self.assertNotIn('base_expectations', test_results)

    def test_slow_flag_expectations_in_json_results(self):
        host = MockHost()
        port = host.port_factory.get('test-win-win7')
        flag_exp_path = host.filesystem.join(
            port.web_tests_dir(), 'FlagExpectations', 'composite-after-paint')
        host.filesystem.write_text_file(
            '/tmp/overrides.txt', '# results: [ Timeout ]\nfailures/expected/text.html [ Timeout ]')
        host.filesystem.write_text_file(
            flag_exp_path,
            '# results: [ Slow ]\nfailures/expected/text.html [ Slow ]')
        self.assertTrue(logging_run(
            ['--order', 'natural', 'failures/expected/text.html', '--num-retries', '1',
             '--additional-driver-flag', '--composite-after-paint', '--additional-expectations',
             '/tmp/overrides.txt'],
            tests_included=True, host=host))
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        test_results = results['tests']['failures']['expected']['text.html']
        self.assertEqual(results['flag_name'], '/composite-after-paint')
        self.assertEqual(test_results['flag_expectations'], ['PASS'])
        self.assertEqual(test_results['base_expectations'], ['FAIL', 'TIMEOUT'])

    def test_flag_and_base_expectations_in_json_results(self):
        host = MockHost()
        port = host.port_factory.get('test-win-win7')
        flag_exp_path = host.filesystem.join(
            port.web_tests_dir(), 'FlagExpectations', 'composite-after-paint')
        host.filesystem.write_text_file(
            '/tmp/overrides.txt', '# results: [ Timeout ]\nfailures/expected/text.html [ Timeout ]')
        host.filesystem.write_text_file(
            flag_exp_path,
            '# results: [ Crash Failure ]\nfailures/expected/text.html [ Crash Failure ]')
        self.assertTrue(logging_run(
            ['--order', 'natural', 'failures/expected/text.html', '--num-retries', '1',
             '--additional-driver-flag', '--composite-after-paint', '--additional-expectations',
             '/tmp/overrides.txt'],
            tests_included=True, host=host))
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        test_results = results['tests']['failures']['expected']['text.html']
        self.assertEqual(results['flag_name'], '/composite-after-paint')
        self.assertEqual(test_results['flag_expectations'], ['FAIL', 'CRASH'])
        self.assertEqual(test_results['base_expectations'], ['FAIL', 'TIMEOUT'])

    def test_flag_and_default_base_expectations_in_json_results(self):
        host = MockHost()
        port = host.port_factory.get('test-win-win7')
        flag_exp_path = host.filesystem.join(
            port.web_tests_dir(), 'FlagExpectations', 'composite-after-paint')
        host.filesystem.write_text_file(
            flag_exp_path, '# results: [ Failure ]\npasses/args.html [ Failure ]')
        self.assertTrue(logging_run(
            ['--order', 'natural', 'passes/args.html', '--num-retries', '1',
             '--additional-driver-flag', '--composite-after-paint'],
            tests_included=True, host=host))
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        test_results = results['tests']['passes']['args.html']
        self.assertEqual(results['flag_name'], '/composite-after-paint')
        self.assertEqual(test_results['flag_expectations'], ['FAIL'])
        self.assertEqual(test_results['base_expectations'], ['PASS'])

    def test_stderr_is_saved(self):
        host = MockHost()
        self.assertTrue(passing_run(host=host))
        self.assertEqual(
            host.filesystem.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
            'stuff going to stderr')
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        test_results = results['tests']['passes']['error.html']
        self.assertEqual(test_results['artifacts']['stderr'], ['layout-test-results/passes/error-stderr.txt'])

    def test_crash_log_is_saved(self):
        host = MockHost()
        self.assertTrue(logging_run(
            ['--order', 'natural', 'failures/unexpected/crash.html', '--num-retries', '1'],
            tests_included=True, host=host))
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        self.assertEqual(
            host.filesystem.read_text_file(
                '/tmp/layout-test-results/failures/unexpected/crash-crash-log.txt'),
            'crash log')
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        test_results = results['tests']['failures']['unexpected']['crash.html']
        self.assertEqual(test_results['artifacts']['crash_log'], [
            'layout-test-results/failures/unexpected/crash-crash-log.txt',
            'layout-test-results/retry_1/failures/unexpected/crash-crash-log.txt'])

    def test_crash_log_is_saved_after_delay(self):
        host = MockHost()
        self.assertTrue(logging_run(
            ['--order', 'natural', 'failures/unexpected/crash-with-delayed-log.html',
             '--num-retries', '1'],
            tests_included=True, host=host))
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        self.assertEqual(
            host.filesystem.read_text_file(
                '/tmp/layout-test-results/failures/unexpected/crash-with-delayed-log-crash-log.txt'),
            'delayed crash log')
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        test_results = results['tests']['failures']['unexpected']['crash-with-delayed-log.html']
        self.assertEqual(test_results['artifacts']['crash_log'], [
            'layout-test-results/failures/unexpected/crash-with-delayed-log-crash-log.txt',
            'layout-test-results/retry_1/failures/unexpected/crash-with-delayed-log-crash-log.txt'])

    def test_reftest_mismatch_with_text_mismatch_only_writes_stderr_once(self):
        # Test that there is no exception when two failure types,
        # FailureTextMismatch and FailureReftestMismatch, both have the same
        # stderr to print out.
        host = MockHost()
        self.assertTrue(logging_run(
            ['--order', 'natural', 'failures/unexpected/reftest-mismatch-with-text-mismatch-with-stderr.html'],
            tests_included=True, host=host))

    @unittest.skip('Need to make subprocesses use mock filesystem')
    def test_crash_log_is_saved_after_delay_using_multiple_jobs(self):
        # TODO(rmhasan): When web_test_runner.run() spawns multiple jobs it uses
        # the non mock file system. We should figure out how to make all subprocesses
        # use the mock file system.
        host = MockHost()
        self.assertTrue(logging_run(
            ['--order', 'natural', 'failures/unexpected/crash-with-delayed-log.html',
             'passes/args.html', '-j', '2'],
            tests_included=True, host=host))
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        test_results = results['tests']['failures']['unexpected']['crash-with-delayed-log.html']
        self.assertEqual(test_results['artifacts']['crash_log'], [
            'failures/unexpected/crash-with-delayed-log-crash-log.txt'])

    def test_crash_sample_file_is_saved(self):
        host = MockHost()
        self.assertTrue(logging_run(
            ['--order', 'natural', 'failures/unexpected/crash-with-sample.html',
             '--num-retries', '1'],
            tests_included=True, host=host))
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        self.assertEqual(
            host.filesystem.read_text_file(
                '/tmp/layout-test-results/failures/unexpected/crash-with-sample-sample.txt'),
            'crash sample file')
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        test_results = results['tests']['failures']['unexpected']['crash-with-sample.html']
        self.assertEqual(test_results['artifacts']['sample_file'], [
            'layout-test-results/failures/unexpected/crash-with-sample-sample.txt',
            'layout-test-results/retry_1/failures/unexpected/crash-with-sample-sample.txt'])

    @unittest.skip('Need to make subprocesses use mock filesystem')
    def test_crash_sample_file_is_saved_multiple_jobs(self):
        host = MockHost()
        self.assertTrue(logging_run(
            ['--order', 'natural', 'failures/unexpected/crash-with-sample.html',
             'passes/image.html', '--num-retries', '1', '-j', '2'],
            tests_included=True, host=host))
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        test_results = results['tests']['failures']['unexpected']['crash-with-sample.html']
        self.assertEqual(test_results['artifacts']['sample_file'], [
            'layout-test-results/failures/unexpected/crash-with-sample-sample.txt',
            'layout-test-results/retry_1/failures/unexpected/crash-with-sample-sample.txt'])

    def test_reftest_crash_log_is_saved(self):
        host = MockHost()
        self.assertTrue(logging_run(
            ['--order', 'natural', 'failures/unexpected/crash-reftest.html', '--num-retries', '1'],
            tests_included=True, host=host))
        self.assertEqual(
            host.filesystem.read_text_file(
                '/tmp/layout-test-results/failures/unexpected/crash-reftest-crash-log.txt'),
            'reftest crash log')
        self.assertEqual(
            host.filesystem.read_text_file(
                '/tmp/layout-test-results/retry_1/failures/unexpected/crash-reftest-crash-log.txt'),
            'reftest crash log')
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        test_results = results['tests']['failures']['unexpected']['crash-reftest.html']
        self.assertEqual(test_results['artifacts']['crash_log'], [
            'layout-test-results/failures/unexpected/crash-reftest-crash-log.txt',
            'layout-test-results/retry_1/failures/unexpected/crash-reftest-crash-log.txt'])

    def test_test_list(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)
        host.filesystem.remove(filename)
        details, err, _ = logging_run(['--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, exit_codes.NO_TESTS_EXIT_STATUS)
        self.assert_not_empty(err)

    def test_test_list_with_prefix(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, WEB_TESTS_LAST_COMPONENT + '/passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)

    def test_isolated_script_test_filter(self):
        host = MockHost()
        tests_run = get_tests_run(
            ['--isolated-script-test-filter=passes/text.html::passes/image.html', 'passes/error.html'],
            host=host
        )
        self.assertEqual(sorted(tests_run), [])

        tests_run = get_tests_run(
            ['--isolated-script-test-filter=passes/error.html::passes/image.html', 'passes/error.html'],
            host=host
        )
        self.assertEqual(sorted(tests_run), ['passes/error.html'])

        tests_run = get_tests_run(
            ['--isolated-script-test-filter=-passes/error.html::passes/image.html'],
            host=host
        )
        self.assertEqual(sorted(tests_run), ['passes/image.html'])

        tests_run = get_tests_run(
            ['--isolated-script-test-filter=passes/error.html::passes/image.html',
             '--isolated-script-test-filter=-passes/error.html'],
            host=host
        )
        self.assertEqual(sorted(tests_run), ['passes/image.html'])

    def test_gtest_filter(self):
        host = MockHost()
        tests_run = get_tests_run(['--gtest_filter=passes/text.html:passes/image.html', 'passes/error.html'], host=host)
        self.assertEqual(sorted(tests_run), ['passes/error.html', 'passes/image.html', 'passes/text.html'])

    def test_sharding_even(self):
        # Test that we actually select the right part.
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']

        with mock.patch('__builtin__.hash', len):
            # Shard 0 of 2
            tests_run = get_tests_run(['--shard-index', '0', '--total-shards', '2', '--order', 'natural'] + tests_to_run)
            self.assertEqual(tests_run, ['passes/platform_image.html', 'passes/text.html'])
            # Shard 1 of 2
            tests_run = get_tests_run(['--shard-index', '1', '--total-shards', '2', '--order', 'natural'] + tests_to_run)
            self.assertEqual(tests_run, ['passes/error.html', 'passes/image.html'])

    def test_sharding_uneven(self):
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html',
                        'perf/foo/test.html']

        with mock.patch('__builtin__.hash', len):
            # Shard 0 of 3
            tests_run = get_tests_run(['--shard-index', '0', '--total-shards', '3', '--order', 'natural'] + tests_to_run)
            self.assertEqual(tests_run, ['perf/foo/test.html'])
            # Shard 1 of 3
            tests_run = get_tests_run(['--shard-index', '1', '--total-shards', '3', '--order', 'natural'] + tests_to_run)
            self.assertEqual(tests_run, ['passes/text.html'])
            # Shard 2 of 3
            tests_run = get_tests_run(['--shard-index', '2', '--total-shards', '3', '--order', 'natural'] + tests_to_run)
            self.assertEqual(tests_run, ['passes/error.html', 'passes/image.html', 'passes/platform_image.html'])

    def test_sharding_incorrect_arguments(self):
        with self.assertRaises(ValueError):
            get_tests_run(['--shard-index', '3'])
        with self.assertRaises(ValueError):
            get_tests_run(['--total-shards', '3'])
        with self.assertRaises(ValueError):
            get_tests_run(['--shard-index', '3', '--total-shards', '3'])

    def test_sharding_environ(self):
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        host = MockHost()

        with mock.patch('__builtin__.hash', len):
            host.environ['GTEST_SHARD_INDEX'] = '0'
            host.environ['GTEST_TOTAL_SHARDS'] = '2'
            shard_0_tests_run = get_tests_run(['--order', 'natural'] + tests_to_run, host=host)
            self.assertEqual(shard_0_tests_run, ['passes/platform_image.html', 'passes/text.html'])

            host.environ['GTEST_SHARD_INDEX'] = '1'
            host.environ['GTEST_TOTAL_SHARDS'] = '2'
            shard_1_tests_run = get_tests_run(['--order', 'natural'] + tests_to_run, host=host)
            self.assertEqual(shard_1_tests_run, ['passes/error.html', 'passes/image.html'])

    def test_smoke_test(self):
        host = MockHost()
        smoke_test_filename = test.WEB_TEST_DIR + '/SmokeTests'
        host.filesystem.write_text_file(smoke_test_filename, 'passes/text.html\n')

        # Test the default smoke testing.
        tests_run = get_tests_run(['--smoke', '--order', 'natural'], host=host)
        self.assertEqual(['passes/text.html'], tests_run)

        # Test running the smoke tests plus some manually-specified tests.
        tests_run = get_tests_run(['--smoke', 'passes/image.html', '--order', 'natural'], host=host)
        self.assertEqual(['passes/image.html', 'passes/text.html'], tests_run)

        # Test that --no-smoke runs only the manually-specified tests.
        tests_run = get_tests_run(['--no-smoke', 'passes/image.html', '--order', 'natural'], host=host)
        self.assertEqual(['passes/image.html'], tests_run)

        # Test that we don't run just the smoke tests by default on a normal test port.
        tests_run = get_tests_run(['--order', 'natural'], host=host)
        self.assertNotEqual(['passes/text.html'], tests_run)

        # Create a port that does run only the smoke tests by default, and verify that works as expected.
        port_obj = host.port_factory.get('test')
        port_obj.default_smoke_test_only = lambda: True
        tests_run = get_tests_run(['--order', 'natural'], host=host, port_obj=port_obj)
        self.assertEqual(['passes/text.html'], tests_run)

        # Verify that --no-smoke continues to work on a smoke-by-default port.
        tests_run = get_tests_run(['--no-smoke', 'passes/image.html', '--order', 'natural'],
                                  host=host, port_obj=port_obj)
        self.assertNotIn('passes/text.html', tests_run)

    def test_smoke_test_default_retry(self):
        host = MockHost()
        smoke_test_filename = test.WEB_TEST_DIR + '/SmokeTests'
        host.filesystem.write_text_file(
            smoke_test_filename, 'failures/unexpected/text-image-checksum.html\n')

        # Retry if additional tests are given.
        _, err, __ = logging_run(['--smoke', 'passes/image.html'], host=host, tests_included=True)
        self.assertIn('Retrying', err.getvalue())

    def test_missing_and_unexpected_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, _, _ = logging_run(['--no-show-results',
                                     'failures/unexpected/missing_text.html',
                                     'failures/unexpected/text-image-checksum.html'],
                                    tests_included=True, host=host)
        self.assertEqual(details.exit_code, 2)
        results = json.loads(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        results['tests']['failures']['unexpected']['text-image-checksum.html'].pop('artifacts')
        self.assertEqual(
            results['tests']['failures']['unexpected']['text-image-checksum.html'],
            {
                'expected': 'PASS',
                'actual': 'FAIL',
                'is_unexpected': True,
                'is_regression': True,
                'text_mismatch': 'general text mismatch',
            })
        results['tests']['failures']['unexpected']['missing_text.html'].pop('artifacts')
        self.assertEqual(
            results['tests']['failures']['unexpected']['missing_text.html'],
            {
                'expected': 'PASS',
                'actual': 'FAIL',
                'is_unexpected': True,
                'is_regression': True,
                'is_missing_text': True,
            })
        self.assertEqual(results['num_regressions'], 2)
        self.assertEqual(results['num_flaky'], 0)

    def test_different_failure_on_retry(self):
        # This tests that if a test fails two different ways -- both unexpected
        # -- we treat it as a failure rather than a flaky result. We use the
        # initial failure for simplicity and consistency w/ the flakiness
        # dashboard, even if the second failure is worse.

        details, _, _ = logging_run(['--num-retries=3', 'failures/unexpected/text_then_crash.html'], tests_included=True)
        self.assertEqual(details.exit_code, 1)
        self.assertEqual(details.summarized_failing_results['tests']['failures']['unexpected']['text_then_crash.html']['actual'],
                         'FAIL CRASH CRASH CRASH')

        # If we get a test that fails two different ways -- but the second one is expected --
        # we should treat it as a flaky result and report the initial unexpected failure type
        # to the dashboard. However, the test should be considered passing.
        details, _, _ = logging_run(['--num-retries=3', 'failures/expected/crash_then_text.html'], tests_included=True)
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(details.summarized_failing_results['tests']['failures']['expected']['crash_then_text.html']['actual'],
                         'CRASH FAIL')

    def test_watch(self):
        host = MockHost()
        host.user.set_canned_responses(['r', 'r', 'q'])
        _, output, _ = logging_run(['--watch', 'failures/unexpected/text.html'], tests_included=True, host=host)
        output_string = output.getvalue()
        self.assertIn(
            'Link to pretty diff:\nfile:///tmp/layout-test-results/failures/unexpected/text-pretty-diff.html', output_string)
        self.assertEqual(output_string.count('[1/1] failures/unexpected/text.html failed unexpectedly (text diff)'), 3)

    def test_crash_with_stderr(self):
        host = MockHost()
        logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
        full_results = json.loads(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        self.assertEqual(full_results['tests']['failures']['unexpected']['crash-with-stderr.html']['has_stderr'], True)

    def test_no_image_failure_with_image_diff(self):
        host = MockHost()
        logging_run(
            ['failures/unexpected/checksum-with-matching-image.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file(
            '/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)

    def test_exit_after_n_failures_upload(self):
        host = MockHost()
        details, regular_output, _ = logging_run(
            ['failures/unexpected/text-image-checksum.html', 'passes/text.html',
             '--exit-after-n-failures', '1', '--order', 'natural'],
            tests_included=True, host=host)

        # The incremental results should have been generated and then deleted,
        # so the file must no longer exist.
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))

        self.assertEqual(details.exit_code, exit_codes.EARLY_EXIT_STATUS)

        # This checks that passes/text.html is counted as skipped.
        self.assertIn('"skipped":1', host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))

        # This checks that we told the user we bailed out.
        self.assertTrue('Exiting early after 1 failures. 1 tests run.\n' in regular_output.getvalue())

        # This checks that neither test ran as expected.
        # FIXME: This log message is confusing; tests that were skipped should be called out separately.
        self.assertTrue('0 tests ran as expected, 2 didn\'t:\n' in regular_output.getvalue())

    def test_exit_after_n_failures(self):
        # Unexpected failures should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/text-image-checksum.html',
                                   'passes/text.html', '--exit-after-n-failures', '1',
                                   '--order', 'natural'])
        self.assertEqual(['failures/unexpected/text-image-checksum.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/text.html', 'passes/text.html',
                                   '--exit-after-n-failures', '1',
                                   '--order', 'natural'])
        self.assertEqual(['failures/expected/text.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes(self):
        # Unexpected crashes should result in tests stopping.
        tests_run = get_tests_run(['--order', 'natural', 'failures/unexpected/crash.html',
                                   'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/crash.html'], tests_run)

        # Same with timeouts.
        tests_run = get_tests_run(['failures/unexpected/timeout.html', 'passes/text.html',
                                   '--exit-after-n-crashes-or-timeouts', '1',
                                   '--order', 'natural'])
        self.assertEqual(['failures/unexpected/timeout.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/crash.html', 'passes/text.html',
                                   '--exit-after-n-crashes-or-timeouts', '1',
                                   '--order', 'natural'])
        self.assertEqual(['failures/expected/crash.html', 'passes/text.html'], tests_run)

    def test_results_directory_absolute(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        host = MockHost()
        with host.filesystem.mkdtemp() as tmpdir:
            _, _, user = logging_run(['--results-directory=' + str(tmpdir), '--order', 'natural'],
                                     tests_included=True, host=host)
            self.assertEqual(user.opened_urls, [abspath_to_uri(host.platform, host.filesystem.join(
                tmpdir, 'layout-test-results', 'results.html'))])

    def test_results_directory_default(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        # This is the default location.
        _, _, user = logging_run(tests_included=True)
        self.assertEqual(user.opened_urls, [abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_results_directory_relative(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.
        host = MockHost()
        host.filesystem.maybe_make_directory('/tmp/cwd')
        host.filesystem.chdir('/tmp/cwd')
        _, _, user = logging_run(['--results-directory=foo'], tests_included=True, host=host)
        self.assertEqual(user.opened_urls, [abspath_to_uri(host.platform, '/tmp/cwd/foo/layout-test-results/results.html')])

    def test_retrying_default_value(self):
        # Do not retry when the test list is explicit.
        host = MockHost()
        details, err, _ = logging_run(['failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertNotIn('Retrying', err.getvalue())

        # Retry 3 times by default when the test list is not explicit.
        host = MockHost()
        details, err, _ = logging_run(['failures/unexpected'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, test.UNEXPECTED_NON_VIRTUAL_FAILURES)
        self.assertIn('Retrying', err.getvalue())
        self.assertTrue(
            host.filesystem.exists('/tmp/layout-test-results/retry_1/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(
            host.filesystem.exists('/tmp/layout-test-results/retry_2/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(
            host.filesystem.exists('/tmp/layout-test-results/retry_3/failures/unexpected/text-image-checksum-actual.txt'))

    def test_retrying_default_value_test_list(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'failures/unexpected/text-image-checksum.html\nfailures/unexpected/crash.html')
        details, err, _ = logging_run(['--test-list=%s' % filename, '--order', 'natural'],
                                      tests_included=True, host=host)
        self.assertEqual(details.exit_code, 2)
        self.assertIn('Retrying', err.getvalue())

        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'failures')
        details, err, _ = logging_run(['--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, test.UNEXPECTED_NON_VIRTUAL_FAILURES)
        self.assertIn('Retrying', err.getvalue())

    def test_retrying_and_flaky_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--num-retries=3', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertIn('Retrying', err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retry_1/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retry_2/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retry_3/failures/flaky/text-actual.txt'))

    def test_retrying_crashed_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--num-retries=3', 'failures/unexpected/crash.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertIn('Retrying', err.getvalue())

    def test_retrying_leak_tests(self):
        host = MockHost()
        details, err, _ = logging_run(
            ['--num-retries=1', 'failures/unexpected/leak.html'],
            tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertIn('Retrying', err.getvalue())
        self.assertEqual(host.filesystem.read_text_file(
            '/tmp/layout-test-results/failures/unexpected/leak-leak-log.txt'),
            'leak detected')
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        test_results = results['tests']['failures']['unexpected']['leak.html']
        self.assertEqual(test_results['artifacts']['leak_log'], [
            'layout-test-results/failures/unexpected/leak-leak-log.txt',
            'layout-test-results/retry_1/failures/unexpected/leak-leak-log.txt'])

    def test_unexpected_text_mismatch(self):
        host = MockHost()
        details, _, _ = logging_run(
            ['--num-retries=1', 'failures/unexpected/text-mismatch-overlay.html'],
            tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        test_results = results['tests']['failures']['unexpected']['text-mismatch-overlay.html']
        self.assertEqual(test_results['artifacts']['actual_text'], [
            'layout-test-results/failures/unexpected/text-mismatch-overlay-actual.txt',
            'layout-test-results/retry_1/failures/unexpected/text-mismatch-overlay-actual.txt'])
        self.assertEqual(test_results['artifacts']['expected_text'], [
            'layout-test-results/failures/unexpected/text-mismatch-overlay-expected.txt',
            'layout-test-results/retry_1/failures/unexpected/text-mismatch-overlay-expected.txt'])
        self.assertEqual(test_results['artifacts']['text_diff'], [
            'layout-test-results/failures/unexpected/text-mismatch-overlay-diff.txt',
            'layout-test-results/retry_1/failures/unexpected/text-mismatch-overlay-diff.txt'])
        self.assertEqual(test_results['artifacts']['pretty_text_diff'], [
            'layout-test-results/failures/unexpected/text-mismatch-overlay-pretty-diff.html',
            'layout-test-results/retry_1/failures/unexpected/text-mismatch-overlay-pretty-diff.html'])
        self.assertEqual(test_results['artifacts']['overlay'], [
            'layout-test-results/failures/unexpected/text-mismatch-overlay-overlay.html',
            'layout-test-results/retry_1/failures/unexpected/text-mismatch-overlay-overlay.html'])

    def test_unexpected_no_text_baseline(self):
        host = MockHost()
        details, _, _ = logging_run(
            ['--num-retries=1', 'failures/unexpected/no-text-baseline.html'],
            tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        test_results = results['tests']['failures']['unexpected']['no-text-baseline.html']
        self.assertEqual(test_results['artifacts']['actual_text'], [
            'layout-test-results/failures/unexpected/no-text-baseline-actual.txt',
            'layout-test-results/retry_1/failures/unexpected/no-text-baseline-actual.txt'])
        self.assertNotIn('expected_text', test_results['artifacts'])
        self.assertEqual(test_results['artifacts']['text_diff'], [
            'layout-test-results/failures/unexpected/no-text-baseline-diff.txt',
            'layout-test-results/retry_1/failures/unexpected/no-text-baseline-diff.txt'])
        self.assertEqual(test_results['artifacts']['pretty_text_diff'], [
            'layout-test-results/failures/unexpected/no-text-baseline-pretty-diff.html',
            'layout-test-results/retry_1/failures/unexpected/no-text-baseline-pretty-diff.html'])
        self.assertNotIn('overlay', test_results['artifacts'])

    def test_unexpected_no_text_generated(self):
        host = MockHost()
        details, _, _ = logging_run(
            ['--num-retries=1', 'failures/unexpected/no-text-generated.html'],
            tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        test_results = results['tests']['failures']['unexpected']['no-text-generated.html']
        self.assertEqual(test_results['artifacts']['expected_text'], [
            'layout-test-results/failures/unexpected/no-text-generated-expected.txt',
            'layout-test-results/retry_1/failures/unexpected/no-text-generated-expected.txt'])
        self.assertNotIn('actual_text', test_results['artifacts'])
        self.assertEqual(test_results['artifacts']['text_diff'], [
            'layout-test-results/failures/unexpected/no-text-generated-diff.txt',
            'layout-test-results/retry_1/failures/unexpected/no-text-generated-diff.txt'])
        self.assertEqual(test_results['artifacts']['pretty_text_diff'], [
            'layout-test-results/failures/unexpected/no-text-generated-pretty-diff.html',
            'layout-test-results/retry_1/failures/unexpected/no-text-generated-pretty-diff.html'])
        self.assertNotIn('overlay', test_results['artifacts'])

    def test_reftest_mismatching_image(self):
        host = MockHost()
        details, _, _ = logging_run(
            ['--num-retries=1', 'failures/unexpected/reftest.html'],
            tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        test_results = results['tests']['failures']['unexpected']['reftest.html']
        self.assertEqual(test_results['artifacts']['actual_image'], [
            'layout-test-results/failures/unexpected/reftest-actual.png',
            'layout-test-results/retry_1/failures/unexpected/reftest-actual.png'])
        self.assertEqual(test_results['artifacts']['expected_image'], [
            'layout-test-results/failures/unexpected/reftest-expected.png',
            'layout-test-results/retry_1/failures/unexpected/reftest-expected.png'])
        self.assertEqual(test_results['artifacts']['image_diff'], [
            'layout-test-results/failures/unexpected/reftest-diff.png',
            'layout-test-results/retry_1/failures/unexpected/reftest-diff.png'])
        self.assertEqual(test_results['artifacts']['pretty_image_diff'], [
            'layout-test-results/failures/unexpected/reftest-diffs.html',
            'layout-test-results/retry_1/failures/unexpected/reftest-diffs.html'])
        self.assertEqual(test_results['artifacts']['reference_file_mismatch'], [
            'layout-test-results/failures/unexpected/reftest-expected.html',
            'layout-test-results/retry_1/failures/unexpected/reftest-expected.html'])

    def test_reftest_failure_matching_image(self):
        host = MockHost()
        details, _, _ = logging_run(
            ['failures/unexpected/mismatch.html'],
            tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        test_results = results['tests']['failures']['unexpected']['mismatch.html']
        self.assertIn('reference_file_match', test_results['artifacts'])
        self.assertEqual(test_results['artifacts']['reference_file_match'],
                         ['layout-test-results/failures/unexpected/mismatch-expected-mismatch.html'])

    def test_unexpected_image_mismatch(self):
        host = MockHost()
        details, _, _ = logging_run(
            ['--num-retries=1', 'failures/unexpected/image-mismatch.html'],
            tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        test_results = results['tests']['failures']['unexpected']['image-mismatch.html']
        self.assertEqual(test_results['artifacts']['actual_image'], [
            'layout-test-results/failures/unexpected/image-mismatch-actual.png',
            'layout-test-results/retry_1/failures/unexpected/image-mismatch-actual.png'])
        self.assertEqual(test_results['artifacts']['expected_image'], [
            'layout-test-results/failures/unexpected/image-mismatch-expected.png',
            'layout-test-results/retry_1/failures/unexpected/image-mismatch-expected.png'])
        self.assertEqual(test_results['artifacts']['image_diff'], [
            'layout-test-results/failures/unexpected/image-mismatch-diff.png',
            'layout-test-results/retry_1/failures/unexpected/image-mismatch-diff.png'])
        self.assertEqual(test_results['artifacts']['pretty_image_diff'], [
            'layout-test-results/failures/unexpected/image-mismatch-diffs.html',
            'layout-test-results/retry_1/failures/unexpected/image-mismatch-diffs.html'])

    def test_unexpected_no_image_generated(self):
        host = MockHost()
        details, _, _ = logging_run(
            ['--num-retries=1', 'failures/unexpected/no-image-generated.html'],
            tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        test_results = results['tests']['failures']['unexpected']['no-image-generated.html']
        self.assertNotIn('actual_image', test_results['artifacts'])
        self.assertEqual(test_results['artifacts']['expected_image'], [
            'layout-test-results/failures/unexpected/no-image-generated-expected.png',
            'layout-test-results/retry_1/failures/unexpected/no-image-generated-expected.png'])
        self.assertNotIn('image_diff', test_results['artifacts'])
        self.assertNotIn('pretty_image_diff', test_results['artifacts'])

    def test_unexpected_no_image_baseline(self):
        host = MockHost()
        details, _, _ = logging_run(
            ['--num-retries=1', 'failures/unexpected/no-image-baseline.html'],
            tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        test_results = results['tests']['failures']['unexpected']['no-image-baseline.html']
        self.assertNotIn('expected_image', test_results['artifacts'])
        self.assertEqual(test_results['artifacts']['actual_image'], [
            'layout-test-results/failures/unexpected/no-image-baseline-actual.png',
            'layout-test-results/retry_1/failures/unexpected/no-image-baseline-actual.png'])
        self.assertNotIn('image_diff', test_results['artifacts'])
        self.assertNotIn('pretty_image_diff', test_results['artifacts'])

    def test_unexpected_audio_mismatch(self):
        host = MockHost()
        details, _, _ = logging_run(
            ['--num-retries=1', 'failures/unexpected/audio-mismatch.html'],
            tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        test_results = results['tests']['failures']['unexpected']['audio-mismatch.html']
        self.assertEqual(test_results['artifacts']['actual_audio'], [
            'layout-test-results/failures/unexpected/audio-mismatch-actual.wav',
            'layout-test-results/retry_1/failures/unexpected/audio-mismatch-actual.wav'])
        self.assertEqual(test_results['artifacts']['expected_audio'], [
            'layout-test-results/failures/unexpected/audio-mismatch-expected.wav',
            'layout-test-results/retry_1/failures/unexpected/audio-mismatch-expected.wav'])

    def test_unexpected_audio_missing_baseline(self):
        host = MockHost()
        details, _, _ = logging_run(
            ['--num-retries=1', 'failures/unexpected/no-audio-baseline.html'],
            tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        test_results = results['tests']['failures']['unexpected']['no-audio-baseline.html']
        self.assertEqual(test_results['artifacts']['actual_audio'], [
            'layout-test-results/failures/unexpected/no-audio-baseline-actual.wav',
            'layout-test-results/retry_1/failures/unexpected/no-audio-baseline-actual.wav'])

    def test_unexpected_no_audio_generated(self):
        host = MockHost()
        details, _, _ = logging_run(
            ['--num-retries=1', 'failures/unexpected/no-audio-generated.html'],
            tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        results = json.loads(
            host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        test_results = results['tests']['failures']['unexpected']['no-audio-generated.html']
        self.assertEqual(test_results['artifacts']['expected_audio'], [
            'layout-test-results/failures/unexpected/no-audio-generated-expected.wav',
            'layout-test-results/retry_1/failures/unexpected/no-audio-generated-expected.wav'])

    def test_retrying_uses_retry_directories(self):
        host = MockHost()
        details, _, _ = logging_run(['--num-retries=3', 'failures/unexpected/text-image-checksum.html'],
                                    tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(
            host.filesystem.exists('/tmp/layout-test-results/retry_1/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(
            host.filesystem.exists('/tmp/layout-test-results/retry_2/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(
            host.filesystem.exists('/tmp/layout-test-results/retry_3/failures/unexpected/text-image-checksum-actual.txt'))

    def test_retrying_alias_flag(self):
        host = MockHost()
        _, err, __ = logging_run(['--test-launcher-retry-limit=3', 'failures/unexpected/crash.html'],
                                 tests_included=True, host=host)
        self.assertIn('Retrying', err.getvalue())

    def test_clobber_old_results(self):
        host = MockHost()
        details, _, _ = logging_run(['--num-retries=3', 'failures/unexpected/text-image-checksum.html'],
                                    tests_included=True, host=host)
        # See tests above for what files exist at this point.

        # Now we test that --clobber-old-results does remove the old retries.
        details, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results',
                                       'failures/unexpected/text-image-checksum.html'],
                                      tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertIn('Clobbering old results', err.getvalue())
        self.assertIn('failures/unexpected/text-image-checksum.html', err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retry_1'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retry_2'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retry_3'))

    def test_run_order__inline(self):
        # These next tests check that we run the tests in ascending alphabetical
        # order per directory. HTTP tests are sharded separately from other tests,
        # so we have to test both.
        tests_run = get_tests_run(['--order', 'natural', '-i', 'passes/virtual_passes', 'passes'])
        self.assertEqual(tests_run, sorted(tests_run))

        tests_run = get_tests_run(['--order', 'natural', 'http/tests/passes'])
        self.assertEqual(tests_run, sorted(tests_run))

    def test_virtual(self):
        self.assertTrue(passing_run(['--order', 'natural', 'passes/text.html', 'passes/args.html',
                                     'virtual/passes/text.html', 'virtual/passes/args.html']))

    def test_virtual_warns_when_wildcard_used(self):
        virtual_test_warning_msg = ('WARNING: Wildcards in paths are not supported for '
                                    'virtual test suites.')

        run_details, err, _ = logging_run(['passes/args.html', 'virtual/passes/'],
                                          tests_included=True)
        self.assertEqual(len(run_details.summarized_full_results['tests']['passes'].keys()), 1)
        self.assertFalse(virtual_test_warning_msg in err.getvalue())

        run_details, err, _ = logging_run(['passes/args.html', 'virtual/passes/*'],
                                          tests_included=True)
        self.assertEqual(len(run_details.summarized_full_results['tests']['passes'].keys()), 1)
        self.assertTrue(virtual_test_warning_msg in err.getvalue())

    def test_reftest_run(self):
        tests_run = get_tests_run(['passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_expected_html_should_be_ignored(self):
        tests_run = get_tests_run(['passes/reftest-expected.html'])
        self.assertEqual([], tests_run)

    def test_reftest_driver_should_run_expected_html(self):
        tests_run = get_test_results(['passes/reftest.html'])
        self.assertEqual(tests_run[0].references, ['passes/reftest-expected.html'])

    def test_reftest_driver_should_run_expected_mismatch_html(self):
        tests_run = get_test_results(['passes/mismatch.html'])
        self.assertEqual(tests_run[0].references, ['passes/mismatch-expected-mismatch.html'])

    def test_reftest_crash(self):
        test_results = get_test_results(['failures/unexpected/crash-reftest.html'])
        # The list of references should be empty since the test crashed and we didn't run any references.
        self.assertEqual(test_results[0].references, [])

    def test_reftest_with_virtual_reference(self):
        _, err, _ = logging_run(['--details', 'virtual/virtual_passes/passes/reftest.html'], tests_included=True)
        self.assertTrue('ref: virtual/virtual_passes/passes/reftest-expected.html' in err.getvalue())
        self.assertTrue(re.search(r'args: --virtual-arg\s*ref:', err.getvalue()))

    def test_reftest_matching_text_expectation(self):
        test_name = 'passes/reftest-with-text.html'
        host = MockHost()
        run_details, _, _ = logging_run([test_name], tests_included=True, host=host)
        self.assertEqual(run_details.exit_code, 0)
        self.assertEqual(run_details.initial_results.total, 1)
        test_result = run_details.initial_results.all_results[0]
        self.assertEqual(test_result.test_name, test_name)
        self.assertEqual(len(test_result.failures), 0)

    def test_reftest_mismatching_text_expectation(self):
        test_name = 'failures/unexpected/reftest-with-mismatching-text.html'
        host = MockHost()
        run_details, _, _ = logging_run([test_name], tests_included=True, host=host)
        self.assertNotEqual(run_details.exit_code, 0)
        self.assertEqual(run_details.initial_results.total, 1)
        test_result = run_details.initial_results.all_results[0]
        self.assertEqual(test_result.test_name, test_name)
        self.assertEqual(len(test_result.failures), 1)
        self.assertEqual(test_result.type, ResultType.Failure)

    def test_reftest_mismatching_pixel_matching_text(self):
        test_name = 'failures/unexpected/reftest-with-matching-text.html'
        host = MockHost()
        run_details, _, _ = logging_run([test_name], tests_included=True, host=host)
        self.assertNotEqual(run_details.exit_code, 0)
        self.assertEqual(run_details.initial_results.total, 1)
        test_result = run_details.initial_results.all_results[0]
        self.assertEqual(test_result.test_name, test_name)
        self.assertEqual(len(test_result.failures), 1)
        self.assertEqual(test_result.type, ResultType.Failure)

    def test_reftest_mismatching_both_text_and_pixel(self):
        test_name = 'failures/unexpected/reftest.html'
        host = MockHost()
        host.filesystem.write_text_file(test.WEB_TEST_DIR + '/failures/unexpected/reftest-expected.txt', 'mismatch')
        run_details, _, _ = logging_run([test_name], tests_included=True, host=host)
        self.assertNotEqual(run_details.exit_code, 0)
        self.assertEqual(run_details.initial_results.total, 1)
        test_result = run_details.initial_results.all_results[0]
        self.assertEqual(test_result.test_name, test_name)
        self.assertEqual(len(test_result.failures), 2)
        self.assertEqual(test_result.type, ResultType.Failure)

    def test_extra_baselines(self):
        host = MockHost()
        extra_txt = test.WEB_TEST_DIR + '/passes/image-expected.txt'
        host.filesystem.write_text_file(extra_txt, 'Extra txt')
        extra_wav = test.WEB_TEST_DIR + '/passes/image-expected.wav'
        host.filesystem.write_text_file(extra_wav, 'Extra wav')
        test_name = 'passes/image.html'
        run_details, log_stream, _ = logging_run([test_name], tests_included=True, host=host)
        self.assertNotEqual(run_details.exit_code, 0)
        self.assertEqual(run_details.initial_results.total, 1)
        test_result = run_details.initial_results.all_results[0]
        self.assertEqual(test_result.test_name, test_name)
        self.assertEqual(len(test_result.failures), 2)
        self.assertTrue(test_failures.has_failure_type(test_failures.FailureTextNotGenerated,
                                                       test_result.failures))
        self.assertTrue(test_failures.has_failure_type(test_failures.FailureAudioNotGenerated, test_result.failures))
        self.assert_contains(log_stream, 'Please remove %s' % extra_txt)
        self.assert_contains(log_stream, 'Please remove %s' % extra_wav)

    def test_empty_overriding_baselines(self):
        host = MockHost()
        base_baseline = test.WEB_TEST_DIR + '/passes/image-expected.txt'
        host.filesystem.write_text_file(base_baseline, 'Non-empty')
        platform_baseline = test.WEB_TEST_DIR + '/platform/test-mac-mac10.10/passes/image-expected.txt'
        host.filesystem.write_text_file(platform_baseline, '')
        test_name = 'passes/image.html'
        run_details, log_stream, _ = logging_run([test_name], tests_included=True, host=host)
        self.assertEqual(run_details.exit_code, 0)
        self.assertEqual(run_details.initial_results.total, 1)
        test_result = run_details.initial_results.all_results[0]
        self.assertEqual(test_result.test_name, test_name)
        self.assertEqual(len(test_result.failures), 0)
        self.assertNotIn('Please remove', log_stream.getvalue())

    def test_reftest_extra_baselines(self):
        host = MockHost()
        extra_png = test.WEB_TEST_DIR + '/passes/reftest-expected.png'
        host.filesystem.write_text_file(extra_png, 'Extra png')
        extra_wav = test.WEB_TEST_DIR + '/passes/reftest-expected.wav'
        host.filesystem.write_text_file(extra_wav, 'Extra wav')
        test_name = 'passes/reftest.html'
        run_details, log_stream, _ = logging_run([test_name], tests_included=True, host=host)
        self.assertNotEqual(run_details.exit_code, 0)
        self.assertEqual(run_details.initial_results.total, 1)
        test_result = run_details.initial_results.all_results[0]
        self.assertEqual(test_result.test_name, test_name)
        self.assertEqual(len(test_result.failures), 1)
        self.assertTrue(test_failures.has_failure_type(test_failures.FailureAudioNotGenerated, test_result.failures))
        # For now extra png baseline is only reported in an error message.
        self.assert_contains(log_stream, 'Please remove %s' % extra_png)
        self.assert_contains(log_stream, 'Please remove %s' % extra_wav)

    def test_reftest_with_text_extra_baselines(self):
        host = MockHost()
        extra_png = test.WEB_TEST_DIR + '/passes/reftest-with-text-expected.png'
        host.filesystem.write_text_file(extra_png, 'Extra png')
        extra_wav = test.WEB_TEST_DIR + '/passes/reftest-with-text-expected.wav'
        host.filesystem.write_text_file(extra_wav, 'Extra wav')
        test_name = 'passes/reftest-with-text.html'
        run_details, log_stream, _ = logging_run([test_name], tests_included=True, host=host)
        self.assertNotEqual(run_details.exit_code, 0)
        self.assertEqual(run_details.initial_results.total, 1)
        test_result = run_details.initial_results.all_results[0]
        self.assertEqual(test_result.test_name, test_name)
        self.assertEqual(len(test_result.failures), 1)
        self.assertTrue(test_failures.has_failure_type(test_failures.FailureAudioNotGenerated, test_result.failures))
        # For now extra png baseline is only reported in an error message.
        self.assert_contains(log_stream, 'Please remove %s' % extra_png)
        self.assert_contains(log_stream, 'Please remove %s' % extra_wav)

    def test_reftest_extra_png_baseline(self):
        host = MockHost()
        extra_png = test.WEB_TEST_DIR + '/passes/reftest-expected.png'
        host.filesystem.write_text_file(extra_png, 'Extra png')
        test_name = 'passes/reftest.html'
        run_details, log_stream, _ = logging_run([test_name], tests_included=True, host=host)
        self.assertEqual(run_details.exit_code, 0)
        # For now extra png baseline is only reported in an error message.
        self.assert_contains(log_stream, 'Please remove %s' % extra_png)

    def test_passing_testharness_extra_baselines(self):
        host = MockHost()
        extra_png = test.WEB_TEST_DIR + '/passes/testharness-expected.png'
        host.filesystem.write_text_file(extra_png, 'Extra png')
        extra_txt = test.WEB_TEST_DIR + '/passes/testharness-expected.txt'
        host.filesystem.write_text_file(
            extra_txt,
            'This is a testharness.js-based test.\nPASS: bah\nHarness: the test ran to completion.')
        extra_wav = test.WEB_TEST_DIR + '/passes/testharness-expected.wav'
        host.filesystem.write_text_file(extra_wav, 'Extra wav')
        test_name = 'passes/testharness.html'
        run_details, log_stream, _ = logging_run([test_name], tests_included=True, host=host)
        self.assertNotEqual(run_details.exit_code, 0)
        self.assertEqual(run_details.initial_results.total, 1)
        test_result = run_details.initial_results.all_results[0]
        self.assertEqual(test_result.test_name, test_name)
        self.assertEqual(len(test_result.failures), 2)
        self.assertTrue(test_failures.has_failure_type(test_failures.FailureImageHashNotGenerated, test_result.failures))
        self.assertTrue(test_failures.has_failure_type(test_failures.FailureAudioNotGenerated, test_result.failures))
        # For now extra txt baseline for all-pass testharness test is only reported in an error message.
        self.assert_contains(log_stream, 'Please remove %s' % extra_png)
        self.assert_contains(log_stream, 'Please remove %s' % extra_txt)
        self.assert_contains(log_stream, 'Please remove %s' % extra_wav)

    def test_passing_testharness_extra_txt_baseline(self):
        host = MockHost()
        extra_txt = test.WEB_TEST_DIR + '/passes/testharness-expected.txt'
        host.filesystem.write_text_file(
            extra_txt,
            'This is a testharness.js-based test.\nPASS: bah\nHarness: the test ran to completion.')
        test_name = 'passes/testharness.html'
        run_details, log_stream, _ = logging_run([test_name], tests_included=True, host=host)
        self.assertEqual(run_details.exit_code, 0)
        # For now extra txt baseline for all-pass testharness test is only reported in an error message.
        self.assert_contains(log_stream, 'Please remove %s' % extra_txt)

    def test_passing_testharness_extra_mismatching_txt_baseline(self):
        host = MockHost()
        extra_txt = test.WEB_TEST_DIR + '/passes/testharness-expected.txt'
        host.filesystem.write_text_file(
            extra_txt,
            'This is a testharness.js-based test.\nFAIL: bah\nHarness: the test ran to completion.')
        test_name = 'passes/testharness.html'
        run_details, log_stream, _ = logging_run([test_name], tests_included=True, host=host)
        self.assertNotEqual(run_details.exit_code, 0)
        self.assertEqual(run_details.initial_results.total, 1)
        test_result = run_details.initial_results.all_results[0]
        self.assertEqual(test_result.test_name, test_name)
        self.assertEqual(len(test_result.failures), 1)
        self.assertTrue(test_failures.has_failure_type(test_failures.FailureTextMismatch, test_result.failures))
        self.assert_contains(log_stream, 'Please remove %s' % extra_txt)

    def test_passing_testharness_overriding_baseline(self):
        # An all-pass testharness text baseline is necessary when it overrides a fallback baseline.
        host = MockHost()
        # The base baseline expects a failure.
        base_baseline = test.WEB_TEST_DIR + '/passes/testharness-expected.txt'
        host.filesystem.write_text_file(base_baseline, 'Failure')
        platform_baseline = test.WEB_TEST_DIR + '/platform/test-mac-mac10.10/passes/testharness-expected.txt'
        host.filesystem.write_text_file(
            platform_baseline,
            'This is a testharness.js-based test.\nPASS: bah\nHarness: the test ran to completion.')
        run_details, log_stream, _ = logging_run(
            ['passes/testharness.html'], tests_included=True, host=host)
        self.assertEqual(run_details.exit_code, 0)
        self.assertNotIn('Please remove', log_stream.getvalue())

    def test_additional_platform_directory(self):
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--order', 'natural']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo', '--order', 'natural']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory',
                                     '/tmp/bar', '--order', 'natural']))
        self.assertTrue(passing_run(['--additional-platform-directory', 'foo', '--order', 'natural']))

    def test_additional_expectations(self):
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', '# results: [ Failure ]\nfailures/unexpected/mismatch.html [ Failure ]\n')
        self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
                                    tests_included=True, host=host))

    def test_platform_directories_ignored_when_searching_for_tests(self):
        tests_run = get_tests_run(['--platform', 'test-mac-mac10.10'])
        self.assertNotIn('platform/test-mac-mac10.10/http/test.html', tests_run)
        self.assertNotIn('platform/test-win-win7/http/test.html', tests_run)

    def test_platform_directories_not_searched_for_additional_tests(self):
        tests_run = get_tests_run(['--platform', 'test-mac-mac10.10', 'http'])
        self.assertNotIn('platform/test-mac-mac10.10/http/test.html', tests_run)
        self.assertNotIn('platform/test-win-win7/http/test.html', tests_run)

    def test_output_diffs(self):
        host = MockHost()
        logging_run(['failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        written_files = host.filesystem.written_files
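        # A text mismatch run is expected to write a plain-text diff and a pretty
        # HTML diff, but no wdiff artifact; the mock filesystem's record of written
        # files lets the assertions below check exactly which artifacts were produced.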
        self.assertTrue(any(path.endswith('-diff.txt') for path in written_files.keys()))
        self.assertTrue(any(path.endswith('-pretty-diff.html') for path in written_files.keys()))
        self.assertFalse(any(path.endswith('-wdiff.html') for path in written_files))

    def test_unsupported_platform(self):
        stderr = StringIO.StringIO()
        res = run_web_tests.main(['--platform', 'foo'], stderr)

        self.assertEqual(res, exit_codes.UNEXPECTED_ERROR_EXIT_STATUS)
        self.assertTrue('unsupported platform' in stderr.getvalue())

    def test_verbose_in_child_processes(self):
        # When we actually run multiple processes, we may have to reconfigure logging in the
        # child process (e.g., on win32) and we need to make sure that works and we still
        # see the verbose log output. However, we can't use logging_run() because using
        # output_capture to capture stderr later results in a nonpicklable host.

        options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--jobs',
                                           '2', 'passes/text.html', 'passes/image.html'], tests_included=True)
        host = MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)
        logging_stream = StringIO.StringIO()
        printer = Printer(host, options, logging_stream)
        run_web_tests.run(port_obj, options, parsed_args, printer)
        self.assertTrue('text.html passed' in logging_stream.getvalue())
        self.assertTrue('image.html passed' in logging_stream.getvalue())

    def disabled_test_driver_logging(self):
        # FIXME: Figure out how to either use a mock-test port to
        # get output or make mock ports work again.
        host = Host()
        _, err, _ = logging_run(['--platform', 'mock-win', '--driver-logging', 'fast/harness/results.html'],
                                tests_included=True, host=host)
        self.assertIn('OUT:', err.getvalue())

    def _check_json_test_results(self, host, details):
        self.assertEqual(details.exit_code, 0)
        self.assertTrue(host.filesystem.exists('/tmp/json_results.json'))
        json_failing_test_results = host.filesystem.read_text_file('/tmp/json_results.json')
        self.assertEqual(json.loads(json_failing_test_results), details.summarized_full_results)

    def test_json_test_results(self):
        host = MockHost()
        details, _, _ = logging_run(
            ['--json-test-results', '/tmp/json_results.json'], host=host)
        self._check_json_test_results(host, details)

    def test_json_test_results_alias_write_full_results_to(self):
        host = MockHost()
        details, _, _ = logging_run(
            ['--write-full-results-to', '/tmp/json_results.json'], host=host)
        self._check_json_test_results(host, details)

    def test_json_test_results_alias_isolated_script_test_output(self):
        host = MockHost()
        details, _, _ = logging_run(
            ['--isolated-script-test-output', '/tmp/json_results.json'], host=host)
        self._check_json_test_results(host, details)

    def test_json_failing_test_results(self):
        host = MockHost()
        details, _, _ = logging_run(
            ['--json-failing-test-results', '/tmp/json_failing_results.json'], host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertTrue(host.filesystem.exists('/tmp/json_failing_results.json'))
        json_failing_test_results = host.filesystem.read_text_file('/tmp/json_failing_results.json')
        self.assertEqual(json.loads(json_failing_test_results), details.summarized_failing_results)

    def test_no_default_expectations(self):
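        # failures/expected/text.html fails by design and is normally suppressed by
        # the default expectations; with --ignore-default-expectations that entry no
        # longer applies, so the failure counts as unexpected and the run fails.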
        self.assertFalse(passing_run(['--ignore-default-expectations', 'failures/expected/text.html']))


class RebaselineTest(unittest.TestCase, StreamTestingMixin):
    """Tests for flags which cause new baselines to be written.

    When running web tests, there are several flags which write new
    baselines. This is separate from the "blink_tool.py rebaseline" commands,
    which fetch new baselines from elsewhere rather than generating them.
    """

    def assert_baselines(self, written_files, log_stream, expected_file_base, expected_extensions):
        """Asserts that written_files contains baselines for one test.

        Args:
            written_files: from FileSystem.written_files.
            log_stream: The log stream from the run.
            expected_file_base: Relative path to the baseline,
                without the extension, from the web test directory.
            expected_extensions: Expected extensions which should be written.
        """
        for ext in expected_extensions:
            baseline = '%s-expected%s' % (expected_file_base, ext)
            baseline_full_path = '%s/%s' % (test.WEB_TEST_DIR, baseline)
            self.assertIsNotNone(written_files.get(baseline_full_path))
            baseline_message = 'Writing new baseline "%s"\n' % baseline
            self.assert_contains(log_stream, baseline_message)
        # Assert that baselines with other extensions were not written.
        for ext in ({'.png', '.txt', '.wav'} - set(expected_extensions)):
            baseline = '%s-expected%s' % (expected_file_base, ext)
            baseline_full_path = '%s/%s' % (test.WEB_TEST_DIR, baseline)
            self.assertIsNone(written_files.get(baseline_full_path))

    def test_reset_results_basic(self):
        # Test that we update baselines in place when the test fails
        # (text and image mismatch).
        host = MockHost()
        details, log_stream, _ = logging_run(
            ['--reset-results', 'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host)
        written_files = host.filesystem.written_files
        # The run exit code is 0, indicating success; since we're resetting
        # baselines, it's OK for actual results to not match baselines.
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(written_files.keys()), 7)
        self.assert_baselines(
            written_files, log_stream,
            'failures/unexpected/text-image-checksum',
            expected_extensions=['.txt', '.png'])

    def test_no_baselines_are_written_with_no_reset_results_flag(self):
        # This test checks that we're *not* writing baselines when we're not
        # supposed to be (when there's no --reset-results flag).
        host = MockHost()
        details, log_stream, _ = logging_run(
            ['failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host)
        written_files = host.filesystem.written_files
        # In a normal test run where actual results don't match baselines, the
        # exit code indicates failure.
        self.assertEqual(details.exit_code, 1)
        self.assert_baselines(
            written_files, log_stream, 'failures/unexpected/text-image-checksum',
            expected_extensions=[])

    def test_reset_results_missing_results(self):
        # Test that we create new baselines at the generic location
        # if we are missing baselines.
        host = MockHost()
        details, log_stream, _ = logging_run(
            [
                '--reset-results',
                'failures/unexpected/missing_text.html',
                'failures/unexpected/missing_image.html',
                'failures/unexpected/missing_render_tree_dump.html'
            ],
            tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(written_files.keys()), 8)
        self.assert_baselines(
            written_files, log_stream,
            'failures/unexpected/missing_text', ['.txt'])
        self.assert_baselines(
            written_files, log_stream,
            'failures/unexpected/missing_image', ['.png'])
        self.assert_baselines(
            written_files, log_stream,
            'failures/unexpected/missing_render_tree_dump',
            expected_extensions=['.txt'])

    def test_reset_results_testharness_no_baseline(self):
        # Tests that we create a new baseline for a failing testharness test
        # without a baseline, but don't create one for a passing test.
        host = MockHost()
        details, log_stream, _ = logging_run(
            [
                '--reset-results',
                'failures/unexpected/testharness.html',
                'passes/testharness.html'
            ],
            tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(written_files.keys()), 6)
        self.assert_baselines(written_files, log_stream, 'failures/unexpected/testharness', ['.txt'])
        self.assert_baselines(written_files, log_stream, 'passes/testharness', [])

    def test_reset_results_testharness_existing_baseline(self):
        # Tests that we update an existing baseline for a testharness test.
        host = MockHost()
        host.filesystem.write_text_file(
            test.WEB_TEST_DIR + '/failures/unexpected/testharness-expected.txt', 'foo')
        details, log_stream, _ = logging_run(
            [
                '--reset-results',
                'failures/unexpected/testharness.html'
            ],
            tests_included=True, host=host)
        self.assertEqual(details.exit_code, 0)
        written_files = host.filesystem.written_files
        self.assertEqual(len(written_files.keys()), 6)
        self.assert_baselines(written_files, log_stream, 'failures/unexpected/testharness', ['.txt'])

    def test_reset_results_image_only(self):
        # Tests that we don't create new text results for an image-only test.
        host = MockHost()
        details, log_stream, _ = logging_run(
            [
                '--reset-results',
                'failures/unexpected/image-only.html',
            ],
            tests_included=True, host=host)
        self.assertEqual(details.exit_code, 0)
        written_files = host.filesystem.written_files
        self.assertEqual(len(written_files.keys()), 6)
        self.assert_baselines(written_files, log_stream, 'failures/unexpected/image-only', ['.png'])

    def test_copy_baselines(self):
        # Test that we update the baselines in the version-specific directories
        # if the new baseline is different from the fallback baseline.
        host = MockHost()
        host.filesystem.write_text_file(
            test.WEB_TEST_DIR +
            '/failures/unexpected/text-image-checksum-expected.txt',
            # This value is the same as actual text result of the test defined
            # in blinkpy.web_tests.port.test. This is added so that we also
            # check that the text baseline isn't written if it matches.
            'text-image-checksum_fail-txt')
        details, log_stream, _ = logging_run(
            [
                '--copy-baselines',
                'failures/unexpected/text-image-checksum.html'
            ],
            tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertEqual(details.exit_code, 1)
        self.assertEqual(len(written_files.keys()), 11)
        self.assert_contains(
            log_stream,
            'Copying baseline to "platform/test-mac-mac10.10/failures/unexpected/text-image-checksum-expected.png"')
        self.assert_contains(
            log_stream,
            'Not copying baseline to "platform/test-mac-mac10.10/failures/unexpected/text-image-checksum-expected.txt"')

    def test_reset_results_with_copy_baselines(self):
        # Test that we update the baselines in the version-specific directories
        # if the new baseline is different from the fallback baseline.
        host = MockHost()
        host.filesystem.write_text_file(
            test.WEB_TEST_DIR +
            '/failures/unexpected/text-image-checksum-expected.txt',
            # This value is the same as actual text result of the test defined
            # in blinkpy.web_tests.port.test. This is added so that we also
            # check that the text baseline isn't written if it matches.
            'text-image-checksum_fail-txt')
        details, log_stream, _ = logging_run(
            [
                '--reset-results', '--copy-baselines',
                'failures/unexpected/text-image-checksum.html'
            ],
            tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(written_files.keys()), 7)
        self.assert_baselines(
            written_files, log_stream,
            'platform/test-mac-mac10.10/failures/unexpected/text-image-checksum',
            expected_extensions=['.png'])

    def test_reset_results_reftest(self):
        # Test rebaseline of reference tests.
        # Reference tests don't have baselines, so they should be ignored.
        host = MockHost()
        details, log_stream, _ = logging_run(
            ['--reset-results', 'passes/reftest.html'],
            tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(written_files.keys()), 5)
        self.assert_baselines(
            written_files, log_stream, 'passes/reftest', expected_extensions=[])

    def test_reset_results_reftest_with_text(self):
        # In this case, there is a text baseline present; a new baseline is
        # written even though this is a reference test.
        host = MockHost()
        details, log_stream, _ = logging_run(
            ['--reset-results', 'failures/unexpected/reftest-with-mismatching-text.html'],
            tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(written_files.keys()), 6)
        self.assert_baselines(
            written_files, log_stream, 'failures/unexpected/reftest-with-mismatching-text',
            expected_extensions=['.txt'])

    def test_reset_results_remove_extra_baselines(self):
        host = MockHost()
        extra_txt = test.WEB_TEST_DIR + '/failures/unexpected/image-only-expected.txt'
        host.filesystem.write_text_file(extra_txt, 'Extra txt')
        extra_wav = test.WEB_TEST_DIR + '/failures/unexpected/image-only-expected.wav'
        host.filesystem.write_text_file(extra_wav, 'Extra wav')
        details, log_stream, _ = logging_run(
            ['--reset-results', 'failures/unexpected/image-only.html'],
            tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(written_files.keys()), 8)
        self.assertIsNone(written_files[extra_txt])
        self.assertIsNone(written_files[extra_wav])
        self.assert_baselines(
            written_files, log_stream, 'failures/unexpected/image-only',
            expected_extensions=['.png'])

    def test_reset_results_reftest_remove_extra_baselines(self):
        host = MockHost()
        extra_png = test.WEB_TEST_DIR + '/passes/reftest-expected.png'
        host.filesystem.write_text_file(extra_png, 'Extra png')
        extra_wav = test.WEB_TEST_DIR + '/passes/reftest-expected.wav'
        host.filesystem.write_text_file(extra_wav, 'Extra wav')
        extra_txt = test.WEB_TEST_DIR + '/passes/reftest-expected.txt'
        host.filesystem.write_text_file(extra_txt, 'reftest')
        details, _, _ = logging_run(['--reset-results', 'passes/reftest.html'],
                                    tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(written_files.keys()), 8)
        self.assertIsNone(written_files[extra_png])
        self.assertIsNone(written_files[extra_wav])
        self.assertIsNone(written_files[extra_txt])

    def test_reset_results_reftest_with_text_remove_extra_baselines(self):
        host = MockHost()
        extra_png = test.WEB_TEST_DIR + '/passes/reftest-with-text-expected.png'
        host.filesystem.write_text_file(extra_png, 'Extra png')
        extra_wav = test.WEB_TEST_DIR + '/passes/reftest-with-text-expected.wav'
        host.filesystem.write_text_file(extra_wav, 'Extra wav')
        details, _, _ = logging_run(['--reset-results', 'passes/reftest-with-text.html'],
                                    tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(written_files.keys()), 7)
        self.assertIsNone(written_files[extra_png])
        self.assertIsNone(written_files[extra_wav])
        self.assertNotIn(test.WEB_TEST_DIR + '/passes/reftest-with-text-expected.txt', written_files)

    def test_reset_results_passing_testharness_remove_extra_baselines(self):
        host = MockHost()
        extra_png = test.WEB_TEST_DIR + '/passes/testharness-expected.png'
        host.filesystem.write_text_file(extra_png, 'Extra png')
        extra_txt = test.WEB_TEST_DIR + '/passes/testharness-expected.txt'
        host.filesystem.write_text_file(extra_txt, 'Extra txt')
        details, log_stream, _ = logging_run(
            ['--reset-results',
             'passes/testharness.html'],
            tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(written_files.keys()), 7)
        self.assertIsNone(written_files[extra_png])
        self.assertIsNone(written_files[extra_txt])
        self.assert_baselines(
            written_files, log_stream, 'passes/testharness',
            expected_extensions=[])

    def test_reset_results_failing_testharness(self):
        host = MockHost()
        details, log_stream, _ = logging_run(
            ['--reset-results', 'failures/unexpected/testharness.html'],
            tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(written_files.keys()), 6)
        self.assert_baselines(
            written_files, log_stream, 'failures/unexpected/testharness',
            expected_extensions=['.txt'])

    def test_new_flag_specific_baseline(self):
        # Test writing new baselines under the flag-specific directory if the actual
        # results are different from the current baselines.
        host = MockHost()
        host.filesystem.write_text_file(
            test.WEB_TEST_DIR +
            '/failures/unexpected/text-image-checksum-expected.txt',
            # This value is the same as actual text result of the test defined
            # in blinkpy.web_tests.port.test. This is added so that we also
            # check that the text baseline isn't written if it matches.
            'text-image-checksum_fail-txt')
        details, log_stream, _ = logging_run(
            ['--additional-driver-flag=--flag',
             '--reset-results',
             'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(written_files.keys()), 7)
        # We should create new image baseline only.
        self.assert_baselines(
            written_files, log_stream,
            'flag-specific/flag/failures/unexpected/text-image-checksum',
            expected_extensions=['.png'])

    def test_copy_flag_specific_baseline(self):
        # Test copying the current baseline to the flag-specific directory if the
        # actual results are different from the current baselines.
        host = MockHost()
        host.filesystem.write_text_file(
            test.WEB_TEST_DIR +
            '/failures/unexpected/text-image-checksum-expected.txt',
            # This value is the same as actual text result of the test defined
            # in blinkpy.web_tests.port.test. This is added so that we also
            # check that the text baseline isn't written if it matches.
            'text-image-checksum_fail-txt')
        details, log_stream, _ = logging_run(
            ['--additional-driver-flag=--flag',
             '--copy-baselines',
             'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertEqual(details.exit_code, 1)
        self.assertEqual(len(written_files.keys()), 11)
        self.assert_contains(
            log_stream,
            'Copying baseline to "flag-specific/flag/failures/unexpected/text-image-checksum-expected.png"')
        self.assert_contains(
            log_stream,
            'Not copying baseline to "flag-specific/flag/failures/unexpected/text-image-checksum-expected.txt"')

    def test_new_flag_specific_baseline_optimize(self):
        # Test removing existing baselines under the flag-specific directory if the
        # actual results are the same as the fallback baselines.
        host = MockHost()
        host.filesystem.write_text_file(
            test.WEB_TEST_DIR +
            '/failures/unexpected/text-image-checksum-expected.txt',
            # This value is the same as actual text result of the test defined
            # in blinkpy.web_tests.port.test. This is added so that we check
            # that the flag-specific text baseline is removed if the actual
            # result is the same as this fallback baseline.
            'text-image-checksum_fail-txt')
        flag_specific_baseline_txt = (
            test.WEB_TEST_DIR +
            '/flag-specific/flag/failures/unexpected/text-image-checksum-expected.txt')
        host.filesystem.write_text_file(
            flag_specific_baseline_txt, 'existing-baseline-different-from-fallback')

        details, log_stream, _ = logging_run(
            ['--additional-driver-flag=--flag',
             '--reset-results',
             'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertFalse(host.filesystem.exists(flag_specific_baseline_txt))
        written_files = host.filesystem.written_files
        self.assertEqual(len(written_files.keys()), 8)
        # We should create new image baseline only.
        self.assert_baselines(
            written_files, log_stream,
            'flag-specific/flag/failures/unexpected/text-image-checksum',
            expected_extensions=['.png'])

    def test_new_virtual_baseline(self):
        # Test writing new baselines under the virtual test directory if the actual
        # results are different from the current baselines.
        host = MockHost()
        host.filesystem.write_text_file(
            test.WEB_TEST_DIR +
            '/failures/unexpected/text-image-checksum-expected.txt',
            # This value is the same as actual text result of the test defined
            # in blinkpy.web_tests.port.test. This is added so that we also
            # check that the text baseline isn't written if it matches.
            'text-image-checksum_fail-txt')
        details, log_stream, _ = logging_run(
            ['--reset-results',
             'virtual/virtual_failures/failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(written_files.keys()), 9)
        # We should create new image baseline only.
        self.assert_baselines(
            written_files, log_stream,
            'virtual/virtual_failures/failures/unexpected/text-image-checksum',
            expected_extensions=['.png'])

    def test_new_platform_baseline_with_fallback(self):
        # Test that we update the existing baseline in the platform-specific
        # directory if the new baseline is different, with existing fallback
        # baseline (which should not matter).
        host = MockHost()
        host.filesystem.write_text_file(
            test.WEB_TEST_DIR +
            '/platform/test-mac-mac10.10/failures/unexpected/text-image-checksum-expected.png',
            'wrong-png-baseline')

        details, log_stream, _ = logging_run(
            [
                '--reset-results',
                'failures/unexpected/text-image-checksum.html'
            ],
            tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(written_files.keys()), 7)
        # We should reset the platform image baseline.
        self.assert_baselines(
            written_files, log_stream,
            'platform/test-mac-mac10.10/failures/unexpected/text-image-checksum',
            expected_extensions=['.png'])

    def test_new_platform_baseline_without_fallback(self):
        # Test that we update the existing baseline in the platform-specific
        # directory if the new baseline is different, without existing fallback
        # baseline (which should not matter).
        host = MockHost()
        host.filesystem.write_text_file(
            test.WEB_TEST_DIR +
            '/platform/test-mac-mac10.10/failures/unexpected/text-image-checksum-expected.png',
            'wrong-png-baseline')
        host.filesystem.remove(
            test.WEB_TEST_DIR + '/failures/unexpected/text-image-checksum-expected.png')

        details, log_stream, _ = logging_run(
            [
                '--reset-results',
                'failures/unexpected/text-image-checksum.html'
            ],
            tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(written_files.keys()), 8)
        # We should reset the platform image baseline.
        self.assert_baselines(
            written_files, log_stream,
            'platform/test-mac-mac10.10/failures/unexpected/text-image-checksum',
            expected_extensions=['.png'])

    def test_new_virtual_baseline_optimize(self):
        # Test removing existing baselines under the virtual test directory if the
        # actual results are the same as the fallback baselines.
        host = MockHost()
        host.filesystem.write_text_file(
            test.WEB_TEST_DIR +
            '/failures/unexpected/text-image-checksum-expected.txt',
            # This value is the same as actual text result of the test defined
            # in blinkpy.web_tests.port.test. This is added so that we check
            # that the virtual text baseline is removed if the actual
            # result is the same as this fallback baseline.
            'text-image-checksum_fail-txt')
        virtual_baseline_txt = (
            test.WEB_TEST_DIR +
            '/virtual/virtual_failures/failures/unexpected/text-image-checksum-expected.txt')
        host.filesystem.write_text_file(
            virtual_baseline_txt, 'existing-baseline-different-from-fallback')

        details, log_stream, _ = logging_run(
            ['--reset-results',
             'virtual/virtual_failures/failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertFalse(host.filesystem.exists(virtual_baseline_txt))
        written_files = host.filesystem.written_files
        self.assertEqual(len(written_files.keys()), 10)
        # We should create new image baseline only.
        self.assert_baselines(
            written_files, log_stream,
            'virtual/virtual_failures/failures/unexpected/text-image-checksum',
            expected_extensions=['.png'])


class MainTest(unittest.TestCase):

    def test_exception_handling(self):
        orig_run_fn = run_web_tests.run

        # pylint: disable=unused-argument
        def interrupting_run(port, options, args, printer):
            raise KeyboardInterrupt

        def successful_run(port, options, args, printer):

            class FakeRunDetails(object):
                exit_code = exit_codes.UNEXPECTED_ERROR_EXIT_STATUS

            return FakeRunDetails()

        def exception_raising_run(port, options, args, printer):
            assert False

        stderr = StringIO.StringIO()
        try:
            run_web_tests.run = interrupting_run
            res = run_web_tests.main([], stderr)
            self.assertEqual(res, exit_codes.INTERRUPTED_EXIT_STATUS)

            run_web_tests.run = successful_run
            res = run_web_tests.main(['--platform', 'test'], stderr)
            self.assertEqual(res, exit_codes.UNEXPECTED_ERROR_EXIT_STATUS)

            run_web_tests.run = exception_raising_run
            res = run_web_tests.main([], stderr)
            self.assertEqual(res, exit_codes.UNEXPECTED_ERROR_EXIT_STATUS)
        finally:
            run_web_tests.run = orig_run_fn
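

# Convenience entry point, assuming direct invocation is useful for local
# debugging; within Chromium these tests are normally executed through the
# project's own test runner rather than by running this file directly.
if __name__ == '__main__':
    unittest.main()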