import json
import sys
from os.path import dirname, join
from io import StringIO

from mozlog import handlers, structuredlog

sys.path.insert(0, join(dirname(__file__), "..", ".."))
from formatters.chromium import ChromiumFormatter
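
# Unit tests for ChromiumFormatter. Each test drives the formatter through a
# mozlog StructuredLogger attached to a StreamHandler that writes into a
# StringIO, then parses and checks the JSON the formatter produced. The capfd
# fixture is used throughout to verify that nothing leaks to stdout/stderr.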


def test_chromium_required_fields(capfd):
    # Test that the test results contain a handful of required fields.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Log the results of a single passing test.
    logger.suite_start(["test-id-1"], run_info={}, time=123)
    logger.test_start("test-id-1")
    logger.test_end("test-id-1", status="PASS", expected="PASS")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_obj = json.load(output)

    # Check for existence of required fields
    assert "interrupted" in output_obj
    assert "path_delimiter" in output_obj
    assert "version" in output_obj
    assert "num_failures_by_type" in output_obj
    assert "tests" in output_obj

    test_obj = output_obj["tests"]["test-id-1"]
    assert "actual" in test_obj
    assert "expected" in test_obj


def test_chromium_test_name_trie(capfd):
    # Ensure test names are broken into directories and stored in a trie with
    # test results at the leaves.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Log results for two tests in different directories.
    logger.suite_start(["/foo/bar/test-id-1", "/foo/test-id-2"], run_info={},
                       time=123)
    logger.test_start("/foo/bar/test-id-1")
    logger.test_end("/foo/bar/test-id-1", status="TIMEOUT", expected="FAIL")
    logger.test_start("/foo/test-id-2")
    logger.test_end("/foo/test-id-2", status="ERROR", expected="TIMEOUT")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_obj = json.load(output)

    # Ensure that the test names are broken up by directory name and that the
    # results are stored at the leaves.
    test_obj = output_obj["tests"]["foo"]["bar"]["test-id-1"]
    assert test_obj["actual"] == "TIMEOUT"
    assert test_obj["expected"] == "FAIL"

    test_obj = output_obj["tests"]["foo"]["test-id-2"]
    # The ERROR status is mapped to FAIL for Chromium
    assert test_obj["actual"] == "FAIL"
    assert test_obj["expected"] == "TIMEOUT"


def test_num_failures_by_type(capfd):
    # Test that the number of failures by status type is correctly calculated.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run some tests with different statuses: 3 passes, 1 timeout
    logger.suite_start(["t1", "t2", "t3", "t4"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1", status="PASS", expected="PASS")
    logger.test_start("t2")
    logger.test_end("t2", status="PASS", expected="PASS")
    logger.test_start("t3")
    logger.test_end("t3", status="PASS", expected="FAIL")
    logger.test_start("t4")
    logger.test_end("t4", status="TIMEOUT", expected="CRASH")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    num_failures_by_type = json.load(output)["num_failures_by_type"]

    # We expect 3 passes and 1 timeout, nothing else.
    assert sorted(num_failures_by_type.keys()) == ["PASS", "TIMEOUT"]
    assert num_failures_by_type["PASS"] == 3
    assert num_failures_by_type["TIMEOUT"] == 1


def test_subtest_messages(capfd):
    # Tests accumulation of test output

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run two tests with subtest messages. The subtest name should be included
    # in the output. We should also tolerate missing messages.
    logger.suite_start(["t1", "t2"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_status("t1", status="FAIL", subtest="t1_a",
                       message="t1_a_message")
    logger.test_status("t1", status="PASS", subtest="t1_b",
                       message="t1_b_message")
    logger.test_end("t1", status="PASS", expected="PASS")
    logger.test_start("t2")
    # Subtests with missing messages should not be ignored.
    logger.test_status("t2", status="PASS", subtest="t2_a")
    # A test-level message will also be appended
    logger.test_end("t2", status="TIMEOUT", expected="PASS",
                    message="t2_message")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    t1_artifacts = output_json["tests"]["t1"]["artifacts"]
    assert t1_artifacts["wpt_actual_metadata"] == [
        "[t1]\n  expected: PASS\n",
        "  [t1_a]\n    expected: FAIL\n",
        "  [t1_b]\n    expected: PASS\n",
    ]
    assert t1_artifacts["wpt_log"] == [
        "t1_a: t1_a_message\n",
        "t1_b: t1_b_message\n",
    ]
    assert t1_artifacts["wpt_subtest_failure"] == ["true"]
    t2_artifacts = output_json["tests"]["t2"]["artifacts"]
    assert t2_artifacts["wpt_actual_metadata"] == [
        "[t2]\n  expected: TIMEOUT\n",
        "  [t2_a]\n    expected: PASS\n",
    ]
    assert t2_artifacts["wpt_log"] == [
        "Harness: t2_message\n"
    ]
    assert "wpt_subtest_failure" not in t2_artifacts.keys()


def test_subtest_failure(capfd):
    # Tests that a test fails if a subtest fails

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    formatter = ChromiumFormatter()
    logger.add_handler(handlers.StreamHandler(output, formatter))

    # Run a test with some subtest failures.
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_status("t1", status="FAIL", subtest="t1_a",
                       message="t1_a_message")
    logger.test_status("t1", status="PASS", subtest="t1_b",
                       message="t1_b_message")
    logger.test_status("t1", status="TIMEOUT", subtest="t1_c",
                       message="t1_c_message")

    # Make sure the test name was added to the set of tests with subtest fails
    assert "t1" in formatter.tests_with_subtest_fails

    # The test status is reported as a pass here because the harness was able to
    # run the test to completion.
    logger.test_end("t1", status="PASS", expected="PASS", message="top_message")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    t1_artifacts = test_obj["artifacts"]
    assert t1_artifacts["wpt_actual_metadata"] == [
        "[t1]\n  expected: PASS\n",
        "  [t1_a]\n    expected: FAIL\n",
        "  [t1_b]\n    expected: PASS\n",
        "  [t1_c]\n    expected: TIMEOUT\n",
    ]
    assert t1_artifacts["wpt_log"] == [
        "Harness: top_message\n",
        "t1_a: t1_a_message\n",
        "t1_b: t1_b_message\n",
        "t1_c: t1_c_message\n",
    ]
    assert t1_artifacts["wpt_subtest_failure"] == ["true"]
    # The status of the test in the output is a failure because subtests failed,
    # despite the harness reporting that the test passed. But the harness status
    # is logged as an artifact.
    assert t1_artifacts["wpt_actual_status"] == ["PASS"]
    assert test_obj["actual"] == "FAIL"
    assert test_obj["expected"] == "PASS"
    # Also ensure that the formatter cleaned up its internal state
    assert "t1" not in formatter.tests_with_subtest_fails


def test_expected_subtest_failure(capfd):
    # Tests that an expected subtest failure does not cause the test to fail

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    formatter = ChromiumFormatter()
    logger.add_handler(handlers.StreamHandler(output, formatter))

    # Run a test with some expected subtest failures.
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_status("t1", status="FAIL", expected="FAIL", subtest="t1_a",
                       message="t1_a_message")
    logger.test_status("t1", status="PASS", subtest="t1_b",
                       message="t1_b_message")
    logger.test_status("t1", status="TIMEOUT", expected="TIMEOUT", subtest="t1_c",
                       message="t1_c_message")

    # The subtest failures are all expected so this test should not be added to
    # the set of tests with subtest failures.
    assert "t1" not in formatter.tests_with_subtest_fails

    # The test status is reported as a pass here because the harness was able to
    # run the test to completion.
    logger.test_end("t1", status="OK", expected="OK")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    assert test_obj["artifacts"]["wpt_actual_metadata"] == [
        "[t1]\n  expected: OK\n",
        "  [t1_a]\n    expected: FAIL\n",
        "  [t1_b]\n    expected: PASS\n",
        "  [t1_c]\n    expected: TIMEOUT\n",
    ]
    assert test_obj["artifacts"]["wpt_log"] == [
        "t1_a: t1_a_message\n",
        "t1_b: t1_b_message\n",
        "t1_c: t1_c_message\n",
    ]
    # The status of the test in the output is a pass because the subtest
    # failures were all expected.
    assert test_obj["actual"] == "PASS"
    assert test_obj["expected"] == "PASS"


def test_unexpected_subtest_pass(capfd):
    # A subtest that unexpectedly passes is considered a failure condition.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    formatter = ChromiumFormatter()
    logger.add_handler(handlers.StreamHandler(output, formatter))

    # Run a test with a subtest that is expected to fail but passes.
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_status("t1", status="PASS", expected="FAIL", subtest="t1_a",
                       message="t1_a_message")

    # Since the subtest behaviour is unexpected, it's considered a failure, so
    # the test should be added to the set of tests with subtest failures.
    assert "t1" in formatter.tests_with_subtest_fails

    # The test status is reported as a pass here because the harness was able to
    # run the test to completion.
    logger.test_end("t1", status="PASS", expected="PASS")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    t1_artifacts = test_obj["artifacts"]
    assert t1_artifacts["wpt_actual_metadata"] == [
        "[t1]\n  expected: PASS\n",
        "  [t1_a]\n    expected: PASS\n",
    ]
    assert t1_artifacts["wpt_log"] == [
        "t1_a: t1_a_message\n",
    ]
    assert t1_artifacts["wpt_subtest_failure"] == ["true"]
    # Since the subtest status is unexpected, we fail the test. But we report
    # wpt_actual_status as an artifact
    assert t1_artifacts["wpt_actual_status"] == ["PASS"]
    assert test_obj["actual"] == "FAIL"
    assert test_obj["expected"] == "PASS"
    # Also ensure that the formatter cleaned up its internal state
    assert "t1" not in formatter.tests_with_subtest_fails


def test_expected_test_fail(capfd):
    # Check that an expected test-level failure is not marked as unexpected or
    # as a regression.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run a single test that errors, with the error expected.
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1", status="ERROR", expected="ERROR")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    # The test's actual and expected status should map from "ERROR" to "FAIL"
    assert test_obj["actual"] == "FAIL"
    assert test_obj["expected"] == "FAIL"
    # ...and this test should be neither a regression nor unexpected
    assert "is_regression" not in test_obj
    assert "is_unexpected" not in test_obj


def test_unexpected_test_fail(capfd):
    # Check that an unexpected test-level failure is marked as unexpected and
    # as a regression.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run a single test that errors unexpectedly.
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1", status="ERROR", expected="OK")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    # The test's actual and expected status should be mapped, ERROR->FAIL and
    # OK->PASS
    assert test_obj["actual"] == "FAIL"
    assert test_obj["expected"] == "PASS"
    # ...and this test should be a regression and unexpected
    assert test_obj["is_regression"] is True
    assert test_obj["is_unexpected"] is True


def test_flaky_test_expected(capfd):
    # Check that a flaky test with multiple possible statuses is seen as
    # expected if its actual status is one of the possible ones.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run a test that is known to be flaky
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1", status="ERROR", expected="OK", known_intermittent=["ERROR", "TIMEOUT"])
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    # The test's statuses are all mapped, changing ERROR->FAIL and OK->PASS
    assert test_obj["actual"] == "FAIL"
    # All the possible statuses are merged and sorted together into expected.
    assert test_obj["expected"] == "FAIL PASS TIMEOUT"
    # ...this is not a regression or unexpected because the actual status is one
    # of the expected ones
    assert "is_regression" not in test_obj
    assert "is_unexpected" not in test_obj


def test_flaky_test_unexpected(capfd):
    # Check that a flaky test with multiple possible statuses is seen as
    # unexpected if its actual status is NOT one of the possible ones.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run a test that is known to be flaky
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1", status="ERROR", expected="OK", known_intermittent=["TIMEOUT"])
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    # The test's statuses are all mapped, changing ERROR->FAIL and OK->PASS
    assert test_obj["actual"] == "FAIL"
    # All the possible statuses are merged and sorted together into expected.
    assert test_obj["expected"] == "PASS TIMEOUT"
    # ...this is a regression and unexpected because the actual status is not
    # one of the expected ones
    assert test_obj["is_regression"] is True
    assert test_obj["is_unexpected"] is True


def test_precondition_failed(capfd):
    # Check that a failed precondition gets properly handled.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run a test with a precondition failure
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1", status="PRECONDITION_FAILED", expected="OK")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    # The precondition failure should map to FAIL status, but we should also
    # have an artifact containing the original PRECONDITION_FAILED status.
    assert test_obj["actual"] == "FAIL"
    assert test_obj["artifacts"]["wpt_actual_status"] == ["PRECONDITION_FAILED"]
    # ...this is an unexpected regression because we expected a pass but failed
    assert test_obj["is_regression"] is True
    assert test_obj["is_unexpected"] is True


def test_known_intermittent_empty(capfd):
    # If the known_intermittent list is empty, we want to ensure we don't append
    # any extraneous characters to the output.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run a test and include an empty known_intermittent list
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1", status="OK", expected="OK", known_intermittent=[])
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    # Both actual and expected statuses get mapped to Pass. No extra whitespace
    # anywhere.
    assert test_obj["actual"] == "PASS"
    assert test_obj["expected"] == "PASS"


def test_known_intermittent_duplicate(capfd):
    # We don't want to have duplicate statuses in the final "expected" field.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # There are two duplications in this input:
    # 1. known_intermittent already contains expected;
    # 2. both statuses in known_intermittent map to FAIL in Chromium.
    # In the end, we should only get one FAIL in Chromium "expected".
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1", status="ERROR", expected="ERROR", known_intermittent=["FAIL", "ERROR"])
    logger.suite_end()

    # Check nothing got output to stdout/stderr.
    # (Note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # Check the actual output of the formatter.
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    assert test_obj["actual"] == "FAIL"
    # No duplicate "FAIL" in "expected".
    assert test_obj["expected"] == "FAIL"


def test_reftest_screenshots(capfd):
    # reftest_screenshots, if present, should be plumbed into artifacts.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run a reftest with reftest_screenshots.
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1", status="FAIL", expected="PASS", extra={
        "reftest_screenshots": [
            {"url": "foo.html", "hash": "HASH1", "screenshot": "DATA1"},
            "!=",
            {"url": "foo-ref.html", "hash": "HASH2", "screenshot": "DATA2"},
        ]
    })
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    assert test_obj["artifacts"]["screenshots"] == [
        "foo.html: DATA1",
        "foo-ref.html: DATA2",
    ]


def test_process_output_crashing_test(capfd):
    """Test that chromedriver logs are preserved for crashing tests"""

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    logger.suite_start(["t1", "t2", "t3"], run_info={}, time=123)

    logger.test_start("t1")
    logger.process_output(100, "This message should be recorded", "/some/path/to/chromedriver --some-flag")
    logger.process_output(101, "This message should not be recorded", "/some/other/process --another-flag")
    logger.process_output(100, "This message should also be recorded", "/some/path/to/chromedriver --some-flag")
    logger.test_end("t1", status="CRASH", expected="CRASH")

    logger.test_start("t2")
    logger.process_output(100, "Another message for the second test", "/some/path/to/chromedriver --some-flag")
    logger.test_end("t2", status="CRASH", expected="PASS")

    logger.test_start("t3")
    logger.process_output(100, "This test fails", "/some/path/to/chromedriver --some-flag")
    logger.process_output(100, "But the output should not be captured", "/some/path/to/chromedriver --some-flag")
    logger.process_output(100, "Because it does not crash", "/some/path/to/chromedriver --some-flag")
    logger.test_end("t3", status="FAIL", expected="PASS")

    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    assert test_obj["artifacts"]["wpt_crash_log"] == [
        "This message should be recorded",
        "This message should also be recorded"
    ]

    test_obj = output_json["tests"]["t2"]
    assert test_obj["artifacts"]["wpt_crash_log"] == [
        "Another message for the second test"
    ]

    test_obj = output_json["tests"]["t3"]
    assert "wpt_crash_log" not in test_obj["artifacts"]