# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from __future__ import unicode_literals

import json
import os
import sys

import environment as env
import products
import testloader
import wptcommandline
import wptlogging
import wpttest
from testrunner import ManagerGroup

here = os.path.split(__file__)[0]

logger = None

23"""Runner for web-platform-tests
24
25The runner has several design goals:
26
27* Tests should run with no modification from upstream.
28
29* Tests should be regarded as "untrusted" so that errors, timeouts and even
30  crashes in the tests can be handled without failing the entire test run.
31
32* For performance tests can be run in multiple browsers in parallel.
33
34The upstream repository has the facility for creating a test manifest in JSON
35format. This manifest is used directly to determine which tests exist. Local
36metadata files are used to store the expected test results.
37"""
38
def setup_logging(*args, **kwargs):
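    """Set up the module-level logger via wptlogging.setup."""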
    global logger
    logger = wptlogging.setup(*args, **kwargs)

def get_loader(test_paths, product, ssl_env, debug=None, run_info_extras=None, **kwargs):
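    """Build the run_info data and a TestLoader for the given product.

    Test selection (include/exclude/manifest and tag filters) and chunking are
    configured from kwargs; HTTPS tests are only included when the SSL
    environment supports them.
    """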
    if run_info_extras is None:
        run_info_extras = {}

    run_info = wpttest.get_run_info(kwargs["run_info"], product, debug=debug,
                                    extras=run_info_extras)

    test_manifests = testloader.ManifestLoader(test_paths, force_manifest_update=kwargs["manifest_update"]).load()

    manifest_filters = []
    meta_filters = []

    if kwargs["include"] or kwargs["exclude"] or kwargs["include_manifest"]:
        manifest_filters.append(testloader.TestFilter(include=kwargs["include"],
                                                      exclude=kwargs["exclude"],
                                                      manifest_path=kwargs["include_manifest"],
                                                      test_manifests=test_manifests))
    if kwargs["tags"]:
        meta_filters.append(testloader.TagFilter(tags=kwargs["tags"]))

    test_loader = testloader.TestLoader(test_manifests,
                                        kwargs["test_types"],
                                        run_info,
                                        manifest_filters=manifest_filters,
                                        meta_filters=meta_filters,
                                        chunk_type=kwargs["chunk_type"],
                                        total_chunks=kwargs["total_chunks"],
                                        chunk_number=kwargs["this_chunk"],
                                        include_https=ssl_env.ssl_enabled)
    return run_info, test_loader

def list_test_groups(test_paths, product, **kwargs):
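    """Print the names of the test groups that would be run, one per line."""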
    env.do_delayed_imports(logger, test_paths)

    ssl_env = env.ssl_env(logger, **kwargs)

    run_info, test_loader = get_loader(test_paths, product, ssl_env,
                                       **kwargs)

    for item in sorted(test_loader.groups(kwargs["test_types"])):
        print item


def list_disabled(test_paths, product, **kwargs):
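    """Print a JSON list of disabled tests with the reason each is disabled."""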
    env.do_delayed_imports(logger, test_paths)

    rv = []

    ssl_env = env.ssl_env(logger, **kwargs)

    run_info, test_loader = get_loader(test_paths, product, ssl_env,
                                       **kwargs)

    for test_type, tests in test_loader.disabled_tests.iteritems():
        for test in tests:
            rv.append({"test": test.id, "reason": test.disabled()})
    print json.dumps(rv, indent=2)


def get_pause_after_test(test_loader, **kwargs):
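    """Work out whether to pause after each test.

    An explicit pause_after_test setting always wins; otherwise pause only
    when running a single test exactly once.
    """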
    total_tests = sum(len(item) for item in test_loader.tests.itervalues())
    if kwargs["pause_after_test"] is None:
        if kwargs["repeat_until_unexpected"]:
            return False
        if kwargs["repeat"] == 1 and total_tests == 1:
            return True
        return False
    return kwargs["pause_after_test"]


def run_tests(config, test_paths, product, **kwargs):
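    """Run the selected tests; return True if there were no unexpected results."""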
    with wptlogging.CaptureIO(logger, not kwargs["no_capture_stdio"]):
        env.do_delayed_imports(logger, test_paths)

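        # Load the product-specific hooks: argument checking, browser and
        # executor classes (plus their kwargs factories), test environment
        # options, and any extra run_info entries.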
        (check_args,
         browser_cls, get_browser_kwargs,
         executor_classes, get_executor_kwargs,
         env_options, run_info_extras) = products.load_product(config, product)

        ssl_env = env.ssl_env(logger, **kwargs)

        check_args(**kwargs)

        if "test_loader" in kwargs:
            run_info = wpttest.get_run_info(kwargs["run_info"], product, debug=None,
                                            extras=run_info_extras(**kwargs))
            test_loader = kwargs["test_loader"]
        else:
            run_info, test_loader = get_loader(test_paths,
                                               product,
                                               ssl_env,
                                               run_info_extras=run_info_extras(**kwargs),
                                               **kwargs)

        if kwargs["run_by_dir"] is False:
            test_source_cls = testloader.SingleTestSource
            test_source_kwargs = {}
        else:
            # A value of None indicates infinite depth
            test_source_cls = testloader.PathGroupedSource
            test_source_kwargs = {"depth": kwargs["run_by_dir"]}

        logger.info("Using %i client processes" % kwargs["processes"])

        unexpected_total = 0

        kwargs["pause_after_test"] = get_pause_after_test(test_loader, **kwargs)

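        # The test environment is shared across all repetitions; failing to
        # start it is fatal for the whole run.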
        with env.TestEnvironment(test_paths,
                                 ssl_env,
                                 kwargs["pause_after_test"],
                                 kwargs["debug_info"],
                                 env_options) as test_environment:
            try:
                test_environment.ensure_started()
            except env.TestEnvironmentError as e:
                logger.critical("Error starting test environment: %s" % e.message)
                raise

            browser_kwargs = get_browser_kwargs(ssl_env=ssl_env, **kwargs)

            repeat = kwargs["repeat"]
            repeat_count = 0
            repeat_until_unexpected = kwargs["repeat_until_unexpected"]

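            # Run the whole suite repeatedly: either repeat times, or, when
            # repeat_until_unexpected is set, until an unexpected result appears.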
            while repeat_count < repeat or repeat_until_unexpected:
                repeat_count += 1
                if repeat_until_unexpected:
                    logger.info("Repetition %i" % (repeat_count))
                elif repeat > 1:
                    logger.info("Repetition %i / %i" % (repeat_count, repeat))

                unexpected_count = 0
                logger.suite_start(test_loader.test_ids, run_info)
                for test_type in kwargs["test_types"]:
                    logger.info("Running %s tests" % test_type)

                    for test in test_loader.disabled_tests[test_type]:
                        logger.test_start(test.id)
                        logger.test_end(test.id, status="SKIP")

                    executor_cls = executor_classes.get(test_type)
                    executor_kwargs = get_executor_kwargs(test_type,
                                                          test_environment.external_config,
                                                          test_environment.cache_manager,
                                                          run_info,
                                                          **kwargs)

                    if executor_cls is None:
                        logger.error("Unsupported test type %s for product %s" %
                                     (test_type, product))
                        continue

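                    # ManagerGroup runs this test type across the configured
                    # number of browser processes and reports how many
                    # unexpected results were seen.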
                    with ManagerGroup("web-platform-tests",
                                      kwargs["processes"],
                                      test_source_cls,
                                      test_source_kwargs,
                                      browser_cls,
                                      browser_kwargs,
                                      executor_cls,
                                      executor_kwargs,
                                      kwargs["pause_after_test"],
                                      kwargs["pause_on_unexpected"],
                                      kwargs["debug_info"]) as manager_group:
                        try:
                            manager_group.run(test_type, test_loader.tests)
                        except KeyboardInterrupt:
                            logger.critical("Main thread got signal")
                            manager_group.stop()
                            raise
                    unexpected_count += manager_group.unexpected_count()

                unexpected_total += unexpected_count
                logger.info("Got %i unexpected results" % unexpected_count)
                logger.suite_end()
                if repeat_until_unexpected and unexpected_total > 0:
                    break

    return unexpected_total == 0


def main():
    """Main entry point when calling from the command line"""
    kwargs = wptcommandline.parse_args()

    try:
        if kwargs["prefs_root"] is None:
            kwargs["prefs_root"] = os.path.abspath(os.path.join(here, "prefs"))

        setup_logging(kwargs, {"raw": sys.stdout})

        if kwargs["list_test_groups"]:
            list_test_groups(**kwargs)
        elif kwargs["list_disabled"]:
            list_disabled(**kwargs)
        else:
            return not run_tests(**kwargs)
    except Exception:
        if kwargs["pdb"]:
            import pdb, traceback
            print traceback.format_exc()
            pdb.post_mortem()
        else:
            raise