# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
These transforms construct a task description to run the given test, based on a
test description.  The implementation here is shared among all test kinds, but
contains specific support for how we run tests in Gecko (via mozharness,
invoked in particular ways).

This is a good place to translate a test-description option such as
`single-core: true` to the implementation of that option in a task description
(worker options, mozharness commandline, environment variables, etc.)

The test description should be fully formed by the time it reaches these
transforms, and these transforms should not embody any specific knowledge about
what should run where.  This is the wrong place for special-casing platforms,
for example - use `all_tests.py` instead.
"""


import logging
from importlib import import_module

from mozbuild.schedules import INCLUSIVE_COMPONENTS
from voluptuous import (
    Any,
    Optional,
    Required,
    Exclusive,
)

from gecko_taskgraph.optimize.schema import OptimizationSchema
from gecko_taskgraph.transforms.base import TransformSequence
from gecko_taskgraph.transforms.test.other import get_mobile_project
from gecko_taskgraph.util.schema import (
    optionally_keyed_by,
    resolve_keyed_by,
    Schema,
)
from gecko_taskgraph.util.chunking import manifest_loaders


logger = logging.getLogger(__name__)
transforms = TransformSequence()


# Schema for a test description
#
# *****WARNING*****
#
# This is a great place for baffling cruft to accumulate, and that makes
# everyone move more slowly.  Be considerate of your fellow hackers!
# See the warnings in taskcluster/docs/how-tos.rst
#
# *****WARNING*****
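#
# For orientation, a test description in a kind's YAML might look roughly like
# this (all names and values below are purely illustrative):
#
#   example-suite:
#     description: "Example test run"
#     suite: example
#     treeherder-symbol: EX
#     mozharness:
#       script: example_script.py
#       config: ["example/config.py"]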
test_description_schema = Schema(
    {
        # description of the suite, for the task metadata
        Required("description"): str,
        # test suite category and name
        Optional("suite"): Any(
            str,
            {Optional("category"): str, Optional("name"): str},
        ),
        # base work directory used to set up the task.
        Optional("workdir"): optionally_keyed_by("test-platform", Any(str, "default")),
        # the name by which this test suite is addressed in try syntax; defaults to
        # the test-name.  This will translate to the `unittest_try_name` or
        # `talos_try_name` attribute.
        Optional("try-name"): str,
        # additional tags to mark up this type of test
        Optional("tags"): {str: object},
        # the symbol, or group(symbol), under which this task should appear in
        # treeherder.
        Required("treeherder-symbol"): str,
        # the value to place in task.extra.treeherder.machine.platform; ideally
        # this is the same as build-platform, and that is the default, but in
        # practice it's not always a match.
        Optional("treeherder-machine-platform"): str,
        # attributes to appear in the resulting task (later transforms will add the
        # common attributes)
        Optional("attributes"): {str: object},
        # relative path (from config.path) to the file task was defined in
        Optional("job-from"): str,
        # The `run_on_projects` attribute, defaulting to "all".  This dictates the
        # projects on which this task should be included in the target task set.
        # See the attributes documentation for details.
        #
        # Note that the special case 'built-projects', the default, uses the parent
        # build task's run-on-projects, meaning that tests run only on platforms
        # that are built.
        Optional("run-on-projects"): optionally_keyed_by(
            "app",
            "subtest",
            "test-platform",
            "test-name",
            "variant",
            Any([str], "built-projects"),
        ),
        # When set, only run on projects where the build would already be running.
        # This ensures tasks where this is True won't be the cause of the build
        # running on a project it otherwise wouldn't have.
        Optional("built-projects-only"): bool,
        # the sheriffing tier for this task (default: set based on test platform)
        Optional("tier"): optionally_keyed_by(
            "test-platform", "variant", "app", "subtest", Any(int, "default")
        ),
        # number of chunks to create for this task.  This can be keyed by test
        # platform by passing a dictionary in the `by-test-platform` key.  If the
        # test platform is not found, the key 'default' will be tried.
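        # An illustrative (hypothetical) keyed-by value:
        #
        #   chunks:
        #     by-test-platform:
        #       windows.*: 1
        #       default: 4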
        Required("chunks"): optionally_keyed_by("test-platform", Any(int, "dynamic")),
        # Custom 'test_manifest_loader' to use, overriding the one configured in the
        # parameters. When 'null', no test chunking will be performed. Can also
        # be used to disable "manifest scheduling".
        Optional("test-manifest-loader"): Any(None, *list(manifest_loaders)),
        # the time (with unit) after which this task is deleted; default depends on
        # the branch (see below)
        Optional("expires-after"): str,
        # The different configurations that should be run against this task, defined
        # in the TEST_VARIANTS object in the variant.py transforms.
        Optional("variants"): [str],
        # Whether to run this task without any variants applied.
        Required("run-without-variant"): optionally_keyed_by("test-platform", bool),
        # The EC2 instance size to run these tests on.
        Required("instance-size"): optionally_keyed_by(
            "test-platform", Any("default", "large", "xlarge")
        ),
        # type of virtualization or hardware required by test.
        Required("virtualization"): optionally_keyed_by(
            "test-platform", Any("virtual", "virtual-with-gpu", "hardware")
        ),
        # Whether the task requires loopback audio or video (whatever that may mean
        # on the platform)
        Required("loopback-audio"): bool,
        Required("loopback-video"): bool,
        # Whether the test can run using a software GL implementation on Linux
        # using the GL compositor. May not be used with "legacy" sized instances
        # due to poor LLVMPipe performance (bug 1296086).  Defaults to true for
        # unit tests on linux platforms and false otherwise
        Optional("allow-software-gl-layers"): bool,
        # For tasks that will run in docker-worker, this is the
        # name of the docker image or in-tree docker image to run the task in.  If
        # in-tree, then a dependency will be created automatically.  This is
        # generally `desktop-test`, or an image that acts an awful lot like it.
        Required("docker-image"): optionally_keyed_by(
            "test-platform",
            Any(
                # a raw Docker image path (repo/image:tag)
                str,
                # an in-tree generated docker image (from `taskcluster/docker/<name>`)
                {"in-tree": str},
                # an indexed docker image
                {"indexed": str},
            ),
        ),
        # seconds of runtime after which the task will be killed.  Like 'chunks',
        # this can be keyed by test platform.
        Required("max-run-time"): optionally_keyed_by("test-platform", "subtest", int),
        # the exit status code that indicates the task should be retried
        Optional("retry-exit-status"): [int],
        # Whether to perform a gecko checkout.
        Required("checkout"): bool,
        # Whether to perform a machine reboot after the test is done
        Optional("reboot"): Any(False, "always", "on-exception", "on-failure"),
        # What to run
        Required("mozharness"): {
            # the mozharness script used to run this task
            Required("script"): optionally_keyed_by("test-platform", str),
            # the config files required for the task
            Required("config"): optionally_keyed_by("test-platform", [str]),
            # mochitest flavor for mochitest runs
            Optional("mochitest-flavor"): str,
            # any additional actions to pass to the mozharness command
            Optional("actions"): [str],
            # additional command-line options for mozharness, beyond those
            # automatically added
            Required("extra-options"): optionally_keyed_by("test-platform", [str]),
            # the artifact name (including path) to test on the build task; this is
            # generally set in a per-kind transformation
            Optional("build-artifact-name"): str,
            Optional("installer-url"): str,
            # If not false, tooltool downloads will be enabled via relengAPIProxy
            # for either just public files, or all files.  Not supported on Windows
            Required("tooltool-downloads"): Any(
                False,
                "public",
                "internal",
            ),
            # Add --blob-upload-branch=<project> mozharness parameter
            Optional("include-blob-upload-branch"): bool,
            # The setting for --download-symbols (if omitted, the option will not
            # be passed to mozharness)
            Optional("download-symbols"): Any(True, "ondemand"),
            # If set, then MOZ_NODE_PATH=/usr/local/bin/node is included in the
            # environment.  This is more than just a helpful path setting -- it
            # causes xpcshell tests to start additional servers, and runs
            # additional tests.
            Required("set-moz-node-path"): bool,
            # If true, include chunking information in the command even if the number
            # of chunks is 1
            Required("chunked"): optionally_keyed_by("test-platform", bool),
            Required("requires-signed-builds"): optionally_keyed_by(
                "test-platform", bool
            ),
        },
        # The set of test manifests to run.
        Optional("test-manifests"): Any(
            [str],
            {"active": [str], "skipped": [str]},
        ),
        # The current chunk (if chunking is enabled).
        Optional("this-chunk"): int,
        # os user groups for test task workers; the required scopes will be
        # added automatically
        Optional("os-groups"): optionally_keyed_by("test-platform", [str]),
        Optional("run-as-administrator"): optionally_keyed_by("test-platform", bool),
        # -- values supplied by the task-generation infrastructure
        # the platform of the build this task is testing
        Required("build-platform"): str,
        # the label of the build task generating the materials to test
        Required("build-label"): str,
        # the label of the signing task generating the materials to test.
        # Signed builds are used in xpcshell tests on Windows, for instance.
        Optional("build-signing-label"): str,
        # the build's attributes
        Required("build-attributes"): {str: object},
        # the platform on which the tests will run
        Required("test-platform"): str,
        # limit the test-platforms (as defined in test-platforms.yml)
        # that the test will run on
        Optional("limit-platforms"): optionally_keyed_by("app", "subtest", [str]),
        # the name of the test (the key in tests.yml)
        Required("test-name"): str,
        # the product name, defaults to firefox
        Optional("product"): str,
        # conditional files to determine when these tests should be run
        Exclusive("when", "optimization"): {
            Optional("files-changed"): [str],
        },
        # Optimization to perform on this task during the optimization phase.
        # Optimizations are defined in taskcluster/gecko_taskgraph/optimize.py.
        Exclusive("optimization", "optimization"): OptimizationSchema,
        # The SCHEDULES component for this task; this defaults to the suite
        # (not including the flavor) but can be overridden here.
        Exclusive("schedules-component", "optimization"): Any(
            str,
            [str],
        ),
        Optional("worker-type"): optionally_keyed_by(
            "test-platform",
            Any(str, None),
        ),
        Optional(
            "require-signed-extensions",
            description="Whether the build being tested requires extensions be signed.",
        ): optionally_keyed_by("release-type", "test-platform", bool),
        # The target name, specifying the build artifact to be tested.
        # If None or not specified, a transform sets the target based on OS:
        # target.dmg (Mac), target.apk (Android), target.tar.bz2 (Linux),
        # or target.zip (Windows).
        Optional("target"): optionally_keyed_by(
            "app",
            "test-platform",
            Any(
                str,
                None,
                {Required("index"): str, Required("name"): str},
            ),
        ),
        # A list of artifacts to install from 'fetch' tasks. Validation deferred
        # to 'job' transforms.
        Optional("fetches"): object,
        # Raptor / browsertime specific keys, defer validation to 'raptor.py'
        # transform.
        Optional("raptor"): object,
        # Raptor / browsertime specific keys that need to be here since 'raptor' schema
        # is evaluated *before* test_description_schema
        Optional("app"): str,
        Optional("subtest"): str,
        # Define if a given task supports artifact builds or not, see bug 1695325.
        Optional("supports-artifact-builds"): bool,
    }
)


@transforms.add
def handle_keyed_by_mozharness(config, tasks):
    """Resolve a mozharness field if it is keyed by something"""
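    # A keyed-by field in the incoming test description looks roughly like
    # this (the script names are placeholders, not real configuration):
    #
    #   mozharness:
    #     script:
    #       by-test-platform:
    #         android-em.*: android_script.py
    #         default: desktop_script.py
    #
    # resolve_keyed_by collapses each such field to the single value that
    # matches this task.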
    fields = [
        "mozharness",
        "mozharness.chunked",
        "mozharness.config",
        "mozharness.extra-options",
        "mozharness.requires-signed-builds",
        "mozharness.script",
    ]
    for task in tasks:
        for field in fields:
            resolve_keyed_by(
                task, field, item_name=task["test-name"], enforce_single_match=False
            )
        yield task


@transforms.add
def set_defaults(config, tasks):
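    """Fill in default values for the optional keys of each test description."""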
    for task in tasks:
        build_platform = task["build-platform"]
        if build_platform.startswith("android"):
            # all Android test tasks download internal objects from tooltool
            task["mozharness"]["tooltool-downloads"] = "internal"
            task["mozharness"]["actions"] = ["get-secrets"]

            # loopback-video is always true for Android, but false for other
            # platform phyla
            task["loopback-video"] = True
        task["mozharness"]["set-moz-node-path"] = True

        # software-gl-layers is only meaningful on linux unittests, where it defaults to True
        if task["test-platform"].startswith("linux") and task["suite"] not in [
            "talos",
            "raptor",
        ]:
            task.setdefault("allow-software-gl-layers", True)
        else:
            task["allow-software-gl-layers"] = False

        task.setdefault("try-name", task["test-name"])
        task.setdefault("os-groups", [])
        task.setdefault("run-as-administrator", False)
        task.setdefault("chunks", 1)
        task.setdefault("run-on-projects", "built-projects")
        task.setdefault("built-projects-only", False)
        task.setdefault("instance-size", "default")
        task.setdefault("max-run-time", 3600)
        task.setdefault("reboot", False)
        task.setdefault("virtualization", "virtual")
        task.setdefault("loopback-audio", False)
        task.setdefault("loopback-video", False)
        task.setdefault("limit-platforms", [])
        task.setdefault("docker-image", {"in-tree": "ubuntu1804-test"})
        task.setdefault("checkout", False)
        task.setdefault("require-signed-extensions", False)
        task.setdefault("run-without-variant", True)
        task.setdefault("variants", [])
        task.setdefault("supports-artifact-builds", True)

        task["mozharness"].setdefault("extra-options", [])
        task["mozharness"].setdefault("requires-signed-builds", False)
        task["mozharness"].setdefault("tooltool-downloads", "public")
        task["mozharness"].setdefault("set-moz-node-path", False)
        task["mozharness"].setdefault("chunked", False)
        yield task


transforms.add_validate(test_description_schema)


@transforms.add
def resolve_keys(config, tasks):
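    """Resolve top-level keyed-by fields such as `require-signed-extensions`."""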
    keys = (
        "require-signed-extensions",
        "run-without-variant",
    )
    for task in tasks:
        for key in keys:
            resolve_keyed_by(
                task,
                key,
                item_name=task["test-name"],
                enforce_single_match=False,
                **{
                    "release-type": config.params["release_type"],
                },
            )
        yield task


@transforms.add
def run_sibling_transforms(config, tasks):
    """Runs other transform files next to this module."""
    # List of modules to load transforms from in order.
    transform_modules = (
        ("variant", None),
        ("raptor", lambda t: t["suite"] == "raptor"),
        ("other", None),
        ("worker", None),
        ("fission", None),
        # These transforms should always run last as there is never any
        # difference in configuration from one chunk to another (other than
        # chunk number).
        ("chunk", None),
    )

    for task in tasks:
        xforms = TransformSequence()
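        # The sequence is built per task so that suite-specific transforms
        # (e.g. the raptor module) are only applied when their filter matches.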
        for name, filterfn in transform_modules:
            if filterfn and not filterfn(task):
                continue

            mod = import_module(f"gecko_taskgraph.transforms.test.{name}")
            xforms.add(mod.transforms)

        yield from xforms(config, [task])


@transforms.add
def make_job_description(config, tasks):
    """Convert *test* descriptions to *job* descriptions (input to
    gecko_taskgraph.transforms.job)"""

    for task in tasks:
        attributes = task.get("attributes", {})

        mobile = get_mobile_project(task)
        if mobile and (mobile not in task["test-name"]):
            label = "{}-{}-{}-{}".format(
                config.kind, task["test-platform"], mobile, task["test-name"]
            )
        else:
            label = "{}-{}-{}".format(
                config.kind, task["test-platform"], task["test-name"]
            )

        try_name = task["try-name"]
        if attributes.get("unittest_variant"):
            suffix = task.pop("variant-suffix")
            label += suffix
            try_name += suffix

        if "1proc" not in attributes.get("unittest_variant", ""):
            label += "-e10s"

        if task["chunks"] > 1:
            label += "-{}".format(task["this-chunk"])
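        # At this point the label looks roughly like (illustrative):
        #   <kind>-<test-platform>[-<mobile>]-<test-name>[-<variant>][-e10s][-<chunk>]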

        build_label = task["build-label"]

        if task["suite"] == "talos":
            attr_try_name = "talos_try_name"
        elif task["suite"] == "raptor":
            attr_try_name = "raptor_try_name"
        else:
            attr_try_name = "unittest_try_name"

        attr_build_platform, attr_build_type = task["build-platform"].split("/", 1)
        attributes.update(
            {
                "build_platform": attr_build_platform,
                "build_type": attr_build_type,
                "test_platform": task["test-platform"],
                "test_chunk": str(task["this-chunk"]),
                "supports-artifact-builds": task["supports-artifact-builds"],
                attr_try_name: try_name,
            }
        )

        if "test-manifests" in task:
            attributes["test_manifests"] = task["test-manifests"]

        jobdesc = {}
        name = "{}-{}".format(task["test-platform"], task["test-name"])
        jobdesc["name"] = name
        jobdesc["label"] = label
        jobdesc["description"] = task["description"]
        jobdesc["attributes"] = attributes
        jobdesc["dependencies"] = {"build": build_label}
        jobdesc["job-from"] = task["job-from"]

        if task.get("fetches"):
            jobdesc["fetches"] = task["fetches"]

        if task["mozharness"]["requires-signed-builds"] is True:
            jobdesc["dependencies"]["build-signing"] = task["build-signing-label"]

        if "expires-after" in task:
            jobdesc["expires-after"] = task["expires-after"]

        jobdesc["routes"] = []
        jobdesc["run-on-projects"] = sorted(task["run-on-projects"])
        jobdesc["scopes"] = []
        jobdesc["tags"] = task.get("tags", {})
        jobdesc["extra"] = {
            "chunks": {
                "current": task["this-chunk"],
                "total": task["chunks"],
            },
            "suite": attributes["unittest_suite"],
            "test-setting": task.pop("test-setting"),
        }
        jobdesc["treeherder"] = {
            "symbol": task["treeherder-symbol"],
            "kind": "test",
            "tier": task["tier"],
            "platform": task.get("treeherder-machine-platform", task["build-platform"]),
        }

        schedules = task.get("schedules-component", [])
        if task.get("when"):
            # This may still be used by comm-central.
            jobdesc["when"] = task["when"]
        elif "optimization" in task:
            jobdesc["optimization"] = task["optimization"]
        elif set(schedules) & set(INCLUSIVE_COMPONENTS):
            jobdesc["optimization"] = {"test-inclusive": schedules}
        else:
            jobdesc["optimization"] = {"test": schedules}

        run = jobdesc["run"] = {}
        run["using"] = "mozharness-test"
        run["test"] = task

        if "workdir" in task:
            run["workdir"] = task.pop("workdir")

        jobdesc["worker-type"] = task.pop("worker-type")
        if task.get("fetches"):
            jobdesc["fetches"] = task.pop("fetches")

        yield jobdesc


def normpath(path):
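    """Convert forward slashes in a path to Windows-style backslashes."""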
    return path.replace("/", "\\")


def get_firefox_version():
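    """Read the Firefox version string from browser/config/version.txt."""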
    with open("browser/config/version.txt") as f:
        return f.readline().strip()