1# This Source Code Form is subject to the terms of the Mozilla Public
2# License, v. 2.0. If a copy of the MPL was not distributed with this
3# file, You can obtain one at http://mozilla.org/MPL/2.0/.
4"""
5These transformations take a task description and turn it into a TaskCluster
6task definition (along with attributes, label, etc.).  The input to these
7transformations is generic to any kind of task, but abstracts away some of the
8complexities of worker implementations, scopes, and treeherder annotations.
9"""
10
11from __future__ import absolute_import, print_function, unicode_literals
12
13import hashlib
14import os
15import re
16import time
17from copy import deepcopy
18import six
19from six import text_type
20
21import attr
22
23from mozbuild.util import memoize
24from taskgraph.util.attributes import TRUNK_PROJECTS
25from taskgraph.util.hash import hash_path
26from taskgraph.util.treeherder import split_symbol
27from taskgraph.transforms.base import TransformSequence
28from taskgraph.util.keyed_by import evaluate_keyed_by
29from taskgraph.util.schema import (
30    validate_schema,
31    Schema,
32    optionally_keyed_by,
33    resolve_keyed_by,
34    OptimizationSchema,
35    taskref_or_string,
36)
37from taskgraph.util.partners import get_partners_to_be_published
38from taskgraph.util.scriptworker import (
39    BALROG_ACTIONS,
40    get_release_config,
41)
42from taskgraph.util.signed_artifacts import get_signed_artifacts
43from taskgraph.util.workertypes import worker_type_implementation
44from voluptuous import Any, Required, Optional, Extra, Match
45from taskgraph import GECKO, MAX_DEPENDENCIES
46from ..util import docker as dockerutil
47from ..util.workertypes import get_worker_type
48
49RUN_TASK = os.path.join(GECKO, 'taskcluster', 'scripts', 'run-task')
50
51SCCACHE_GCS_PROJECT = 'sccache-3'
52
53
54@memoize
55def _run_task_suffix():
56    """String to append to cache names under control of run-task."""
57    return hash_path(RUN_TASK)[0:20]
58
59
60def _compute_geckoview_version(app_version, moz_build_date):
61    """Geckoview version string that matches geckoview gradle configuration"""
62    # Must be synchronized with /mobile/android/geckoview/build.gradle computeVersionCode(...)
63    version_without_milestone = re.sub(r'a[0-9]', '', app_version, 1)
64    parts = version_without_milestone.split('.')
65    return "%s.%s.%s" % (parts[0], parts[1], moz_build_date)
66
67
68# A task description is a general description of a TaskCluster task
69task_description_schema = Schema({
70    # the label for this task
71    Required('label'): text_type,
72
73    # description of the task (for metadata)
74    Required('description'): text_type,
75
76    # attributes for this task
77    Optional('attributes'): {text_type: object},
78
79    # relative path (from config.path) to the file task was defined in
80    Optional('job-from'): text_type,
81
82    # dependencies of this task, keyed by name; these are passed through
83    # verbatim and subject to the interpretation of the Task's get_dependencies
84    # method.
85    Optional('dependencies'): {text_type: object},
86
87    # Soft dependencies of this task, as a list of tasks labels
88    Optional('soft-dependencies'): [text_type],
89
90    Optional('requires'): Any('all-completed', 'all-resolved'),
91
92    # expiration and deadline times, relative to task creation, with units
93    # (e.g., "14 days").  Defaults are set based on the project.
94    Optional('expires-after'): text_type,
95    Optional('deadline-after'): text_type,
96
97    # custom routes for this task; the default treeherder routes will be added
98    # automatically
99    Optional('routes'): [text_type],
100
101    # custom scopes for this task; any scopes required for the worker will be
102    # added automatically. The following parameters will be substituted in each
103    # scope:
104    #  {level} -- the scm level of this push
105    #  {project} -- the project of this push
106    Optional('scopes'): [text_type],
107
108    # Tags
109    Optional('tags'): {text_type: text_type},
110
111    # custom "task.extra" content
112    Optional('extra'): {text_type: object},
113
114    # treeherder-related information; see
115    # https://firefox-ci-tc.services.mozilla.com/schemas/taskcluster-treeherder/v1/task-treeherder-config.json
116    # If not specified, no treeherder extra information or routes will be
117    # added to the task
118    Optional('treeherder'): {
119        # either a bare symbol, or "grp(sym)".
120        'symbol': text_type,
121
122        # the job kind
123        'kind': Any('build', 'test', 'other'),
124
125        # tier for this task
126        'tier': int,
127
128        # task platform, in the form platform/collection, used to set
129        # treeherder.machine.platform and treeherder.collection or
130        # treeherder.labels
131        'platform': Match('^[A-Za-z0-9_-]{1,50}/[A-Za-z0-9_-]{1,50}$'),
132    },
133
134    # information for indexing this build so its artifacts can be discovered;
135    # if omitted, the build will not be indexed.
136    Optional('index'): {
137        # the name of the product this build produces
138        'product': text_type,
139
140        # the names to use for this job in the TaskCluster index
141        'job-name': text_type,
142
143        # Type of gecko v2 index to use
144        'type': Any('generic', 'l10n',
145                    'shippable', 'shippable-l10n',
146                    'android-shippable', 'android-shippable-with-multi-l10n',
147                    'shippable-with-multi-l10n'),
148
149        # The rank that the task will receive in the TaskCluster
150        # index.  A newly completed task supercedes the currently
151        # indexed task iff it has a higher rank.  If unspecified,
152        # 'by-tier' behavior will be used.
153        'rank': Any(
154            # Rank is equal the timestamp of the build_date for tier-1
155            # tasks, and zero for non-tier-1.  This sorts tier-{2,3}
156            # builds below tier-1 in the index.
157            'by-tier',
158
159            # Rank is given as an integer constant (e.g. zero to make
160            # sure a task is last in the index).
161            int,
162
163            # Rank is equal to the timestamp of the build_date.  This
164            # option can be used to override the 'by-tier' behavior
165            # for non-tier-1 tasks.
166            'build_date',
167        ),
168    },
169
170    # The `run_on_projects` attribute, defaulting to "all".  This dictates the
171    # projects on which this task should be included in the target task set.
172    # See the attributes documentation for details.
173    Optional('run-on-projects'): optionally_keyed_by('build-platform', [text_type]),
174
175    # Like `run_on_projects`, `run-on-hg-branches` defaults to "all".
176    Optional('run-on-hg-branches'): optionally_keyed_by('project', [text_type]),
177
178    # The `shipping_phase` attribute, defaulting to None. This specifies the
179    # release promotion phase that this task belongs to.
180    Required('shipping-phase'): Any(
181        None,
182        'build',
183        'promote',
184        'push',
185        'ship',
186    ),
187
188    # The `shipping_product` attribute, defaulting to None. This specifies the
189    # release promotion product that this task belongs to.
190    Required('shipping-product'): Any(
191        None,
192        text_type
193    ),
194
195    # The `always-target` attribute will cause the task to be included in the
196    # target_task_graph regardless of filtering. Tasks included in this manner
197    # will be candidates for optimization even when `optimize_target_tasks` is
198    # False, unless the task was also explicitly chosen by the target_tasks
199    # method.
200    Required('always-target'): bool,
201
202    # Optimization to perform on this task during the optimization phase.
203    # Optimizations are defined in taskcluster/taskgraph/optimize.py.
204    Required('optimization'): OptimizationSchema,
205
206    # the provisioner-id/worker-type for the task.  The following parameters will
207    # be substituted in this string:
208    #  {level} -- the scm level of this push
209    'worker-type': text_type,
210
211    # Whether the job should use sccache compiler caching.
212    Required('needs-sccache'): bool,
213
214    # Set of artifacts relevant to release tasks
215    Optional('release-artifacts'): [text_type],
216
217    # information specific to the worker implementation that will run this task
218    Optional('worker'): {
219        Required('implementation'): text_type,
220        Extra: object,
221    },
222
223    # Override the default priority for the project
224    Optional('priority'): text_type,
225})
226
227TC_TREEHERDER_SCHEMA_URL = 'https://github.com/taskcluster/taskcluster-treeherder/' \
228                           'blob/master/schemas/task-treeherder-config.yml'
229
230
231UNKNOWN_GROUP_NAME = "Treeherder group {} (from {}) has no name; " \
232                     "add it to taskcluster/ci/config.yml"
233
234V2_ROUTE_TEMPLATES = [
235    "index.{trust-domain}.v2.{project}.latest.{product}.{job-name}",
236    "index.{trust-domain}.v2.{project}.pushdate.{build_date_long}.{product}.{job-name}",
237    "index.{trust-domain}.v2.{project}.pushdate.{build_date}.latest.{product}.{job-name}",
238    "index.{trust-domain}.v2.{project}.pushlog-id.{pushlog_id}.{product}.{job-name}",
239    "index.{trust-domain}.v2.{project}.revision.{branch_rev}.{product}.{job-name}",
240]
241
242# {central, inbound, autoland} write to a "trunk" index prefix. This facilitates
243# walking of tasks with similar configurations.
244V2_TRUNK_ROUTE_TEMPLATES = [
245    "index.{trust-domain}.v2.trunk.revision.{branch_rev}.{product}.{job-name}",
246]
247
248V2_SHIPPABLE_TEMPLATES = [
249    "index.{trust-domain}.v2.{project}.shippable.latest.{product}.{job-name}",
250    "index.{trust-domain}.v2.{project}.shippable.{build_date}.revision.{branch_rev}.{product}.{job-name}",  # noqa - too long
251    "index.{trust-domain}.v2.{project}.shippable.{build_date}.latest.{product}.{job-name}",
252    "index.{trust-domain}.v2.{project}.shippable.revision.{branch_rev}.{product}.{job-name}",
253]
254
255V2_SHIPPABLE_L10N_TEMPLATES = [
256    "index.{trust-domain}.v2.{project}.shippable.latest.{product}-l10n.{job-name}.{locale}",
257    "index.{trust-domain}.v2.{project}.shippable.{build_date}.revision.{branch_rev}.{product}-l10n.{job-name}.{locale}",  # noqa - too long
258    "index.{trust-domain}.v2.{project}.shippable.{build_date}.latest.{product}-l10n.{job-name}.{locale}",  # noqa - too long
259    "index.{trust-domain}.v2.{project}.shippable.revision.{branch_rev}.{product}-l10n.{job-name}.{locale}",  # noqa - too long
260]
261
262V2_L10N_TEMPLATES = [
263    "index.{trust-domain}.v2.{project}.revision.{branch_rev}.{product}-l10n.{job-name}.{locale}",
264    "index.{trust-domain}.v2.{project}.pushdate.{build_date_long}.{product}-l10n.{job-name}.{locale}",  # noqa - too long
265    "index.{trust-domain}.v2.{project}.pushlog-id.{pushlog_id}.{product}-l10n.{job-name}.{locale}",
266    "index.{trust-domain}.v2.{project}.latest.{product}-l10n.{job-name}.{locale}",
267]
268
269# This index is specifically for builds that include geckoview releases,
270# so we can hard-code the project to "geckoview"
271V2_GECKOVIEW_RELEASE = "index.{trust-domain}.v2.{project}.geckoview-version.{geckoview-version}.{product}.{job-name}"  # noqa - too long
272
273# the roots of the treeherder routes
274TREEHERDER_ROUTE_ROOT = 'tc-treeherder'
275
276
277def get_branch_rev(config):
278    return config.params['{}head_rev'.format(
279        config.graph_config['project-repo-param-prefix']
280    )]
281
282
283def get_branch_repo(config):
284    return config.params['{}head_repository'.format(
285        config.graph_config['project-repo-param-prefix'],
286    )]
287
288
289@memoize
290def get_default_priority(graph_config, project):
291    return evaluate_keyed_by(
292        graph_config['task-priority'],
293        "Graph Config",
294        {'project': project}
295    )
296
297
298# define a collection of payload builders, depending on the worker implementation
299payload_builders = {}
300
301
302@attr.s(frozen=True)
303class PayloadBuilder(object):
304    schema = attr.ib(type=Schema)
305    builder = attr.ib()
306
307
308def payload_builder(name, schema):
309    schema = Schema({Required('implementation'): name, Optional('os'): text_type}).extend(schema)
310
311    def wrap(func):
312        payload_builders[name] = PayloadBuilder(schema, func)
313        return func
314    return wrap
315
316
317# define a collection of index builders, depending on the type implementation
318index_builders = {}
319
320
321def index_builder(name):
322    def wrap(func):
323        index_builders[name] = func
324        return func
325    return wrap
326
327
328UNSUPPORTED_INDEX_PRODUCT_ERROR = """\
329The gecko-v2 product {product} is not in the list of configured products in
330`taskcluster/ci/config.yml'.
331"""
332
333
334def verify_index(config, index):
335    product = index['product']
336    if product not in config.graph_config['index']['products']:
337        raise Exception(UNSUPPORTED_INDEX_PRODUCT_ERROR.format(product=product))
338
339
340@payload_builder('docker-worker', schema={
341    Required('os'): 'linux',
342
343    # For tasks that will run in docker-worker, this is the
344    # name of the docker image or in-tree docker image to run the task in.  If
345    # in-tree, then a dependency will be created automatically.  This is
346    # generally `desktop-test`, or an image that acts an awful lot like it.
347    Required('docker-image'): Any(
348        # a raw Docker image path (repo/image:tag)
349        text_type,
350        # an in-tree generated docker image (from `taskcluster/docker/<name>`)
351        {'in-tree': text_type},
352        # an indexed docker image
353        {'indexed': text_type},
354    ),
355
356    # worker features that should be enabled
357    Required('chain-of-trust'): bool,
358    Required('taskcluster-proxy'): bool,
359    Required('allow-ptrace'): bool,
360    Required('loopback-video'): bool,
361    Required('loopback-audio'): bool,
362    Required('docker-in-docker'): bool,  # (aka 'dind')
363    Required('privileged'): bool,
364
365    # Paths to Docker volumes.
366    #
367    # For in-tree Docker images, volumes can be parsed from Dockerfile.
368    # This only works for the Dockerfile itself: if a volume is defined in
369    # a base image, it will need to be declared here. Out-of-tree Docker
370    # images will also require explicit volume annotation.
371    #
372    # Caches are often mounted to the same path as Docker volumes. In this
373    # case, they take precedence over a Docker volume. But a volume still
374    # needs to be declared for the path.
375    Optional('volumes'): [text_type],
376    Optional(
377        "required-volumes",
378        description=(
379            "Paths that are required to be volumes for performance reasons. "
380            "For in-tree images, these paths will be checked to verify that they "
381            "are defined as volumes."
382        ),
383    ): [text_type],
384
385    # caches to set up for the task
386    Optional('caches'): [{
387        # only one type is supported by any of the workers right now
388        'type': 'persistent',
389
390        # name of the cache, allowing re-use by subsequent tasks naming the
391        # same cache
392        'name': text_type,
393
394        # location in the task image where the cache will be mounted
395        'mount-point': text_type,
396
397        # Whether the cache is not used in untrusted environments
398        # (like the Try repo).
399        Optional('skip-untrusted'): bool,
400    }],
401
402    # artifacts to extract from the task image after completion
403    Optional('artifacts'): [{
404        # type of artifact -- simple file, or recursive directory
405        'type': Any('file', 'directory'),
406
407        # task image path from which to read artifact
408        'path': text_type,
409
410        # name of the produced artifact (root of the names for
411        # type=directory)
412        'name': text_type,
413    }],
414
415    # environment variables
416    Required('env'): {text_type: taskref_or_string},
417
418    # the command to run; if not given, docker-worker will default to the
419    # command in the docker image
420    Optional('command'): [taskref_or_string],
421
422    # the maximum time to run, in seconds
423    Required('max-run-time'): int,
424
425    # the exit status code(s) that indicates the task should be retried
426    Optional('retry-exit-status'): [int],
427
428    # the exit status code(s) that indicates the caches used by the task
429    # should be purged
430    Optional('purge-caches-exit-status'): [int],
431
432    # Wether any artifacts are assigned to this worker
433    Optional('skip-artifacts'): bool,
434})
435def build_docker_worker_payload(config, task, task_def):
436    worker = task['worker']
437    level = int(config.params['level'])
438
439    image = worker['docker-image']
440    if isinstance(image, dict):
441        if 'in-tree' in image:
442            name = image['in-tree']
443            docker_image_task = 'build-docker-image-' + image['in-tree']
444            task.setdefault('dependencies', {})['docker-image'] = docker_image_task
445
446            image = {
447                "path": "public/image.tar.zst",
448                "taskId": {"task-reference": "<docker-image>"},
449                "type": "task-image",
450            }
451
452            # Find VOLUME in Dockerfile.
453            volumes = dockerutil.parse_volumes(name)
454            for v in sorted(volumes):
455                if v in worker['volumes']:
456                    raise Exception('volume %s already defined; '
457                                    'if it is defined in a Dockerfile, '
458                                    'it does not need to be specified in the '
459                                    'worker definition' % v)
460
461                worker['volumes'].append(v)
462
463        elif 'indexed' in image:
464            image = {
465                "path": "public/image.tar.zst",
466                "namespace": image['indexed'],
467                "type": "indexed-image",
468            }
469        else:
470            raise Exception("unknown docker image type")
471
472    features = {}
473
474    if worker.get('taskcluster-proxy'):
475        features['taskclusterProxy'] = True
476
477    if worker.get('allow-ptrace'):
478        features['allowPtrace'] = True
479        task_def['scopes'].append('docker-worker:feature:allowPtrace')
480
481    if worker.get('chain-of-trust'):
482        features['chainOfTrust'] = True
483
484    if worker.get('docker-in-docker'):
485        features['dind'] = True
486
487    if task.get('needs-sccache'):
488        features['taskclusterProxy'] = True
489        task_def['scopes'].append(
490            'assume:project:taskcluster:{trust_domain}:level-{level}-sccache-buckets'.format(
491                trust_domain=config.graph_config['trust-domain'],
492                level=config.params['level'])
493        )
494        worker['env']['USE_SCCACHE'] = '1'
495        worker['env']['SCCACHE_GCS_PROJECT'] = SCCACHE_GCS_PROJECT
496        # Disable sccache idle shutdown.
497        worker['env']['SCCACHE_IDLE_TIMEOUT'] = '0'
498    else:
499        worker['env']['SCCACHE_DISABLE'] = '1'
500
501    capabilities = {}
502
503    for lo in 'audio', 'video':
504        if worker.get('loopback-' + lo):
505            capitalized = 'loopback' + lo.capitalize()
506            devices = capabilities.setdefault('devices', {})
507            devices[capitalized] = True
508            task_def['scopes'].append('docker-worker:capability:device:' + capitalized)
509
510    if worker.get('privileged'):
511        capabilities['privileged'] = True
512        task_def['scopes'].append('docker-worker:capability:privileged')
513
514    task_def['payload'] = payload = {
515        'image': image,
516        'env': worker['env'],
517    }
518    if 'command' in worker:
519        payload['command'] = worker['command']
520
521    if 'max-run-time' in worker:
522        payload['maxRunTime'] = worker['max-run-time']
523
524    run_task = payload.get('command', [''])[0].endswith('run-task')
525
526    # run-task exits EXIT_PURGE_CACHES if there is a problem with caches.
527    # Automatically retry the tasks and purge caches if we see this exit
528    # code.
529    # TODO move this closer to code adding run-task once bug 1469697 is
530    # addressed.
531    if run_task:
532        worker.setdefault('retry-exit-status', []).append(72)
533        worker.setdefault('purge-caches-exit-status', []).append(72)
534
535    payload['onExitStatus'] = {}
536    if 'retry-exit-status' in worker:
537        payload['onExitStatus']['retry'] = worker['retry-exit-status']
538    if 'purge-caches-exit-status' in worker:
539        payload['onExitStatus']['purgeCaches'] = worker['purge-caches-exit-status']
540
541    if 'artifacts' in worker:
542        artifacts = {}
543        for artifact in worker['artifacts']:
544            artifacts[artifact['name']] = {
545                'path': artifact['path'],
546                'type': artifact['type'],
547                'expires': task_def['expires'],  # always expire with the task
548            }
549        payload['artifacts'] = artifacts
550
551    if isinstance(worker.get('docker-image'), text_type):
552        out_of_tree_image = worker['docker-image']
553        run_task = run_task or out_of_tree_image.startswith(
554            'taskcluster/image_builder')
555    else:
556        out_of_tree_image = None
557        image = worker.get('docker-image', {}).get('in-tree')
558        run_task = run_task or image == 'image_builder'
559
560    if 'caches' in worker:
561        caches = {}
562
563        # run-task knows how to validate caches.
564        #
565        # To help ensure new run-task features and bug fixes don't interfere
566        # with existing caches, we seed the hash of run-task into cache names.
567        # So, any time run-task changes, we should get a fresh set of caches.
568        # This means run-task can make changes to cache interaction at any time
569        # without regards for backwards or future compatibility.
570        #
571        # But this mechanism only works for in-tree Docker images that are built
572        # with the current run-task! For out-of-tree Docker images, we have no
573        # way of knowing their content of run-task. So, in addition to varying
574        # cache names by the contents of run-task, we also take the Docker image
575        # name into consideration. This means that different Docker images will
576        # never share the same cache. This is a bit unfortunate. But it is the
577        # safest thing to do. Fortunately, most images are defined in-tree.
578        #
579        # For out-of-tree Docker images, we don't strictly need to incorporate
580        # the run-task content into the cache name. However, doing so preserves
581        # the mechanism whereby changing run-task results in new caches
582        # everywhere.
583
584        # As an additional mechanism to force the use of different caches, the
585        # string literal in the variable below can be changed. This is
586        # preferred to changing run-task because it doesn't require images
587        # to be rebuilt.
588        cache_version = 'v3'
589
590        if run_task:
591            suffix = '{}-{}'.format(cache_version, _run_task_suffix())
592
593            if out_of_tree_image:
594                name_hash = hashlib.sha256(
595                    six.ensure_binary(out_of_tree_image)).hexdigest()
596                suffix += name_hash[0:12]
597
598        else:
599            suffix = cache_version
600
601        skip_untrusted = config.params.is_try() or level == 1
602
603        for cache in worker['caches']:
604            # Some caches aren't enabled in environments where we can't
605            # guarantee certain behavior. Filter those out.
606            if cache.get('skip-untrusted') and skip_untrusted:
607                continue
608
609            name = '{trust_domain}-level-{level}-{name}-{suffix}'.format(
610                trust_domain=config.graph_config['trust-domain'],
611                level=config.params['level'],
612                name=cache['name'],
613                suffix=suffix,
614            )
615
616            caches[name] = cache['mount-point']
617            task_def['scopes'].append('docker-worker:cache:%s' % name)
618
619        # Assertion: only run-task is interested in this.
620        if run_task:
621            payload['env']['TASKCLUSTER_CACHES'] = ';'.join(sorted(
622                caches.values()))
623
624        payload['cache'] = caches
625
626    # And send down volumes information to run-task as well.
627    if run_task and worker.get('volumes'):
628        payload['env']['TASKCLUSTER_VOLUMES'] = ';'.join(
629            [six.ensure_text(s) for s in sorted(worker['volumes'])])
630
631    if payload.get('cache') and skip_untrusted:
632        payload['env']['TASKCLUSTER_UNTRUSTED_CACHES'] = '1'
633
634    if features:
635        payload['features'] = features
636    if capabilities:
637        payload['capabilities'] = capabilities
638
639    check_caches_are_volumes(task)
640    check_required_volumes(task)
641
642
643@payload_builder('generic-worker', schema={
644    Required('os'): Any('windows', 'macosx', 'linux', 'linux-bitbar'),
645    # see http://schemas.taskcluster.net/generic-worker/v1/payload.json
646    # and https://docs.taskcluster.net/reference/workers/generic-worker/payload
647
648    # command is a list of commands to run, sequentially
649    # on Windows, each command is a string, on OS X and Linux, each command is
650    # a string array
651    Required('command'): Any(
652        [taskref_or_string],   # Windows
653        [[taskref_or_string]]  # Linux / OS X
654    ),
655
656    # artifacts to extract from the task image after completion; note that artifacts
657    # for the generic worker cannot have names
658    Optional('artifacts'): [{
659        # type of artifact -- simple file, or recursive directory
660        'type': Any('file', 'directory'),
661
662        # filesystem path from which to read artifact
663        'path': text_type,
664
665        # if not specified, path is used for artifact name
666        Optional('name'): text_type
667    }],
668
669    # Directories and/or files to be mounted.
670    # The actual allowed combinations are stricter than the model below,
671    # but this provides a simple starting point.
672    # See https://docs.taskcluster.net/reference/workers/generic-worker/payload
673    Optional('mounts'): [{
674        # A unique name for the cache volume, implies writable cache directory
675        # (otherwise mount is a read-only file or directory).
676        Optional('cache-name'): text_type,
677        # Optional content for pre-loading cache, or mandatory content for
678        # read-only file or directory. Pre-loaded content can come from either
679        # a task artifact or from a URL.
680        Optional('content'): {
681
682            # *** Either (artifact and task-id) or url must be specified. ***
683
684            # Artifact name that contains the content.
685            Optional('artifact'): text_type,
686            # Task ID that has the artifact that contains the content.
687            Optional('task-id'): taskref_or_string,
688            # URL that supplies the content in response to an unauthenticated
689            # GET request.
690            Optional('url'): text_type
691        },
692
693        # *** Either file or directory must be specified. ***
694
695        # If mounting a cache or read-only directory, the filesystem location of
696        # the directory should be specified as a relative path to the task
697        # directory here.
698        Optional('directory'): text_type,
699        # If mounting a file, specify the relative path within the task
700        # directory to mount the file (the file will be read only).
701        Optional('file'): text_type,
702        # Required if and only if `content` is specified and mounting a
703        # directory (not a file). This should be the archive format of the
704        # content (either pre-loaded cache or read-only directory).
705        Optional('format'): Any('rar', 'tar.bz2', 'tar.gz', 'zip')
706    }],
707
708    # environment variables
709    Required('env'): {text_type: taskref_or_string},
710
711    # the maximum time to run, in seconds
712    Required('max-run-time'): int,
713
714    # os user groups for test task workers
715    Optional('os-groups'): [text_type],
716
717    # feature for test task to run as administarotr
718    Optional('run-as-administrator'): bool,
719
720    # optional features
721    Required('chain-of-trust'): bool,
722    Optional('taskcluster-proxy'): bool,
723
724    # the exit status code(s) that indicates the task should be retried
725    Optional('retry-exit-status'): [int],
726
727    # Wether any artifacts are assigned to this worker
728    Optional('skip-artifacts'): bool,
729})
730def build_generic_worker_payload(config, task, task_def):
731    worker = task['worker']
732    features = {}
733
734    task_def['payload'] = {
735        'command': worker['command'],
736        'maxRunTime': worker['max-run-time'],
737    }
738
739    if worker['os'] == 'windows':
740        task_def['payload']['onExitStatus'] = {
741            'retry': [
742                # These codes (on windows) indicate a process interruption,
743                # rather than a task run failure. See bug 1544403.
744                1073807364,  # process force-killed due to system shutdown
745                3221225786,  # sigint (any interrupt)
746            ]
747        }
748    if 'retry-exit-status' in worker:
749        task_def['payload'].setdefault(
750            'onExitStatus', {}).setdefault('retry', []).extend(worker['retry-exit-status'])
751    if worker['os'] == 'linux-bitbar':
752        task_def['payload'].setdefault('onExitStatus', {}).setdefault('retry', [])
753        # exit code 4 is used to indicate an intermittent android device error
754        if 4 not in task_def['payload']['onExitStatus']['retry']:
755            task_def['payload']['onExitStatus']['retry'].extend([4])
756
757    env = worker.get('env', {})
758
759    if task.get('needs-sccache'):
760        features['taskclusterProxy'] = True
761        task_def['scopes'].append(
762            'assume:project:taskcluster:{trust_domain}:level-{level}-sccache-buckets'.format(
763                trust_domain=config.graph_config['trust-domain'],
764                level=config.params['level'])
765        )
766        env['USE_SCCACHE'] = '1'
767        worker['env']['SCCACHE_GCS_PROJECT'] = SCCACHE_GCS_PROJECT
768        # Disable sccache idle shutdown.
769        env['SCCACHE_IDLE_TIMEOUT'] = '0'
770    else:
771        env['SCCACHE_DISABLE'] = '1'
772
773    if env:
774        task_def['payload']['env'] = env
775
776    artifacts = []
777
778    for artifact in worker.get('artifacts', []):
779        a = {
780            'path': artifact['path'],
781            'type': artifact['type'],
782        }
783        if 'name' in artifact:
784            a['name'] = artifact['name']
785        artifacts.append(a)
786
787    if artifacts:
788        task_def['payload']['artifacts'] = artifacts
789
790    # Need to copy over mounts, but rename keys to respect naming convention
791    #   * 'cache-name' -> 'cacheName'
792    #   * 'task-id'    -> 'taskId'
793    # All other key names are already suitable, and don't need renaming.
794    mounts = deepcopy(worker.get('mounts', []))
795    for mount in mounts:
796        if 'cache-name' in mount:
797            mount['cacheName'] = '{trust_domain}-level-{level}-{name}'.format(
798                trust_domain=config.graph_config['trust-domain'],
799                level=config.params['level'],
800                name=mount.pop('cache-name'),
801            )
802            task_def['scopes'].append('generic-worker:cache:{}'.format(mount['cacheName']))
803        if 'content' in mount:
804            if 'task-id' in mount['content']:
805                mount['content']['taskId'] = mount['content'].pop('task-id')
806            if 'artifact' in mount['content']:
807                if not mount['content']['artifact'].startswith('public/'):
808                    task_def['scopes'].append(
809                        'queue:get-artifact:{}'.format(mount['content']['artifact']))
810
811    if mounts:
812        task_def['payload']['mounts'] = mounts
813
814    if worker.get('os-groups'):
815        task_def['payload']['osGroups'] = worker['os-groups']
816        task_def['scopes'].extend(
817            ['generic-worker:os-group:{}/{}'.format(
818                task['worker-type'],
819                group
820            ) for group in worker['os-groups']])
821
822    if worker.get('chain-of-trust'):
823        features['chainOfTrust'] = True
824
825    if worker.get('taskcluster-proxy'):
826        features['taskclusterProxy'] = True
827
828    if worker.get('run-as-administrator', False):
829        features['runAsAdministrator'] = True
830        task_def['scopes'].append(
831            'generic-worker:run-as-administrator:{}'.format(task['worker-type']),
832        )
833
834    if features:
835        task_def['payload']['features'] = features
836
837
838@payload_builder('scriptworker-signing', schema={
839    # the maximum time to run, in seconds
840    Required('max-run-time'): int,
841
842    # list of artifact URLs for the artifacts that should be signed
843    Required('upstream-artifacts'): [{
844        # taskId of the task with the artifact
845        Required('taskId'): taskref_or_string,
846
847        # type of signing task (for CoT)
848        Required('taskType'): text_type,
849
850        # Paths to the artifacts to sign
851        Required('paths'): [text_type],
852
853        # Signing formats to use on each of the paths
854        Required('formats'): [text_type],
855    }],
856
857    # behavior for mac iscript
858    Optional('mac-behavior'): Any(
859        "mac_notarize_part_1", "mac_notarize_part_3", "mac_sign_and_pkg",
860        "mac_geckodriver",
861    ),
862    Optional('entitlements-url'): text_type,
863})
864def build_scriptworker_signing_payload(config, task, task_def):
865    worker = task['worker']
866
867    task_def['payload'] = {
868        'maxRunTime': worker['max-run-time'],
869        'upstreamArtifacts':  worker['upstream-artifacts']
870    }
871    if worker.get('mac-behavior'):
872        task_def['payload']['behavior'] = worker['mac-behavior']
873        if worker.get('entitlements-url'):
874            task_def['payload']['entitlements-url'] = worker['entitlements-url']
875    artifacts = set(task.get('release-artifacts', []))
876    for upstream_artifact in worker['upstream-artifacts']:
877        for path in upstream_artifact['paths']:
878            artifacts.update(get_signed_artifacts(
879                input=path,
880                formats=upstream_artifact['formats'],
881                behavior=worker.get('mac-behavior'),
882            ))
883    task['release-artifacts'] = list(artifacts)
884
885
886@payload_builder('notarization-poller', schema={
887    Required('uuid-manifest'): taskref_or_string,
888})
889def notarization_poller_payload(config, task, task_def):
890    worker = task['worker']
891    task_def['payload'] = {
892        'uuid_manifest':  worker['uuid-manifest']
893    }
894
895
896@payload_builder('beetmover', schema={
897    # the maximum time to run, in seconds
898    Required('max-run-time', default=600): int,
899
900    # locale key, if this is a locale beetmover job
901    Optional('locale'): text_type,
902
903    Optional('partner-public'): bool,
904
905    Required('release-properties'): {
906        'app-name': text_type,
907        'app-version': text_type,
908        'branch': text_type,
909        'build-id': text_type,
910        'hash-type': text_type,
911        'platform': text_type,
912    },
913
914    # list of artifact URLs for the artifacts that should be beetmoved
915    Required('upstream-artifacts'): [{
916        # taskId of the task with the artifact
917        Required('taskId'): taskref_or_string,
918
919        # type of signing task (for CoT)
920        Required('taskType'): text_type,
921
922        # Paths to the artifacts to sign
923        Required('paths'): [text_type],
924
925        # locale is used to map upload path and allow for duplicate simple names
926        Required('locale'): text_type,
927    }],
928    Optional('artifact-map'): object,
929})
930def build_beetmover_payload(config, task, task_def):
931    worker = task['worker']
932    release_config = get_release_config(config)
933    release_properties = worker['release-properties']
934
935    task_def['payload'] = {
936        'maxRunTime': worker['max-run-time'],
937        'releaseProperties': {
938            'appName': release_properties['app-name'],
939            'appVersion': release_properties['app-version'],
940            'branch': release_properties['branch'],
941            'buildid': release_properties['build-id'],
942            'hashType': release_properties['hash-type'],
943            'platform': release_properties['platform'],
944        },
945        'upload_date': config.params['build_date'],
946        'upstreamArtifacts':  worker['upstream-artifacts'],
947    }
948    if worker.get('locale'):
949        task_def['payload']['locale'] = worker['locale']
950    if worker.get('artifact-map'):
951        task_def['payload']['artifactMap'] = worker['artifact-map']
952    if worker.get('partner-public'):
953        task_def['payload']['is_partner_repack_public'] = worker['partner-public']
954    if release_config:
955        task_def['payload'].update(release_config)
956
957
958@payload_builder('beetmover-push-to-release', schema={
959    # the maximum time to run, in seconds
960    Required('max-run-time'): int,
961    Required('product'): text_type,
962})
963def build_beetmover_push_to_release_payload(config, task, task_def):
964    worker = task['worker']
965    release_config = get_release_config(config)
966    partners = ['{}/{}'.format(p, s) for p, s, _ in get_partners_to_be_published(config)]
967
968    task_def['payload'] = {
969        'maxRunTime': worker['max-run-time'],
970        'product': worker['product'],
971        'version': release_config['version'],
972        'build_number': release_config['build_number'],
973        'partners': partners,
974    }
975
976
977@payload_builder('beetmover-maven', schema={
978    Required('max-run-time', default=600): int,
979    Required('release-properties'): {
980        'app-name': text_type,
981        'app-version': text_type,
982        'branch': text_type,
983        'build-id': text_type,
984        'artifact-id': text_type,
985        'hash-type': text_type,
986        'platform': text_type,
987    },
988
989    Required('upstream-artifacts'): [{
990        Required('taskId'): taskref_or_string,
991        Required('taskType'): text_type,
992        Required('paths'): [text_type],
993        Required('zipExtract', default=False): bool,
994    }],
995    Optional('artifact-map'): object,
996})
997def build_beetmover_maven_payload(config, task, task_def):
998    build_beetmover_payload(config, task, task_def)
999
1000    task_def['payload']['artifact_id'] = task['worker']['release-properties']['artifact-id']
1001    if task['worker'].get('artifact-map'):
1002        task_def['payload']['artifactMap'] = task['worker']['artifact-map']
1003
1004    del task_def['payload']['releaseProperties']['hashType']
1005    del task_def['payload']['releaseProperties']['platform']
1006
1007
1008@payload_builder('balrog', schema={
1009    Required('balrog-action'): Any(*BALROG_ACTIONS),
1010    Optional('product'): text_type,
1011    Optional('platforms'): [text_type],
1012    Optional('release-eta'): text_type,
1013    Optional('channel-names'): optionally_keyed_by('release-type', [text_type]),
1014    Optional('require-mirrors'): bool,
1015    Optional('publish-rules'): optionally_keyed_by('release-type', 'release-level', [int]),
1016    Optional('rules-to-update'): optionally_keyed_by(
1017        'release-type', 'release-level', [text_type]),
1018    Optional('archive-domain'): optionally_keyed_by('release-level', text_type),
1019    Optional('download-domain'): optionally_keyed_by('release-level', text_type),
1020    Optional('blob-suffix'): text_type,
1021    Optional('complete-mar-filename-pattern'): text_type,
1022    Optional('complete-mar-bouncer-product-pattern'): text_type,
1023    Optional('update-line'): object,
1024    Optional('suffixes'): [text_type],
1025    Optional('background-rate'): optionally_keyed_by(
1026        'release-type', 'beta-number', Any(int, None)),
1027    Optional('force-fallback-mapping-update'): optionally_keyed_by(
1028        'release-type', 'beta-number', bool),
1029
1030
1031    # list of artifact URLs for the artifacts that should be beetmoved
1032    Optional('upstream-artifacts'): [{
1033        # taskId of the task with the artifact
1034        Required('taskId'): taskref_or_string,
1035
1036        # type of signing task (for CoT)
1037        Required('taskType'): text_type,
1038
1039        # Paths to the artifacts to sign
1040        Required('paths'): [text_type],
1041    }],
1042})
1043def build_balrog_payload(config, task, task_def):
1044    worker = task['worker']
1045    release_config = get_release_config(config)
1046    beta_number = None
1047    if 'b' in release_config['version']:
1048        beta_number = release_config['version'].split('b')[-1]
1049
1050    if worker['balrog-action'] == 'submit-locale' or worker['balrog-action'] == 'v2-submit-locale':
1051        task_def['payload'] = {
1052            'upstreamArtifacts':  worker['upstream-artifacts'],
1053            'suffixes': worker['suffixes'],
1054        }
1055    else:
1056        for prop in ('archive-domain', 'channel-names', 'download-domain',
1057                     'publish-rules', 'rules-to-update', 'background-rate',
1058                     'force-fallback-mapping-update'):
1059            if prop in worker:
1060                resolve_keyed_by(
1061                    worker, prop, task['description'],
1062                    **{
1063                        'release-type': config.params['release_type'],
1064                        'release-level': config.params.release_level(),
1065                        'beta-number': beta_number,
1066                    }
1067                )
1068        task_def['payload'] = {
1069            'build_number': release_config['build_number'],
1070            'product': worker['product'],
1071            'version': release_config['version'],
1072        }
1073        for prop in ('blob-suffix', 'complete-mar-filename-pattern',
1074                     'complete-mar-bouncer-product-pattern'):
1075            if prop in worker:
1076                task_def['payload'][prop.replace('-', '_')] = worker[prop]
1077        if worker['balrog-action'] == 'submit-toplevel' or \
1078                worker['balrog-action'] == 'v2-submit-toplevel':
1079            task_def['payload'].update({
1080                'app_version': release_config['appVersion'],
1081                'archive_domain': worker['archive-domain'],
1082                'channel_names': worker['channel-names'],
1083                'download_domain': worker['download-domain'],
1084                'partial_versions': release_config.get('partial_versions', ""),
1085                'platforms': worker['platforms'],
1086                'rules_to_update': worker['rules-to-update'],
1087                'require_mirrors': worker['require-mirrors'],
1088                'update_line': worker['update-line'],
1089            })
1090        else:  # schedule / ship
1091            task_def['payload'].update({
1092                'publish_rules': worker['publish-rules'],
1093                'release_eta': worker.get('release-eta', config.params.get('release_eta')) or '',
1094            })
1095            if worker.get('force-fallback-mapping-update'):
1096                task_def['payload']['force_fallback_mapping_update'] = \
1097                    worker['force-fallback-mapping-update']
1098            if worker.get('background-rate'):
1099                task_def['payload']['background_rate'] = worker['background-rate']
1100
1101
1102@payload_builder('bouncer-aliases', schema={
1103    Required('entries'): object,
1104})
1105def build_bouncer_aliases_payload(config, task, task_def):
1106    worker = task['worker']
1107
1108    task_def['payload'] = {
1109        'aliases_entries': worker['entries']
1110    }
1111
1112
1113@payload_builder('bouncer-locations', schema={
1114    Required('implementation'): 'bouncer-locations',
1115    Required('bouncer-products'): [text_type],
1116})
1117def build_bouncer_locations_payload(config, task, task_def):
1118    worker = task['worker']
1119    release_config = get_release_config(config)
1120
1121    task_def['payload'] = {
1122        'bouncer_products': worker['bouncer-products'],
1123        'version': release_config['version'],
1124        'product': task['shipping-product'],
1125    }
1126
1127
1128@payload_builder('bouncer-submission', schema={
1129    Required('locales'): [text_type],
1130    Required('entries'): object,
1131})
1132def build_bouncer_submission_payload(config, task, task_def):
1133    worker = task['worker']
1134
1135    task_def['payload'] = {
1136        'locales':  worker['locales'],
1137        'submission_entries': worker['entries']
1138    }
1139
1140
1141@payload_builder('push-snap', schema={
1142    Required('channel'): text_type,
1143    Required('upstream-artifacts'): [{
1144        Required('taskId'): taskref_or_string,
1145        Required('taskType'): text_type,
1146        Required('paths'): [text_type],
1147    }],
1148})
1149def build_push_snap_payload(config, task, task_def):
1150    worker = task['worker']
1151
1152    task_def['payload'] = {
1153        'channel': worker['channel'],
1154        'upstreamArtifacts':  worker['upstream-artifacts'],
1155    }
1156
1157
1158@payload_builder('push-flatpak', schema={
1159    Required('channel'): text_type,
1160    Required('upstream-artifacts'): [{
1161        Required('taskId'): taskref_or_string,
1162        Required('taskType'): text_type,
1163        Required('paths'): [text_type],
1164    }],
1165})
1166def build_push_flatpak_payload(config, task, task_def):
1167    worker = task['worker']
1168
1169    task_def['payload'] = {
1170        'channel': worker['channel'],
1171        'upstreamArtifacts':  worker['upstream-artifacts'],
1172    }
1173
1174
1175@payload_builder('shipit-shipped', schema={
1176    Required('release-name'): text_type,
1177})
1178def build_ship_it_shipped_payload(config, task, task_def):
1179    worker = task['worker']
1180
1181    task_def['payload'] = {
1182        'release_name': worker['release-name']
1183    }
1184
1185
1186@payload_builder('shipit-maybe-release', schema={
1187    Required('phase'): text_type,
1188})
1189def build_ship_it_maybe_release_payload(config, task, task_def):
1190    # expect branch name, including path
1191    branch = config.params['head_repository'][len('https://hg.mozilla.org/'):]
1192    # 'version' is e.g. '71.0b13' (app_version doesn't have beta number)
1193    version = config.params['version']
1194
1195    task_def['payload'] = {
1196        'product': task['shipping-product'],
1197        'branch': branch,
1198        'phase': task['worker']['phase'],
1199        'version': version,
1200        'cron_revision': config.params['head_rev'],
1201    }
1202
1203
1204@payload_builder('push-addons', schema={
1205    Required('channel'): Any('listed', 'unlisted'),
1206    Required('upstream-artifacts'): [{
1207        Required('taskId'): taskref_or_string,
1208        Required('taskType'): text_type,
1209        Required('paths'): [text_type],
1210    }],
1211})
1212def build_push_addons_payload(config, task, task_def):
1213    worker = task['worker']
1214
1215    task_def['payload'] = {
1216        'channel': worker['channel'],
1217        'upstreamArtifacts': worker['upstream-artifacts'],
1218    }
1219
1220
1221@payload_builder('treescript', schema={
1222    Required('tags'): [Any('buildN', 'release', None)],
1223    Required('bump'): bool,
1224    Optional('bump-files'): [text_type],
1225    Optional('repo-param-prefix'): text_type,
1226    Optional('dontbuild'): bool,
1227    Optional('ignore-closed-tree'): bool,
1228    Required('force-dry-run', default=True): bool,
1229    Required('push', default=False): bool,
1230    Optional('source-repo'): text_type,
1231    Optional('ssh-user'): text_type,
1232    Optional('l10n-bump-info'): {
1233        Required('name'): text_type,
1234        Required('path'): text_type,
1235        Required('version-path'): text_type,
1236        Optional('revision-url'): text_type,
1237        Optional('ignore-config'): object,
1238        Required('platform-configs'): [{
1239            Required('platforms'): [text_type],
1240            Required('path'): text_type,
1241            Optional('format'): text_type,
1242        }],
1243    },
1244    Optional('merge-info'): object,
1245})
1246def build_treescript_payload(config, task, task_def):
1247    worker = task['worker']
1248    release_config = get_release_config(config)
1249
1250    task_def['payload'] = {'actions': []}
1251    actions = task_def['payload']['actions']
1252    if worker['tags']:
1253        tag_names = []
1254        product = task['shipping-product'].upper()
1255        version = release_config['version'].replace('.', '_')
1256        buildnum = release_config['build_number']
1257        if 'buildN' in worker['tags']:
1258            tag_names.extend([
1259                "{}_{}_BUILD{}".format(product, version, buildnum),
1260            ])
1261        if 'release' in worker['tags']:
1262            tag_names.extend([
1263              "{}_{}_RELEASE".format(product, version)
1264            ])
1265        tag_info = {
1266            'tags': tag_names,
1267            'revision': config.params['{}head_rev'.format(worker.get('repo-param-prefix', ''))],
1268        }
1269        task_def['payload']['tag_info'] = tag_info
1270        actions.append('tag')
1271
1272    if worker['bump']:
1273        if not worker['bump-files']:
1274            raise Exception("Version Bump requested without bump-files")
1275
1276        bump_info = {}
1277        bump_info['next_version'] = release_config['next_version']
1278        bump_info['files'] = worker['bump-files']
1279        task_def['payload']['version_bump_info'] = bump_info
1280        actions.append('version_bump')
1281
1282    if worker.get('l10n-bump-info'):
1283        l10n_bump_info = {}
1284        for k, v in worker['l10n-bump-info'].items():
1285            l10n_bump_info[k.replace('-', '_')] = worker['l10n-bump-info'][k]
1286        task_def['payload']['l10n_bump_info'] = [l10n_bump_info]
1287        actions.append('l10n_bump')
1288
1289    if worker.get("merge-info"):
1290        merge_info = {
1291            merge_param_name.replace("-", "_"): merge_param_value
1292            for merge_param_name, merge_param_value in worker["merge-info"].items()
1293            if merge_param_name != "version-files"
1294        }
1295        merge_info["version_files"] = [
1296            {
1297                file_param_name.replace("-", "_"): file_param_value
1298                for file_param_name, file_param_value in file_entry.items()
1299            }
1300            for file_entry in worker["merge-info"]["version-files"]
1301        ]
1302        task_def["payload"]["merge_info"] = merge_info
1303        actions.append("merge_day")
1304
1305    if worker['push']:
1306        actions.append('push')
1307
1308    if worker.get('force-dry-run'):
1309        task_def['payload']['dry_run'] = True
1310
1311    if worker.get('dontbuild'):
1312        task_def['payload']['dontbuild'] = True
1313
1314    if worker.get('ignore-closed-tree') is not None:
1315        task_def['payload']['ignore_closed_tree'] = worker['ignore-closed-tree']
1316
1317    if worker.get('source-repo'):
1318        task_def['payload']['source_repo'] = worker['source-repo']
1319
1320    if worker.get('ssh-user'):
1321        task_def['payload']['ssh_user'] = worker['ssh-user']
1322
1323
1324@payload_builder('invalid', schema={
1325    # an invalid task is one which should never actually be created; this is used in
1326    # release automation on branches where the task just doesn't make sense
1327    Extra: object,
1328})
1329def build_invalid_payload(config, task, task_def):
1330    task_def['payload'] = 'invalid task - should never be created'
1331
1332
1333@payload_builder('always-optimized', schema={
1334    Extra: object,
1335})
1336@payload_builder('succeed', schema={
1337})
1338def build_dummy_payload(config, task, task_def):
1339    task_def['payload'] = {}
1340
1341
1342@payload_builder('script-engine-autophone', schema={
1343    Required('os'): Any('macosx', 'linux'),
1344
1345    # A link for an executable to download
1346    Optional('context'): text_type,
1347
1348    # Tells the worker whether machine should reboot
1349    # after the task is finished.
1350    Optional('reboot'):
1351    Any(False, 'always', 'never', 'on-exception', 'on-failure'),
1352
1353    # the command to run
1354    Optional('command'): [taskref_or_string],
1355
1356    # environment variables
1357    Optional('env'): {text_type: taskref_or_string},
1358
1359    # artifacts to extract from the task image after completion
1360    Optional('artifacts'): [{
1361        # type of artifact -- simple file, or recursive directory
1362        Required('type'): Any('file', 'directory'),
1363
1364        # task image path from which to read artifact
1365        Required('path'): text_type,
1366
1367        # name of the produced artifact (root of the names for
1368        # type=directory)
1369        Required('name'): text_type,
1370    }],
1371})
1372def build_script_engine_autophone_payload(config, task, task_def):
1373    worker = task['worker']
1374    artifacts = map(lambda artifact: {
1375        'name': artifact['name'],
1376        'path': artifact['path'],
1377        'type': artifact['type'],
1378        'expires': task_def['expires'],
1379    }, worker.get('artifacts', []))
1380
1381    task_def['payload'] = {
1382        'context': worker['context'],
1383        'command': worker['command'],
1384        'env': worker['env'],
1385        'artifacts': artifacts,
1386    }
1387    if worker.get('reboot'):
1388        task_def['payload'] = worker['reboot']
1389
1390    if task.get('needs-sccache'):
1391        raise Exception('needs-sccache not supported in taskcluster-worker')
1392
1393
1394transforms = TransformSequence()
1395
1396
1397@transforms.add
1398def set_implementation(config, tasks):
1399    """
1400    Set the worker implementation based on the worker-type alias.
1401    """
1402    for task in tasks:
1403        if 'implementation' in task['worker']:
1404            yield task
1405            continue
1406
1407        impl, os = worker_type_implementation(config.graph_config, task['worker-type'])
1408
1409        tags = task.setdefault('tags', {})
1410        tags['worker-implementation'] = impl
1411        if os:
1412            task['tags']['os'] = os
1413        worker = task.setdefault('worker', {})
1414        worker['implementation'] = impl
1415        if os:
1416            worker['os'] = os
1417
1418        yield task
1419
1420
1421@transforms.add
1422def set_defaults(config, tasks):
1423    for task in tasks:
1424        task.setdefault('shipping-phase', None)
1425        task.setdefault('shipping-product', None)
1426        task.setdefault('always-target', False)
1427        task.setdefault('optimization', None)
1428        task.setdefault('needs-sccache', False)
1429
1430        worker = task['worker']
1431        if worker['implementation'] in ('docker-worker',):
1432            worker.setdefault('chain-of-trust', False)
1433            worker.setdefault('taskcluster-proxy', False)
1434            worker.setdefault('allow-ptrace', True)
1435            worker.setdefault('loopback-video', False)
1436            worker.setdefault('loopback-audio', False)
1437            worker.setdefault('docker-in-docker', False)
1438            worker.setdefault('privileged', False)
1439            worker.setdefault('volumes', [])
1440            worker.setdefault('env', {})
1441            if 'caches' in worker:
1442                for c in worker['caches']:
1443                    c.setdefault('skip-untrusted', False)
1444        elif worker['implementation'] == 'generic-worker':
1445            worker.setdefault('env', {})
1446            worker.setdefault('os-groups', [])
1447            if worker['os-groups'] and worker['os'] != 'windows':
1448                raise Exception('os-groups feature of generic-worker is only supported on '
1449                                'Windows, not on {}'.format(worker['os']))
1450            worker.setdefault('chain-of-trust', False)
1451        elif worker['implementation'] in (
1452            'scriptworker-signing', 'beetmover', 'beetmover-push-to-release', 'beetmover-maven',
1453        ):
1454            worker.setdefault('max-run-time', 600)
1455        elif worker['implementation'] == 'push-apk':
1456            worker.setdefault('commit', False)
1457
1458        yield task
1459
1460
1461@transforms.add
1462def task_name_from_label(config, tasks):
1463    for task in tasks:
1464        if 'label' not in task:
1465            if 'name' not in task:
1466                raise Exception("task has neither a name nor a label")
1467            task['label'] = '{}-{}'.format(config.kind, task['name'])
1468        if task.get('name'):
1469            del task['name']
1470        yield task
1471
1472
1473UNSUPPORTED_SHIPPING_PRODUCT_ERROR = """\
1474The shipping product {product} is not in the list of configured products in
1475`taskcluster/ci/config.yml'.
1476"""
1477
1478
1479def validate_shipping_product(config, product):
1480    if product not in config.graph_config['release-promotion']['products']:
1481        raise Exception(UNSUPPORTED_SHIPPING_PRODUCT_ERROR.format(product=product))
1482
1483
1484@transforms.add
1485def validate(config, tasks):
1486    for task in tasks:
1487        validate_schema(
1488            task_description_schema, task,
1489            "In task {!r}:".format(task.get('label', '?no-label?')))
1490        validate_schema(
1491           payload_builders[task['worker']['implementation']].schema,
1492           task['worker'],
1493           "In task.run {!r}:".format(task.get('label', '?no-label?')))
1494        if task['shipping-product'] is not None:
1495            validate_shipping_product(config, task['shipping-product'])
1496        yield task
1497
1498
1499@index_builder('generic')
1500def add_generic_index_routes(config, task):
1501    index = task.get('index')
1502    routes = task.setdefault('routes', [])
1503
1504    verify_index(config, index)
1505
1506    subs = config.params.copy()
1507    subs['job-name'] = index['job-name']
1508    subs['build_date_long'] = time.strftime("%Y.%m.%d.%Y%m%d%H%M%S",
1509                                            time.gmtime(config.params['build_date']))
1510    subs['build_date'] = time.strftime("%Y.%m.%d",
1511                                       time.gmtime(config.params['build_date']))
1512    subs['product'] = index['product']
1513    subs['trust-domain'] = config.graph_config['trust-domain']
1514    subs['branch_rev'] = get_branch_rev(config)
1515
1516    project = config.params.get('project')
1517
1518    for tpl in V2_ROUTE_TEMPLATES:
1519        routes.append(tpl.format(**subs))
1520
1521    # Additionally alias all tasks for "trunk" repos into a common
1522    # namespace.
1523    if project and project in TRUNK_PROJECTS:
1524        for tpl in V2_TRUNK_ROUTE_TEMPLATES:
1525            routes.append(tpl.format(**subs))
1526
1527    return task
1528
1529
1530@index_builder('shippable')
1531def add_shippable_index_routes(config, task):
1532    index = task.get('index')
1533    routes = task.setdefault('routes', [])
1534
1535    verify_index(config, index)
1536
1537    subs = config.params.copy()
1538    subs['job-name'] = index['job-name']
1539    subs['build_date_long'] = time.strftime("%Y.%m.%d.%Y%m%d%H%M%S",
1540                                            time.gmtime(config.params['build_date']))
1541    subs['build_date'] = time.strftime("%Y.%m.%d",
1542                                       time.gmtime(config.params['build_date']))
1543    subs['product'] = index['product']
1544    subs['trust-domain'] = config.graph_config['trust-domain']
1545    subs['branch_rev'] = get_branch_rev(config)
1546
1547    for tpl in V2_SHIPPABLE_TEMPLATES:
1548        routes.append(tpl.format(**subs))
1549
1550    # Also add routes for en-US
1551    task = add_shippable_l10n_index_routes(config, task, force_locale="en-US")
1552
1553    return task
1554
1555
1556@index_builder('shippable-with-multi-l10n')
1557def add_shippable_multi_index_routes(config, task):
1558    task = add_shippable_index_routes(config, task)
1559    task = add_l10n_index_routes(config, task, force_locale="multi")
1560    return task
1561
1562
1563@index_builder('l10n')
1564def add_l10n_index_routes(config, task, force_locale=None):
1565    index = task.get('index')
1566    routes = task.setdefault('routes', [])
1567
1568    verify_index(config, index)
1569
1570    subs = config.params.copy()
1571    subs['job-name'] = index['job-name']
1572    subs['build_date_long'] = time.strftime("%Y.%m.%d.%Y%m%d%H%M%S",
1573                                            time.gmtime(config.params['build_date']))
1574    subs['product'] = index['product']
1575    subs['trust-domain'] = config.graph_config['trust-domain']
1576    subs['branch_rev'] = get_branch_rev(config)
1577
1578    locales = task['attributes'].get('chunk_locales',
1579                                     task['attributes'].get('all_locales'))
1580    # Some tasks has only one locale set
1581    if task['attributes'].get('locale'):
1582        locales = [task['attributes']['locale']]
1583
1584    if force_locale:
1585        # Used for en-US and multi-locale
1586        locales = [force_locale]
1587
1588    if not locales:
1589        raise Exception("Error: Unable to use l10n index for tasks without locales")
1590
1591    # If there are too many locales, we can't write a route for all of them
1592    # See Bug 1323792
1593    if len(locales) > 18:  # 18 * 3 = 54, max routes = 64
1594        return task
1595
1596    for locale in locales:
1597        for tpl in V2_L10N_TEMPLATES:
1598            routes.append(tpl.format(locale=locale, **subs))
1599
1600    return task
1601
1602
1603@index_builder('shippable-l10n')
1604def add_shippable_l10n_index_routes(config, task, force_locale=None):
1605    index = task.get('index')
1606    routes = task.setdefault('routes', [])
1607
1608    verify_index(config, index)
1609
1610    subs = config.params.copy()
1611    subs['job-name'] = index['job-name']
1612    subs['build_date_long'] = time.strftime("%Y.%m.%d.%Y%m%d%H%M%S",
1613                                            time.gmtime(config.params['build_date']))
1614    subs['product'] = index['product']
1615    subs['trust-domain'] = config.graph_config['trust-domain']
1616    subs['branch_rev'] = get_branch_rev(config)
1617
1618    locales = task['attributes'].get('chunk_locales',
1619                                     task['attributes'].get('all_locales'))
1620    # Some tasks has only one locale set
1621    if task['attributes'].get('locale'):
1622        locales = [task['attributes']['locale']]
1623
1624    if force_locale:
1625        # Used for en-US and multi-locale
1626        locales = [force_locale]
1627
1628    if not locales:
1629        raise Exception("Error: Unable to use l10n index for tasks without locales")
1630
1631    # If there are too many locales, we can't write a route for all of them
1632    # See Bug 1323792
1633    if len(locales) > 18:  # 18 * 3 = 54, max routes = 64
1634        return task
1635
1636    for locale in locales:
1637        for tpl in V2_SHIPPABLE_L10N_TEMPLATES:
1638            routes.append(tpl.format(locale=locale, **subs))
1639
1640    return task
1641
1642
1643def add_geckoview_index_routes(config, task):
1644    index = task.get('index')
1645    routes = task.setdefault('routes', [])
1646    geckoview_version = _compute_geckoview_version(
1647        config.params['app_version'],
1648        config.params['moz_build_date']
1649    )
1650
1651    subs = {
1652        'geckoview-version': geckoview_version,
1653        'job-name': index['job-name'],
1654        'product': index['product'],
1655        'project': config.params['project'],
1656        'trust-domain': config.graph_config['trust-domain'],
1657    }
1658    routes.append(V2_GECKOVIEW_RELEASE.format(**subs))
1659
1660    return task
1661
1662
1663@index_builder('android-shippable')
1664def add_android_shippable_index_routes(config, task):
1665    task = add_shippable_index_routes(config, task)
1666    task = add_geckoview_index_routes(config, task)
1667
1668    return task
1669
1670
1671@index_builder('android-shippable-with-multi-l10n')
1672def add_android_shippable_multi_index_routes(config, task):
1673    task = add_shippable_multi_index_routes(config, task)
1674    task = add_geckoview_index_routes(config, task)
1675
1676    return task
1677
1678
1679@transforms.add
1680def add_index_routes(config, tasks):
1681    for task in tasks:
1682        index = task.get('index', {})
1683
1684        # The default behavior is to rank tasks according to their tier
1685        extra_index = task.setdefault('extra', {}).setdefault('index', {})
1686        rank = index.get('rank', 'by-tier')
1687
1688        if rank == 'by-tier':
1689            # rank is zero for non-tier-1 tasks and based on pushid for others;
1690            # this sorts tier-{2,3} builds below tier-1 in the index
1691            tier = task.get('treeherder', {}).get('tier', 3)
1692            extra_index['rank'] = 0 if tier > 1 else int(config.params['build_date'])
1693        elif rank == 'build_date':
1694            extra_index['rank'] = int(config.params['build_date'])
1695        else:
1696            extra_index['rank'] = rank
1697
1698        if not index:
1699            yield task
1700            continue
1701
1702        index_type = index.get('type', 'generic')
1703        task = index_builders[index_type](config, task)
1704
1705        del task['index']
1706        yield task
1707
1708
1709@transforms.add
1710def try_task_config_env(config, tasks):
1711    """Set environment variables in the task."""
1712    env = config.params['try_task_config'].get('env')
1713    # Find all implementations that have an 'env' key.
1714    implementations = {name for name, builder in payload_builders.items()
1715                       if 'env' in builder.schema.schema}
1716    for task in tasks:
1717        if env and task['worker']['implementation'] in implementations:
1718            task['worker']['env'].update(env)
1719        yield task
1720
1721
1722@transforms.add
1723def try_task_config_chemspill_prio(config, tasks):
1724    """Increase the priority from lowest and very-low -> low, but leave others unchanged."""
1725    chemspill_prio = config.params['try_task_config'].get('chemspill-prio')
1726    for task in tasks:
1727        if chemspill_prio and task['priority'] in ('lowest', 'very-low'):
1728            task['priority'] = 'low'
1729        yield task
1730
1731
1732@transforms.add
1733def try_task_config_routes(config, tasks):
1734    """Set routes in the task."""
1735    routes = config.params['try_task_config'].get('routes')
1736    for task in tasks:
1737        if routes:
1738            task_routes = task.setdefault('routes', [])
1739            task_routes.extend(routes)
1740        yield task
1741
1742
1743@transforms.add
1744def build_task(config, tasks):
1745    for task in tasks:
1746        level = str(config.params['level'])
1747
1748        if task['worker-type'] in config.params['try_task_config'].get('worker-overrides', {}):
1749            worker_pool = (
1750                config.params['try_task_config']['worker-overrides'][task['worker-type']]
1751            )
1752            provisioner_id, worker_type = worker_pool.split('/', 1)
1753        else:
1754            provisioner_id, worker_type = get_worker_type(
1755                config.graph_config,
1756                task['worker-type'],
1757                level=level,
1758                release_level=config.params.release_level(),
1759            )
1760        task['worker-type'] = '/'.join([provisioner_id, worker_type])
1761        project = config.params['project']
1762
1763        routes = task.get('routes', [])
1764        scopes = [s.format(level=level, project=project) for s in task.get('scopes', [])]
1765
1766        # set up extra
1767        extra = task.get('extra', {})
1768        extra['parent'] = os.environ.get('TASK_ID', '')
1769        task_th = task.get('treeherder')
1770        if task_th:
1771            extra.setdefault('treeherder-platform', task_th['platform'])
1772            treeherder = extra.setdefault('treeherder', {})
1773
1774            machine_platform, collection = task_th['platform'].split('/', 1)
1775            treeherder['machine'] = {'platform': machine_platform}
1776            treeherder['collection'] = {collection: True}
1777
1778            group_names = config.graph_config['treeherder']['group-names']
1779            groupSymbol, symbol = split_symbol(task_th['symbol'])
1780            if groupSymbol != '?':
1781                treeherder['groupSymbol'] = groupSymbol
1782                if groupSymbol not in group_names:
1783                    path = os.path.join(config.path, task.get('job-from', ''))
1784                    raise Exception(UNKNOWN_GROUP_NAME.format(groupSymbol, path))
1785                treeherder['groupName'] = group_names[groupSymbol]
1786            treeherder['symbol'] = symbol
1787            if len(symbol) > 25 or len(groupSymbol) > 25:
1788                raise RuntimeError("Treeherder group and symbol names must not be longer than "
1789                                   "25 characters: {} (see {})".format(
1790                                       task_th['symbol'],
1791                                       TC_TREEHERDER_SCHEMA_URL,
1792                                       ))
1793            treeherder['jobKind'] = task_th['kind']
1794            treeherder['tier'] = task_th['tier']
1795
1796            branch_rev = get_branch_rev(config)
1797
1798            routes.append(
1799                '{}.v2.{}.{}.{}'.format(TREEHERDER_ROUTE_ROOT,
1800                                        config.params['project'],
1801                                        branch_rev,
1802                                        config.params['pushlog_id'])
1803            )
1804
1805        if 'expires-after' not in task:
1806            task['expires-after'] = '28 days' if config.params.is_try() else '1 year'
1807
1808        if 'deadline-after' not in task:
1809            task['deadline-after'] = '1 day'
1810
1811        if 'priority' not in task:
1812            task['priority'] = get_default_priority(config.graph_config, config.params['project'])
1813
1814        tags = task.get('tags', {})
1815        attributes = task.get('attributes', {})
1816
1817        tags.update({
1818            'createdForUser': config.params['owner'],
1819            'kind': config.kind,
1820            'label': task['label'],
1821            'retrigger': 'true' if attributes.get('retrigger', False) else 'false'
1822        })
1823
1824        task_def = {
1825            'provisionerId': provisioner_id,
1826            'workerType': worker_type,
1827            'routes': routes,
1828            'created': {'relative-datestamp': '0 seconds'},
1829            'deadline': {'relative-datestamp': task['deadline-after']},
1830            'expires': {'relative-datestamp': task['expires-after']},
1831            'scopes': scopes,
1832            'metadata': {
1833                'description': task['description'],
1834                'name': task['label'],
1835                'owner': config.params['owner'],
1836                'source': config.params.file_url(config.path, pretty=True),
1837            },
1838            'extra': extra,
1839            'tags': tags,
1840            'priority': task['priority'],
1841        }
1842
1843        if task.get('requires', None):
1844            task_def['requires'] = task['requires']
1845
1846        if task_th:
1847            # link back to treeherder in description
1848            th_push_link = 'https://treeherder.mozilla.org/#/jobs?repo={}&revision={}'.format(
1849                config.params['project'], branch_rev)
1850            task_def['metadata']['description'] += ' ([Treeherder push]({}))'.format(
1851                th_push_link)
1852
1853        # add the payload and adjust anything else as required (e.g., scopes)
1854        payload_builders[task['worker']['implementation']].builder(config, task, task_def)
1855
1856        # Resolve run-on-projects
1857        build_platform = attributes.get('build_platform')
1858        resolve_keyed_by(task, 'run-on-projects', item_name=task['label'],
1859                         **{'build-platform': build_platform})
1860        attributes['run_on_projects'] = task.get('run-on-projects', ['all'])
1861        attributes['always_target'] = task['always-target']
1862        # This logic is here since downstream tasks don't always match their
1863        # upstream dependency's shipping_phase.
1864        # A text_type task['shipping-phase'] takes precedence, then
1865        # an existing attributes['shipping_phase'], then fall back to None.
1866        if task.get('shipping-phase') is not None:
1867            attributes['shipping_phase'] = task['shipping-phase']
1868        else:
1869            attributes.setdefault('shipping_phase', None)
1870        # shipping_product will always match the upstream task's
1871        # shipping_product, so a pre-set existing attributes['shipping_product']
1872        # takes precedence over task['shipping-product']. However, make sure
1873        # we don't have conflicting values.
1874        if task.get('shipping-product') and \
1875                attributes.get('shipping_product') not in (None, task['shipping-product']):
1876            raise Exception(
1877                "{} shipping_product {} doesn't match task shipping-product {}!".format(
1878                    task['label'], attributes['shipping_product'], task['shipping-product']
1879                )
1880            )
1881        attributes.setdefault('shipping_product', task['shipping-product'])
1882
1883        # Set MOZ_AUTOMATION on all jobs.
1884        if task['worker']['implementation'] in (
1885            'generic-worker',
1886            'docker-worker',
1887        ):
1888            payload = task_def.get('payload')
1889            if payload:
1890                env = payload.setdefault('env', {})
1891                env['MOZ_AUTOMATION'] = '1'
1892
1893        yield {
1894            'label': task['label'],
1895            'task': task_def,
1896            'dependencies': task.get('dependencies', {}),
1897            'soft-dependencies': task.get('soft-dependencies', []),
1898            'attributes': attributes,
1899            'optimization': task.get('optimization', None),
1900            'release-artifacts': task.get('release-artifacts', []),
1901        }
1902
1903
1904@transforms.add
1905def chain_of_trust(config, tasks):
1906    for task in tasks:
1907        if task['task'].get('payload', {}).get('features', {}).get('chainOfTrust'):
1908            image = task.get('dependencies', {}).get('docker-image')
1909            if image:
1910                cot = task['task'].setdefault('extra', {}).setdefault('chainOfTrust', {})
1911                cot.setdefault('inputs', {})['docker-image'] = {
1912                    'task-reference': '<docker-image>'
1913                }
1914        yield task
1915
1916
1917@transforms.add
1918def check_task_identifiers(config, tasks):
1919    """Ensures that all tasks have well defined identifiers:
1920       ^[a-zA-Z0-9_-]{1,38}$
1921    """
1922    e = re.compile("^[a-zA-Z0-9_-]{1,38}$")
1923    for task in tasks:
1924        for attrib in ('workerType', 'provisionerId'):
1925            if not e.match(task['task'][attrib]):
1926                raise Exception(
1927                    'task {}.{} is not a valid identifier: {}'.format(
1928                        task['label'], attrib, task['task'][attrib]))
1929        yield task
1930
1931
1932@transforms.add
1933def check_task_dependencies(config, tasks):
1934    """Ensures that tasks don't have more than 100 dependencies."""
1935    for task in tasks:
1936        if len(task['dependencies']) > MAX_DEPENDENCIES:
1937            raise Exception(
1938                    'task {}/{} has too many dependencies ({} > {})'.format(
1939                        config.kind, task['label'], len(task['dependencies']),
1940                        MAX_DEPENDENCIES))
1941        yield task
1942
1943
1944def check_caches_are_volumes(task):
1945    """Ensures that all cache paths are defined as volumes.
1946
1947    Caches and volumes are the only filesystem locations whose content
1948    isn't defined by the Docker image itself. Some caches are optional
1949    depending on the job environment. We want paths that are potentially
1950    caches to have as similar behavior regardless of whether a cache is
1951    used. To help enforce this, we require that all paths used as caches
1952    to be declared as Docker volumes. This check won't catch all offenders.
1953    But it is better than nothing.
1954    """
1955    volumes = set(six.ensure_text(s) for s in task['worker']['volumes'])
1956    paths = set(six.ensure_text(c['mount-point'])
1957                for c in task['worker'].get('caches', []))
1958    missing = paths - volumes
1959
1960    if not missing:
1961        return
1962
1963    raise Exception('task %s (image %s) has caches that are not declared as '
1964                    'Docker volumes: %s '
1965                    '(have you added them as VOLUMEs in the Dockerfile?)'
1966                    % (task['label'], task['worker']['docker-image'],
1967                       ', '.join(sorted(missing))))
1968
1969
1970def check_required_volumes(task):
1971    """
1972    Ensures that all paths that are required to be volumes are defined as volumes.
1973
1974    Performance of writing to files in poor in directories not marked as
1975    volumes, in docker. Ensure that paths that are often written to are marked
1976    as volumes.
1977    """
1978    volumes = set(task['worker']['volumes'])
1979    paths = set(task['worker'].get('required-volumes', []))
1980    missing = paths - volumes
1981
1982    if not missing:
1983        return
1984
1985    raise Exception('task %s (image %s) has paths that should be volumes for peformance '
1986                    'that are not declared as Docker volumes: %s '
1987                    '(have you added them as VOLUMEs in the Dockerfile?)'
1988                    % (task['label'], task['worker']['docker-image'],
1989                       ', '.join(sorted(missing))))
1990
1991
1992@transforms.add
1993def check_run_task_caches(config, tasks):
1994    """Audit for caches requiring run-task.
1995
1996    run-task manages caches in certain ways. If a cache managed by run-task
1997    is used by a non run-task task, it could cause problems. So we audit for
1998    that and make sure certain cache names are exclusive to run-task.
1999
2000    IF YOU ARE TEMPTED TO MAKE EXCLUSIONS TO THIS POLICY, YOU ARE LIKELY
2001    CONTRIBUTING TECHNICAL DEBT AND WILL HAVE TO SOLVE MANY OF THE PROBLEMS
2002    THAT RUN-TASK ALREADY SOLVES. THINK LONG AND HARD BEFORE DOING THAT.
2003    """
2004    re_reserved_caches = re.compile('''^
2005        (checkouts|tooltool-cache)
2006    ''', re.VERBOSE)
2007
2008    re_sparse_checkout_cache = re.compile('^checkouts-sparse')
2009
2010    cache_prefix = '{trust_domain}-level-{level}-'.format(
2011        trust_domain=config.graph_config['trust-domain'],
2012        level=config.params['level'],
2013    )
2014
2015    suffix = _run_task_suffix()
2016
2017    for task in tasks:
2018        payload = task['task'].get('payload', {})
2019        command = payload.get('command') or ['']
2020
2021        main_command = command[0] if isinstance(command[0], text_type) else ''
2022        run_task = main_command.endswith('run-task')
2023
2024        require_sparse_cache = False
2025        have_sparse_cache = False
2026
2027        if run_task:
2028            for arg in command[1:]:
2029                if not isinstance(arg, text_type):
2030                    continue
2031
2032                if arg == '--':
2033                    break
2034
2035                if arg.startswith('--gecko-sparse-profile'):
2036                    if '=' not in arg:
2037                        raise Exception(
2038                            '{} is specifying `--gecko-sparse-profile` to run-task '
2039                            'as two arguments. Unable to determine if the sparse '
2040                            'profile exists.'.format(
2041                                task['label']))
2042                    _, sparse_profile = arg.split('=', 1)
2043                    if not os.path.exists(os.path.join(GECKO, sparse_profile)):
2044                        raise Exception(
2045                            '{} is using non-existant sparse profile {}.'.format(
2046                                task['label'], sparse_profile))
2047                    require_sparse_cache = True
2048                    break
2049
2050        for cache in payload.get('cache', {}):
2051            if not cache.startswith(cache_prefix):
2052                raise Exception(
2053                    '{} is using a cache ({}) which is not appropriate '
2054                    'for its trust-domain and level. It should start with {}.'
2055                    .format(task['label'], cache, cache_prefix)
2056                )
2057
2058            cache = cache[len(cache_prefix):]
2059
2060            if re_sparse_checkout_cache.match(cache):
2061                have_sparse_cache = True
2062
2063            if not re_reserved_caches.match(cache):
2064                continue
2065
2066            if not run_task:
2067                raise Exception(
2068                    '%s is using a cache (%s) reserved for run-task '
2069                    'change the task to use run-task or use a different '
2070                    'cache name' % (task['label'], cache))
2071
2072            if not cache.endswith(suffix):
2073                raise Exception(
2074                    '%s is using a cache (%s) reserved for run-task '
2075                    'but the cache name is not dependent on the contents '
2076                    'of run-task; change the cache name to conform to the '
2077                    'naming requirements' % (task['label'], cache))
2078
2079        if require_sparse_cache and not have_sparse_cache:
2080            raise Exception('%s is using a sparse checkout but not using '
2081                            'a sparse checkout cache; change the checkout '
2082                            'cache name so it is sparse aware' % task['label'])
2083
2084        yield task
2085