1from __future__ import absolute_import
2from __future__ import unicode_literals
3
4import functools
5import io
6import logging
7import os
8import string
9import sys
10from collections import namedtuple
11from operator import attrgetter
12
13import six
14import yaml
15from cached_property import cached_property
16
17from . import types
18from .. import const
19from ..const import COMPOSEFILE_V1 as V1
20from ..const import COMPOSEFILE_V2_1 as V2_1
21from ..const import COMPOSEFILE_V2_3 as V2_3
22from ..const import COMPOSEFILE_V3_0 as V3_0
23from ..const import COMPOSEFILE_V3_4 as V3_4
24from ..utils import build_string_dict
25from ..utils import json_hash
26from ..utils import parse_bytes
27from ..utils import parse_nanoseconds_int
28from ..utils import splitdrive
29from ..version import ComposeVersion
30from .environment import env_vars_from_file
31from .environment import Environment
32from .environment import split_env
33from .errors import CircularReference
34from .errors import ComposeFileNotFound
35from .errors import ConfigurationError
36from .errors import DuplicateOverrideFileFound
37from .errors import VERSION_EXPLANATION
38from .interpolation import interpolate_environment_variables
39from .sort_services import get_container_name_from_network_mode
40from .sort_services import get_service_name_from_network_mode
41from .sort_services import sort_service_dicts
42from .types import MountSpec
43from .types import parse_extra_hosts
44from .types import parse_restart_spec
45from .types import SecurityOpt
46from .types import ServiceLink
47from .types import ServicePort
48from .types import VolumeFromSpec
49from .types import VolumeSpec
50from .validation import match_named_volumes
51from .validation import validate_against_config_schema
52from .validation import validate_config_section
53from .validation import validate_cpu
54from .validation import validate_credential_spec
55from .validation import validate_depends_on
56from .validation import validate_extends_file_path
57from .validation import validate_healthcheck
58from .validation import validate_links
59from .validation import validate_network_mode
60from .validation import validate_pid_mode
61from .validation import validate_service_constraints
62from .validation import validate_top_level_object
63from .validation import validate_ulimits
64
65
# Service-definition keys whose names map directly onto docker container
# options. NOTE(review): the split between this list and the compose-only
# keys appended below appears to drive validation/merging elsewhere in the
# package — confirm against the validation module.
DOCKER_CONFIG_KEYS = [
    'cap_add',
    'cap_drop',
    'cgroup_parent',
    'command',
    'cpu_count',
    'cpu_percent',
    'cpu_period',
    'cpu_quota',
    'cpu_rt_period',
    'cpu_rt_runtime',
    'cpu_shares',
    'cpus',
    'cpuset',
    'detach',
    'device_cgroup_rules',
    'devices',
    'dns',
    'dns_search',
    'dns_opt',
    'domainname',
    'entrypoint',
    'env_file',
    'environment',
    'extra_hosts',
    'group_add',
    'hostname',
    'healthcheck',
    'image',
    'ipc',
    'isolation',
    'labels',
    'links',
    'mac_address',
    'mem_limit',
    'mem_reservation',
    'memswap_limit',
    'mem_swappiness',
    'net',
    'oom_score_adj',
    'oom_kill_disable',
    'pid',
    'ports',
    'privileged',
    'read_only',
    'restart',
    'runtime',
    'secrets',
    'security_opt',
    'shm_size',
    'pids_limit',
    'stdin_open',
    'stop_signal',
    'sysctls',
    'tty',
    'user',
    'userns_mode',
    'volume_driver',
    'volumes',
    'volumes_from',
    'working_dir',
]

# All keys allowed in a service definition: the container-level keys above
# plus keys that are handled by Compose itself (build, logging, scale, ...).
ALLOWED_KEYS = DOCKER_CONFIG_KEYS + [
    'blkio_config',
    'build',
    'container_name',
    'credential_spec',
    'dockerfile',
    'init',
    'log_driver',
    'log_opt',
    'logging',
    'network_mode',
    'platform',
    'scale',
    'stop_grace_period',
]

# 'build' values starting with one of these prefixes denote a remote build
# context (URL / git reference) rather than a local path; see
# resolve_build_path usage in process_build_section.
DOCKER_VALID_URL_PREFIXES = (
    'http://',
    'https://',
    'git://',
    'github.com/',
    'git@',
)

# Default file names probed, in order, when no explicit file is given
# (see get_default_config_files / find_candidates_in_parent_dirs).
SUPPORTED_FILENAMES = [
    'docker-compose.yml',
    'docker-compose.yaml',
]

# Override files automatically merged on top of the main file when present
# (see get_default_override_file); having both at once is an error.
DEFAULT_OVERRIDE_FILENAMES = ('docker-compose.override.yml', 'docker-compose.override.yaml')


# Module-level logger for this config-loading module.
log = logging.getLogger(__name__)
162
163
class ConfigDetails(namedtuple('_ConfigDetails', 'working_dir config_files environment')):
    """
    :param working_dir: the directory to use for relative paths in the config
    :type  working_dir: string
    :param config_files: list of configuration files to load
    :type  config_files: list of :class:`ConfigFile`
    :param environment: computed environment values for this project
    :type  environment: :class:`environment.Environment`
    """
    def __new__(cls, working_dir, config_files, environment=None):
        env = environment
        if env is None:
            # No explicit environment given: read it from the project's env file.
            env = Environment.from_env_file(working_dir)
        return super(ConfigDetails, cls).__new__(cls, working_dir, config_files, env)
179
180
class ConfigFile(namedtuple('_ConfigFile', 'filename config')):
    """
    :param filename: filename of the config file
    :type  filename: string
    :param config: contents of the config file
    :type  config: :class:`dict`
    """

    @classmethod
    def from_filename(cls, filename):
        """Load and parse the YAML file at ``filename`` into a ConfigFile."""
        return cls(filename, load_yaml(filename))

    @cached_property
    def version(self):
        """Return this file's compose file version.

        A missing ``version`` key means a legacy V1 file; bare major
        versions ('2', '3') are normalized to the x.0 release.
        """
        if 'version' not in self.config:
            return V1

        version = self.config['version']

        if isinstance(version, dict):
            # A mapping here means "version" is actually a V1 service named
            # "version", not a version declaration.
            # Use log.warning: Logger.warn is a deprecated alias.
            log.warning('Unexpected type for "version" key in "{}". Assuming '
                        '"version" is the name of a service, and defaulting to '
                        'Compose file version 1.'.format(self.filename))
            return V1

        if not isinstance(version, six.string_types):
            raise ConfigurationError(
                'Version in "{}" is invalid - it should be a string.'
                .format(self.filename))

        if version == '1':
            # '1' was never a valid declared version; V1 files omit the key.
            raise ConfigurationError(
                'Version in "{}" is invalid. {}'
                .format(self.filename, VERSION_EXPLANATION)
            )

        if version == '2':
            return const.COMPOSEFILE_V2_0

        if version == '3':
            return const.COMPOSEFILE_V3_0

        return ComposeVersion(version)

    def get_service(self, name):
        """Return the raw dict for service ``name``.

        :raises KeyError: if the service is not defined in this file.
        """
        return self.get_service_dicts()[name]

    def get_service_dicts(self):
        # V1 files are a flat mapping of services; later versions nest them
        # under a top-level 'services' key.
        return self.config if self.version == V1 else self.config.get('services', {})

    def get_volumes(self):
        return {} if self.version == V1 else self.config.get('volumes', {})

    def get_networks(self):
        return {} if self.version == V1 else self.config.get('networks', {})

    def get_secrets(self):
        # 'secrets' exists only from compose file format 3.1 on.
        return {} if self.version < const.COMPOSEFILE_V3_1 else self.config.get('secrets', {})

    def get_configs(self):
        # 'configs' exists only from compose file format 3.3 on.
        return {} if self.version < const.COMPOSEFILE_V3_3 else self.config.get('configs', {})
242
243
class Config(namedtuple('_Config', 'version services volumes networks secrets configs')):
    """The fully processed result of loading a set of compose files (see load()).

    :param version: configuration version
    :type  version: :class:`ComposeVersion`
    :param services: List of service description dictionaries
    :type  services: :class:`list`
    :param volumes: Dictionary mapping volume names to description dictionaries
    :type  volumes: :class:`dict`
    :param networks: Dictionary mapping network names to description dictionaries
    :type  networks: :class:`dict`
    :param secrets: Dictionary mapping secret names to description dictionaries
    :type secrets: :class:`dict`
    :param configs: Dictionary mapping config names to description dictionaries
    :type configs: :class:`dict`
    """
259
260
class ServiceConfig(namedtuple('_ServiceConfig', 'working_dir filename name config')):
    """A single service definition plus the paths it was loaded from."""

    @classmethod
    def with_abs_paths(cls, working_dir, filename, name, config):
        """Build a ServiceConfig with working_dir and filename made absolute."""
        if not working_dir:
            raise ValueError("No working_dir for ServiceConfig.")

        abs_filename = os.path.abspath(filename) if filename else filename
        return cls(os.path.abspath(working_dir), abs_filename, name, config)
273
274
def find(base_dir, filenames, environment, override_dir=None):
    """Locate the config files to use and wrap them in a ConfigDetails."""
    if filenames == ['-']:
        # Config is piped on stdin; use override_dir (or cwd) as working dir.
        return ConfigDetails(
            os.path.abspath(override_dir) if override_dir else os.getcwd(),
            [ConfigFile(None, yaml.safe_load(sys.stdin))],
            environment
        )

    if filenames:
        config_paths = [os.path.join(base_dir, f) for f in filenames]
    else:
        config_paths = get_default_config_files(base_dir)

    log.debug("Using configuration files: {}".format(",".join(config_paths)))
    working_dir = override_dir if override_dir else os.path.dirname(config_paths[0])
    return ConfigDetails(
        working_dir,
        [ConfigFile.from_filename(f) for f in config_paths],
        environment
    )
294
295
def validate_config_version(config_files):
    """Ensure every file agrees with the first file's declared version."""
    main_file = config_files[0]
    validate_top_level_object(main_file)
    for other in config_files[1:]:
        validate_top_level_object(other)
        if other.version == main_file.version:
            continue
        raise ConfigurationError(
            "Version mismatch: file {0} specifies version {1} but "
            "extension file {2} uses version {3}".format(
                main_file.filename,
                main_file.version,
                other.filename,
                other.version))
310
311
def get_default_config_files(base_dir):
    """Find the default compose file, searching upward from ``base_dir``.

    Returns a list containing the chosen config file path, followed by the
    default override file if one exists alongside it.

    :raises ComposeFileNotFound: if no supported filename exists in
        ``base_dir`` or any parent directory.
    """
    (candidates, path) = find_candidates_in_parent_dirs(SUPPORTED_FILENAMES, base_dir)

    if not candidates:
        raise ComposeFileNotFound(SUPPORTED_FILENAMES)

    winner = candidates[0]

    if len(candidates) > 1:
        # Both .yml and .yaml are present; the first supported name wins.
        # log.warning: Logger.warn is a deprecated alias.
        log.warning("Found multiple config files with supported names: %s", ", ".join(candidates))
        log.warning("Using %s\n", winner)

    return [os.path.join(path, winner)] + get_default_override_file(path)
325
326
def get_default_override_file(path):
    """Return a 0- or 1-element list with the override file found in ``path``."""
    matches = []
    for override_filename in DEFAULT_OVERRIDE_FILENAMES:
        candidate = os.path.join(path, override_filename)
        if os.path.exists(candidate):
            matches.append(candidate)
    if len(matches) > 1:
        # Having both .yml and .yaml override files is ambiguous.
        raise DuplicateOverrideFileFound(matches)
    return matches
334
335
def find_candidates_in_parent_dirs(filenames, path):
    """
    Given a directory path to start, looks for filenames in the
    directory, and then each parent directory successively,
    until found.

    Returns tuple (candidates, path).
    """
    current = path
    while True:
        candidates = [
            filename for filename in filenames
            if os.path.exists(os.path.join(current, filename))
        ]
        if candidates:
            return (candidates, current)

        parent = os.path.join(current, '..')
        if os.path.abspath(parent) == os.path.abspath(current):
            # Reached the filesystem root without a match.
            return (candidates, current)
        current = parent
353
354
def check_swarm_only_config(service_dicts, compatibility=False):
    """Warn about swarm-only keys that Compose will ignore.

    'configs' is always swarm-only; 'deploy' is only warned about outside
    compatibility mode (where it is handled separately — see finalize_service).
    """
    warning_template = (
        "Some services ({services}) use the '{key}' key, which will be ignored. "
        "Compose does not support '{key}' configuration - use "
        "`docker stack deploy` to deploy to a swarm."
    )

    def check_swarm_only_key(service_dicts, key):
        # Collect the affected service names so the warning is actionable.
        services = [s for s in service_dicts if s.get(key)]
        if services:
            # log.warning: Logger.warn is a deprecated alias.
            log.warning(
                warning_template.format(
                    services=", ".join(sorted(s['name'] for s in services)),
                    key=key
                )
            )

    if not compatibility:
        check_swarm_only_key(service_dicts, 'deploy')
    check_swarm_only_key(service_dicts, 'configs')
374
375
def load(config_details, compatibility=False):
    """Load the configuration from a working directory and a list of
    configuration files.  Files are loaded in order, and merged on top
    of each other to create the final configuration.

    Return a fully interpolated, extended and validated configuration.
    """
    validate_config_version(config_details.config_files)

    processed_files = [
        process_config_file(cfg_file, config_details.environment)
        for cfg_file in config_details.config_files
    ]
    config_details = config_details._replace(config_files=processed_files)
    main_file = config_details.config_files[0]

    volumes = load_mapping(
        config_details.config_files, 'get_volumes', 'Volume')
    networks = load_mapping(
        config_details.config_files, 'get_networks', 'Network')
    secrets = load_mapping(
        config_details.config_files, 'get_secrets', 'Secret', config_details.working_dir)
    configs = load_mapping(
        config_details.config_files, 'get_configs', 'Config', config_details.working_dir)
    service_dicts = load_services(config_details, main_file, compatibility)

    if main_file.version != V1:
        # Check each service's volumes against the top-level 'volumes' section.
        for service_dict in service_dicts:
            match_named_volumes(service_dict, volumes)

    check_swarm_only_config(service_dicts, compatibility)

    if compatibility and main_file.version >= V3_0:
        # Compatibility mode downgrades v3 files to v2.3 semantics.
        version = V2_3
    else:
        version = main_file.version

    return Config(version, service_dicts, volumes, networks, secrets, configs)
415
416
def load_mapping(config_files, get_func, entity_type, working_dir=None):
    """Merge one top-level section (volumes/networks/secrets/configs) across
    all files, normalizing each entry in place. Later files win on name clashes."""
    mapping = {}

    for config_file in config_files:
        section = getattr(config_file, get_func)()
        for name, config in section.items():
            mapping[name] = config or {}
            if not config:
                continue

            external = config.get('external')
            if external:
                validate_external(entity_type, name, config, config_file.version)
                if isinstance(external, dict):
                    config['name'] = external.get('name')
                elif not config.get('name'):
                    config['name'] = name

            if 'driver_opts' in config:
                # Driver options are passed to the engine as strings.
                config['driver_opts'] = build_string_dict(config['driver_opts'])

            if 'labels' in config:
                config['labels'] = parse_labels(config['labels'])

            if 'file' in config:
                # File references are resolved relative to the project dir.
                config['file'] = expand_path(working_dir, config['file'])

    return mapping
446
447
def validate_external(entity_type, name, config, version):
    """Reject extra attributes on an 'external' resource for schema versions
    that do not allow them (before 2.1, and 3.0 up to but excluding 3.4)."""
    if len(config.keys()) <= 1:
        return
    if (V2_1 <= version < V3_0) or version >= V3_4:
        return
    raise ConfigurationError(
        "{} {} declared as external but specifies additional attributes "
        "({}).".format(
            entity_type, name, ', '.join(k for k in config if k != 'external')))
454
455
def load_services(config_details, config_file, compatibility=False):
    """Merge service sections across files and build the final service dicts."""

    def build_service(service_name, service_dict, service_names):
        # Resolve 'extends', normalize, validate, then finalize.
        service_config = ServiceConfig.with_abs_paths(
            config_details.working_dir,
            config_file.filename,
            service_name,
            service_dict)
        resolver = ServiceExtendsResolver(
            service_config, config_file, environment=config_details.environment
        )
        service_dict = process_service(resolver.run())

        service_config = service_config._replace(config=service_dict)
        validate_service(service_config, service_names, config_file)
        return finalize_service(
            service_config,
            service_names,
            config_file.version,
            config_details.environment,
            compatibility
        )

    def build_services(service_config):
        service_names = service_config.keys()
        return sort_service_dicts([
            build_service(name, service_dict, service_names)
            for name, service_dict in service_config.items()
        ])

    def merge_services(base, override):
        return {
            name: merge_service_dicts_from_files(
                base.get(name, {}),
                override.get(name, {}),
                config_file.version)
            for name in set(base) | set(override)
        }

    service_configs = [
        cfg_file.get_service_dicts() for cfg_file in config_details.config_files
    ]

    merged = service_configs[0]
    for next_config in service_configs[1:]:
        merged = merge_services(merged, next_config)

    return build_services(merged)
505
506
def interpolate_config_section(config_file, config, section, environment):
    """Validate one top-level section, then substitute environment variables."""
    validate_config_section(config_file.filename, config, section)
    return interpolate_environment_variables(
        config_file.version, config, section, environment)
515
516
def process_config_file(config_file, environment, service_name=None):
    """Interpolate and schema-validate a single config file.

    Returns a new ConfigFile with the processed contents. When
    ``service_name`` is given, also require that service to exist.
    """
    services = interpolate_config_section(
        config_file, config_file.get_service_dicts(), 'service', environment)

    if config_file.version == V1:
        # V1 files are just a flat mapping of services.
        processed_config = services
    else:
        processed_config = dict(config_file.config)
        processed_config['services'] = services
        processed_config['volumes'] = interpolate_config_section(
            config_file, config_file.get_volumes(), 'volume', environment)
        processed_config['networks'] = interpolate_config_section(
            config_file, config_file.get_networks(), 'network', environment)
        if config_file.version >= const.COMPOSEFILE_V3_1:
            processed_config['secrets'] = interpolate_config_section(
                config_file, config_file.get_secrets(), 'secret', environment)
        if config_file.version >= const.COMPOSEFILE_V3_3:
            processed_config['configs'] = interpolate_config_section(
                config_file, config_file.get_configs(), 'config', environment)

    config_file = config_file._replace(config=processed_config)
    validate_against_config_schema(config_file)

    if service_name and service_name not in services:
        raise ConfigurationError(
            "Cannot extend service '{}' in {}: Service not found".format(
                service_name, config_file.filename))

    return config_file
562
563
class ServiceExtendsResolver(object):
    """Resolves a service's 'extends' chain into a single merged config dict.

    Each link in the chain spawns a new resolver (see resolve_extends), so
    multi-level extends are handled recursively; `already_seen` carries the
    (filename, service) pairs visited so far to detect cycles.
    """
    def __init__(self, service_config, config_file, environment, already_seen=None):
        # :param service_config: the ServiceConfig being resolved
        # :param config_file: the ConfigFile the service was defined in
        # :param environment: Environment used when loading extended files
        # :param already_seen: list of signatures already visited in the chain
        self.service_config = service_config
        self.working_dir = service_config.working_dir
        self.already_seen = already_seen or []
        self.config_file = config_file
        self.environment = environment

    @property
    def signature(self):
        """Identity of this service in the extends chain: (filename, name)."""
        return self.service_config.filename, self.service_config.name

    def detect_cycle(self):
        """Raise CircularReference if this service was already visited."""
        if self.signature in self.already_seen:
            raise CircularReference(self.already_seen + [self.signature])

    def run(self):
        """Return a ServiceConfig with any 'extends' fully resolved and merged."""
        self.detect_cycle()

        if 'extends' in self.service_config.config:
            service_dict = self.resolve_extends(*self.validate_and_construct_extends())
            return self.service_config._replace(config=service_dict)

        return self.service_config

    def validate_and_construct_extends(self):
        """Locate the extended service's file and raw config.

        Returns (config_path, service_config_dict, service_name).
        """
        extends = self.service_config.config['extends']
        if not isinstance(extends, dict):
            # Short form: 'extends: <service>' within the same file.
            extends = {'service': extends}

        config_path = self.get_extended_config_path(extends)
        service_name = extends['service']

        if config_path == self.config_file.filename:
            # Extending from the same file: look up the raw service directly.
            try:
                service_config = self.config_file.get_service(service_name)
            except KeyError:
                raise ConfigurationError(
                    "Cannot extend service '{}' in {}: Service not found".format(
                        service_name, config_path)
                )
        else:
            # Extending from another file: load, version-check and
            # interpolate that file before extracting the service.
            extends_file = ConfigFile.from_filename(config_path)
            validate_config_version([self.config_file, extends_file])
            extended_file = process_config_file(
                extends_file, self.environment, service_name=service_name
            )
            service_config = extended_file.get_service(service_name)

        return config_path, service_config, service_name

    def resolve_extends(self, extended_config_path, service_dict, service_name):
        """Recursively resolve the extended service, then merge our own
        config on top of it (our keys win — see merge_service_dicts)."""
        resolver = ServiceExtendsResolver(
            ServiceConfig.with_abs_paths(
                os.path.dirname(extended_config_path),
                extended_config_path,
                service_name,
                service_dict),
            self.config_file,
            already_seen=self.already_seen + [self.signature],
            environment=self.environment
        )

        service_config = resolver.run()
        other_service_dict = process_service(service_config)
        validate_extended_service_dict(
            other_service_dict,
            extended_config_path,
            service_name)

        return merge_service_dicts(
            other_service_dict,
            self.service_config.config,
            self.config_file.version)

    def get_extended_config_path(self, extends_options):
        """Service we are extending either has a value for 'file' set, which we
        need to obtain a full path too or we are extending from a service
        defined in our own file.
        """
        filename = self.service_config.filename
        validate_extends_file_path(
            self.service_config.name,
            extends_options,
            filename)
        if 'file' in extends_options:
            return expand_path(self.working_dir, extends_options['file'])
        return filename
652
653
def resolve_environment(service_dict, environment=None):
    """Unpack any environment variables from an env_file, if set.
    Interpolate environment values if set.
    """
    env = {}
    # env_file entries are applied first so 'environment' wins on conflict.
    for env_file in service_dict.get('env_file', []):
        env.update(env_vars_from_file(env_file))

    env.update(parse_environment(service_dict.get('environment')))
    resolved_pairs = [
        resolve_env_var(key, value, environment)
        for key, value in six.iteritems(env)
    ]
    return dict(resolved_pairs)
664
665
def resolve_build_args(buildargs, environment):
    """Parse build args and fill in unset values from the environment."""
    parsed = parse_build_arguments(buildargs)
    return dict(
        resolve_env_var(key, value, environment)
        for key, value in six.iteritems(parsed)
    )
669
670
def validate_extended_service_dict(service_dict, filename, service):
    """Reject extending a service that declares keys which cannot be
    meaningfully inherited (links, volumes_from, container/service
    network modes, depends_on)."""
    error_prefix = "Cannot extend service '%s' in %s:" % (service, filename)

    if 'links' in service_dict:
        raise ConfigurationError(
            "%s services with 'links' cannot be extended" % error_prefix)

    if 'volumes_from' in service_dict:
        raise ConfigurationError(
            "%s services with 'volumes_from' cannot be extended" % error_prefix)

    if 'net' in service_dict and get_container_name_from_network_mode(service_dict['net']):
        raise ConfigurationError(
            "%s services with 'net: container' cannot be extended" % error_prefix)

    if 'network_mode' in service_dict and get_service_name_from_network_mode(
            service_dict['network_mode']):
        raise ConfigurationError(
            "%s services with 'network_mode: service' cannot be extended" % error_prefix)

    if 'depends_on' in service_dict:
        raise ConfigurationError(
            "%s services with 'depends_on' cannot be extended" % error_prefix)
695
696
def validate_service(service_config, service_names, config_file):
    """Run every per-service validation check and raise on the first failure."""
    service_dict, service_name = service_config.config, service_config.name
    validate_service_constraints(service_dict, service_name, config_file)
    validate_paths(service_dict)

    validate_cpu(service_config)
    validate_ulimits(service_config)
    validate_network_mode(service_config, service_names)
    validate_pid_mode(service_config, service_names)
    validate_depends_on(service_config, service_names)
    validate_links(service_config, service_names)
    validate_healthcheck(service_config)
    validate_credential_spec(service_config)

    # Without an explicit image, the service name is used to derive the
    # image name, which must be lowercase.
    if not service_dict.get('image') and has_uppercase(service_name):
        raise ConfigurationError(
            "Service '{name}' contains uppercase characters which are not valid "
            "as part of an image name. Either use a lowercase service name or "
            "use the `image` field to set a custom name for the service image."
            .format(name=service_name))
717
718
def process_service(service_config):
    """Normalize a single service dict: expand paths, coerce list fields,
    and parse structured sub-sections."""
    working_dir = service_config.working_dir
    service_dict = dict(service_config.config)

    if 'env_file' in service_dict:
        # env_file may be a string or a list; entries are project-relative.
        service_dict['env_file'] = [
            expand_path(working_dir, env_path)
            for env_path in to_list(service_dict['env_file'])
        ]

    if 'build' in service_dict:
        process_build_section(service_dict, working_dir)

    if 'volumes' in service_dict and service_dict.get('volume_driver') is None:
        service_dict['volumes'] = resolve_volume_paths(working_dir, service_dict)

    if 'sysctls' in service_dict:
        service_dict['sysctls'] = build_string_dict(parse_sysctls(service_dict['sysctls']))

    if 'labels' in service_dict:
        service_dict['labels'] = parse_labels(service_dict['labels'])

    service_dict = process_depends_on(service_dict)

    for list_field in ('dns', 'dns_search', 'tmpfs'):
        if list_field in service_dict:
            service_dict[list_field] = to_list(service_dict[list_field])

    service_dict = process_healthcheck(service_dict)
    service_dict = process_ports(service_dict)
    service_dict = process_blkio_config(service_dict)
    return process_security_opt(service_dict)
752
753
def process_build_section(service_dict, working_dir):
    """Resolve the build context path and normalize build labels in place."""
    build = service_dict['build']
    if isinstance(build, six.string_types):
        # Short form: 'build: <path-or-url>'
        service_dict['build'] = resolve_build_path(working_dir, build)
    elif isinstance(build, dict):
        if 'context' in build:
            build['context'] = resolve_build_path(working_dir, build['context'])
        if 'labels' in build:
            build['labels'] = parse_labels(build['labels'])
763
764
def process_ports(service_dict):
    """Expand every entry in 'ports' into ServicePort instances."""
    if 'ports' not in service_dict:
        return service_dict

    parsed_ports = []
    for entry in service_dict['ports']:
        if isinstance(entry, ServicePort):
            # Already parsed (e.g. by an earlier merge pass).
            parsed_ports.append(entry)
        else:
            parsed_ports.extend(ServicePort.parse(entry))
    service_dict['ports'] = parsed_ports
    return service_dict
777
778
def process_depends_on(service_dict):
    """Normalize the short-form 'depends_on' list into the dict form."""
    if 'depends_on' in service_dict and not isinstance(service_dict['depends_on'], dict):
        service_dict['depends_on'] = {
            svc: {'condition': 'service_started'}
            for svc in service_dict['depends_on']
        }
    return service_dict
785
786
def process_blkio_config(service_dict):
    """Parse human-readable rates in 'blkio_config' into integers, in place."""
    blkio = service_dict.get('blkio_config')
    if not blkio:
        return service_dict

    for bytes_field in ('device_read_bps', 'device_write_bps'):
        for entry in blkio.get(bytes_field, []):
            rate = entry.get('rate', 0)
            entry['rate'] = parse_bytes(rate)
            if entry['rate'] is None:
                raise ConfigurationError('Invalid format for bytes value: "{}"'.format(rate))

    for iops_field in ('device_read_iops', 'device_write_iops'):
        for entry in blkio.get(iops_field, []):
            try:
                entry['rate'] = int(entry.get('rate', 0))
            except ValueError:
                raise ConfigurationError(
                    'Invalid IOPS value: "{}". Must be a positive integer.'.format(entry.get('rate'))
                )

    return service_dict
810
811
def process_healthcheck(service_dict):
    """Normalize 'healthcheck': translate 'disable' into a NONE test and
    parse duration strings into nanosecond integers."""
    if 'healthcheck' not in service_dict:
        return service_dict

    hc = service_dict['healthcheck']

    if 'disable' in hc:
        # 'disable: true' is shorthand for test: ['NONE'].
        del hc['disable']
        hc['test'] = ['NONE']

    for duration_field in ('interval', 'timeout', 'start_period'):
        if duration_field in hc and not isinstance(hc[duration_field], six.integer_types):
            hc[duration_field] = parse_nanoseconds_int(hc[duration_field])

    return service_dict
828
829
def finalize_service_volumes(service_dict, environment):
    """Parse the 'volumes' section into VolumeSpec/MountSpec objects and
    reject duplicate container mount points.

    :param environment: read for COMPOSE_CONVERT_WINDOWS_PATHS /
        COMPOSE_FORCE_WINDOWS_HOST flags controlling path normalization.
    """
    if 'volumes' in service_dict:
        finalized_volumes = []
        normalize = environment.get_boolean('COMPOSE_CONVERT_WINDOWS_PATHS')
        win_host = environment.get_boolean('COMPOSE_FORCE_WINDOWS_HOST')
        for v in service_dict['volumes']:
            if isinstance(v, dict):
                finalized_volumes.append(MountSpec.parse(v, normalize, win_host))
            else:
                finalized_volumes.append(VolumeSpec.parse(v, normalize, win_host))

        mounts = [v.as_volume_spec() if isinstance(v, MountSpec) else v for v in finalized_volumes]
        # Count each container path once instead of re-scanning the whole
        # list for every mount (previously O(n^2) per service).
        internal_counts = {}
        for mount in mounts:
            internal_counts[mount.internal] = internal_counts.get(mount.internal, 0) + 1
        # Every mount sharing a duplicated path is reported, as before.
        duplicate_mounts = [
            mount.repr() for mount in mounts if internal_counts[mount.internal] > 1
        ]

        if duplicate_mounts:
            raise ConfigurationError("Duplicate mount points: [%s]" % (
                ', '.join(duplicate_mounts)))

        service_dict['volumes'] = finalized_volumes

    return service_dict
854
855
def finalize_service(service_config, service_names, version, environment, compatibility):
    """Produce the final runtime dict for one service.

    Resolves environment/env_file, parses volume/secret/config/port-adjacent
    specs into typed objects, rewrites the legacy 'net' key and, in
    compatibility mode, translates deploy/credential_spec sections into
    container-level equivalents.
    """
    service_dict = dict(service_config.config)

    if 'environment' in service_dict or 'env_file' in service_dict:
        service_dict['environment'] = resolve_environment(service_dict, environment)
        service_dict.pop('env_file', None)

    if 'volumes_from' in service_dict:
        service_dict['volumes_from'] = [
            VolumeFromSpec.parse(vf, service_names, version)
            for vf in service_dict['volumes_from']
        ]

    service_dict = finalize_service_volumes(service_dict, environment)

    if 'net' in service_dict:
        # Legacy 'net' key: rewrite as network_mode, mapping a container
        # reference onto 'service:<name>' when it names a known service.
        network_mode = service_dict.pop('net')
        container_name = get_container_name_from_network_mode(network_mode)
        if container_name and container_name in service_names:
            service_dict['network_mode'] = 'service:{}'.format(container_name)
        else:
            service_dict['network_mode'] = network_mode

    if 'networks' in service_dict:
        service_dict['networks'] = parse_networks(service_dict['networks'])

    if 'restart' in service_dict:
        service_dict['restart'] = parse_restart_spec(service_dict['restart'])

    if 'secrets' in service_dict:
        service_dict['secrets'] = [
            types.ServiceSecret.parse(s) for s in service_dict['secrets']
        ]

    if 'configs' in service_dict:
        service_dict['configs'] = [
            types.ServiceConfig.parse(c) for c in service_dict['configs']
        ]

    normalize_build(service_dict, service_config.working_dir, environment)

    if compatibility:
        service_dict = translate_credential_spec_to_security_opt(service_dict)
        service_dict, ignored_keys = translate_deploy_keys_to_container_config(
            service_dict
        )
        if ignored_keys:
            # log.warning: Logger.warn is a deprecated alias.
            log.warning(
                'The following deploy sub-keys are not supported in compatibility mode and have'
                ' been ignored: {}'.format(', '.join(ignored_keys))
            )

    service_dict['name'] = service_config.name
    return normalize_v1_service_format(service_dict)
910
911
def translate_resource_keys_to_container_config(resources_dict, service_dict):
    """Copy deploy 'resources' limits/reservations onto container-level
    keys in ``service_dict``; return the sub-keys that have no
    container-level equivalent and were therefore ignored.
    """
    if 'limits' in resources_dict:
        limits = resources_dict['limits']
        service_dict['mem_limit'] = limits.get('memory')
        if 'cpus' in limits:
            service_dict['cpus'] = float(limits['cpus'])
    if 'reservations' in resources_dict:
        reservations = resources_dict['reservations']
        service_dict['mem_reservation'] = reservations.get('memory')
        if 'cpus' in reservations:
            # CPU reservations cannot be expressed as container options.
            return ['resources.reservations.cpus']
    return []
922
923
def convert_restart_policy(name):
    """Map a deploy restart-policy condition onto the equivalent
    container restart policy name; raise ConfigurationError for
    anything unrecognised.
    """
    policies = {
        'any': 'always',
        'none': 'no',
        'on-failure': 'on-failure',
    }
    if name not in policies:
        raise ConfigurationError('Invalid restart policy "{}"'.format(name))
    return policies[name]
933
934
def convert_credential_spec_to_security_opt(credential_spec):
    """Render a 'credential_spec' mapping as a security-opt source URI;
    a 'file' entry takes precedence over 'registry'.
    """
    if 'file' in credential_spec:
        source = 'file://{file}'.format(file=credential_spec['file'])
    else:
        source = 'registry://{registry}'.format(registry=credential_spec['registry'])
    return source
939
940
def translate_credential_spec_to_security_opt(service_dict):
    """In compatibility mode, express a 'credential_spec' section as a
    'security_opt' entry (replacing any existing security_opt list).
    Returns the (possibly mutated) service dict.
    """
    if 'credential_spec' not in service_dict:
        return service_dict

    spec = convert_credential_spec_to_security_opt(service_dict['credential_spec'])
    service_dict['security_opt'] = ['credentialspec={spec}'.format(spec=spec)]
    return service_dict
952
953
def translate_deploy_keys_to_container_config(service_dict):
    """Move translatable 'deploy' sub-keys onto container-level options.

    Swarm-only keys ('credential_spec', 'configs') are dropped outright.
    Returns the mutated service dict together with the list of deploy
    sub-keys that could not be translated and were ignored.
    """
    service_dict.pop('credential_spec', None)
    service_dict.pop('configs', None)

    if 'deploy' not in service_dict:
        return service_dict, []

    deploy_dict = service_dict['deploy']
    unsupported = ['endpoint_mode', 'labels', 'update_config', 'rollback_config', 'placement']
    ignored_keys = [key for key in unsupported if key in deploy_dict]

    # Replicated-mode replica counts map onto compose's 'scale'.
    if 'replicas' in deploy_dict and deploy_dict.get('mode', 'replicated') == 'replicated':
        service_dict['scale'] = deploy_dict['replicas']

    if 'restart_policy' in deploy_dict:
        restart_policy = deploy_dict['restart_policy']
        service_dict['restart'] = {
            'Name': convert_restart_policy(restart_policy.get('condition', 'any')),
            'MaximumRetryCount': restart_policy.get('max_attempts', 0)
        }
        ignored_keys.extend(
            'restart_policy.{}'.format(key)
            for key in restart_policy
            if key not in ('condition', 'max_attempts')
        )

    ignored_keys.extend(
        translate_resource_keys_to_container_config(
            deploy_dict.get('resources', {}), service_dict
        )
    )

    del service_dict['deploy']

    return service_dict, ignored_keys
990
991
def normalize_v1_service_format(service_dict):
    """Rewrite legacy v1 keys ('log_driver', 'log_opt', 'dockerfile')
    into their nested v2+ equivalents ('logging', 'build').
    """
    if 'log_driver' in service_dict or 'log_opt' in service_dict:
        logging_section = service_dict.setdefault('logging', {})
        if 'log_driver' in service_dict:
            logging_section['driver'] = service_dict.pop('log_driver')
        if 'log_opt' in service_dict:
            logging_section['options'] = service_dict.pop('log_opt')

    if 'dockerfile' in service_dict:
        build_section = service_dict.setdefault('build', {})
        build_section.update({'dockerfile': service_dict.pop('dockerfile')})

    return service_dict
1010
1011
def merge_service_dicts_from_files(base, override, version):
    """When merging services from multiple files we need to merge the
    `extends` field. This is not handled by `merge_service_dicts()` which
    is used to perform the `extends`.
    """
    merged = merge_service_dicts(base, override, version)
    for source in (override, base):
        if 'extends' in source:
            merged['extends'] = source['extends']
            break
    return merged
1023
1024
class MergeDict(dict):
    """A dict subclass that merges a ``base`` and an ``override`` mapping
    into itself, field by field, using type-appropriate strategies."""

    def __init__(self, base, override):
        self.base = base
        self.override = override

    def needs_merge(self, field):
        """True when either source mapping defines ``field``."""
        return field in self.base or field in self.override

    def merge_field(self, field, merge_func, default=None):
        """Merge ``field`` with an arbitrary two-argument merge function."""
        if not self.needs_merge(field):
            return
        base_value = self.base.get(field, default)
        override_value = self.override.get(field, default)
        self[field] = merge_func(base_value, override_value)

    def merge_mapping(self, field, parse_func=None):
        """Merge a mapping-valued field; override keys win over base keys."""
        if not self.needs_merge(field):
            return

        if parse_func is None:
            def parse_func(mapping):
                return mapping or {}

        merged = parse_func(self.base.get(field))
        merged.update(parse_func(self.override.get(field)))
        self[field] = merged

    def merge_sequence(self, field, parse_func):
        """Merge a sequence-valued field, keyed by each parsed item's
        ``merge_field`` attribute; the result is a sorted list of reprs."""
        if not self.needs_merge(field):
            return

        def as_index(seq):
            return to_mapping((parse_func(entry) for entry in seq), 'merge_field')

        combined = as_index(self.base.get(field, []))
        combined.update(as_index(self.override.get(field, [])))
        self[field] = [entry.repr() for entry in sorted(combined.values())]

    def merge_scalar(self, field):
        """Take the override's value when present, else the base's."""
        if self.needs_merge(field):
            self[field] = self.override.get(field, self.base.get(field))
1068
1069
def merge_service_dicts(base, override, version):
    """Merge two service definitions into one; the override's values win.

    Each field family uses its own strategy (mappings, keyed sequences,
    unique string lists, scalars), and v1 files additionally get the
    legacy image/build mutual-exclusivity rule applied.
    """
    md = MergeDict(base, override)

    md.merge_mapping('environment', parse_environment)
    md.merge_mapping('labels', parse_labels)
    md.merge_mapping('ulimits', parse_flat_dict)
    md.merge_mapping('sysctls', parse_sysctls)
    md.merge_mapping('depends_on', parse_depends_on)
    md.merge_mapping('storage_opt', parse_flat_dict)
    md.merge_sequence('links', ServiceLink.parse)
    md.merge_sequence('secrets', types.ServiceSecret.parse)
    md.merge_sequence('configs', types.ServiceConfig.parse)
    md.merge_sequence('security_opt', types.SecurityOpt.parse)
    md.merge_mapping('extra_hosts', parse_extra_hosts)

    md.merge_field('networks', merge_networks, default={})
    for field in ['volumes', 'devices']:
        md.merge_field(field, merge_path_mappings)

    for field in [
        'cap_add', 'cap_drop', 'expose', 'external_links',
        'volumes_from', 'device_cgroup_rules',
    ]:
        md.merge_field(field, merge_unique_items_lists, default=[])

    for field in ['dns', 'dns_search', 'env_file', 'tmpfs']:
        md.merge_field(field, merge_list_or_string)

    md.merge_field('logging', merge_logging, default={})
    merge_ports(md, base, override)
    md.merge_field('blkio_config', merge_blkio_config, default={})
    md.merge_field('healthcheck', merge_healthchecks, default={})
    md.merge_field('deploy', merge_deploy, default={})

    # Any remaining known keys are plain scalars: the override simply wins.
    for field in set(ALLOWED_KEYS) - set(md):
        md.merge_scalar(field)

    if version == V1:
        # v1: 'image' and 'build' are mutually exclusive.
        legacy_v1_merge_image_or_build(md, base, override)
    elif md.needs_merge('build'):
        md['build'] = merge_build(md, base, override)

    return dict(md)
1113
1114
def merge_unique_items_lists(base, override):
    """Union both lists as strings, deduplicate, and return them sorted."""
    combined = {str(item) for item in base}
    combined.update(str(item) for item in override)
    return sorted(combined)
1119
1120
def merge_healthchecks(base, override):
    """Overlay ``override`` onto ``base``; an explicit ``disabled: true``
    override replaces the base healthcheck entirely."""
    if override.get('disabled') is True:
        return override
    merged = dict(base)
    merged.update(override)
    return merged
1127
1128
def merge_ports(md, base, override):
    """Merge the 'ports' field: index each parsed ServicePort by its merge
    key so overrides replace matching base entries, then sort by target."""
    field = 'ports'

    if not md.needs_merge(field):
        return

    def index_ports(entries):
        parsed = []
        for entry in entries:
            parsed.extend(ServicePort.parse(entry))
        return to_mapping(parsed, 'merge_field')

    combined = index_ports(md.base.get(field, []))
    combined.update(index_ports(md.override.get(field, [])))
    md[field] = sorted(combined.values(), key=lambda port: port.target)
1144
1145
def merge_build(output, base, override):
    """Merge two services' 'build' sections (string shorthand or mapping)
    into a single mapping."""
    def as_build_dict(service):
        config = service.get('build', {})
        # A bare string is shorthand for {'context': <string>}.
        if isinstance(config, six.string_types):
            return {'context': config}
        return config

    merged = MergeDict(as_build_dict(base), as_build_dict(override))
    for scalar in ('context', 'dockerfile', 'network', 'target', 'shm_size', 'isolation'):
        merged.merge_scalar(scalar)
    merged.merge_mapping('args', parse_build_arguments)
    merged.merge_field('cache_from', merge_unique_items_lists, default=[])
    merged.merge_mapping('labels', parse_labels)
    merged.merge_mapping('extra_hosts', parse_extra_hosts)
    return dict(merged)
1165
1166
def merge_deploy(base, override):
    """Merge two 'deploy' sections, recursing into the nested 'resources'
    and 'placement' mappings with their own MergeDicts."""
    md = MergeDict(base or {}, override or {})
    md.merge_scalar('mode')
    md.merge_scalar('endpoint_mode')
    md.merge_scalar('replicas')
    md.merge_mapping('labels', parse_labels)
    md.merge_mapping('update_config')
    md.merge_mapping('rollback_config')
    md.merge_mapping('restart_policy')
    if md.needs_merge('resources'):
        resources_md = MergeDict(md.base.get('resources') or {}, md.override.get('resources') or {})
        resources_md.merge_mapping('limits')
        resources_md.merge_field('reservations', merge_reservations, default={})
        md['resources'] = dict(resources_md)
    if md.needs_merge('placement'):
        placement_md = MergeDict(md.base.get('placement') or {}, md.override.get('placement') or {})
        placement_md.merge_field('constraints', merge_unique_items_lists, default=[])
        placement_md.merge_field('preferences', merge_unique_objects_lists, default=[])
        md['placement'] = dict(placement_md)

    return dict(md)
1188
1189
def merge_networks(base, override):
    """Merge two 'networks' sections, normalizing the list form (bare
    network names, no per-network options) to a mapping first."""
    all_network_names = set(base) | set(override)
    if isinstance(base, list):
        base = {name: {} for name in base}
    if isinstance(override, list):
        override = {name: {} for name in override}

    merged_networks = {}
    for name in all_network_names:
        md = MergeDict(base.get(name) or {}, override.get(name) or {})
        md.merge_field('aliases', merge_unique_items_lists, [])
        md.merge_field('link_local_ips', merge_unique_items_lists, [])
        for scalar in ('priority', 'ipv4_address', 'ipv6_address'):
            md.merge_scalar(scalar)
        merged_networks[name] = dict(md)
    return merged_networks
1204
1205
def merge_reservations(base, override):
    """Merge the deploy resources 'reservations' sub-section."""
    merged = MergeDict(base, override)
    merged.merge_scalar('cpus')
    merged.merge_scalar('memory')
    merged.merge_sequence('generic_resources', types.GenericResource.parse)
    return dict(merged)
1212
1213
def merge_unique_objects_lists(base, override):
    """Deduplicate mappings from both lists by content hash (override wins
    on collision) and return the values in a stable, hash-sorted order."""
    indexed = {json_hash(obj): obj for obj in base + override}
    return [indexed[digest] for digest in sorted(indexed)]
1217
1218
def merge_blkio_config(base, override):
    """Merge 'blkio_config': a scalar weight plus per-device limit lists
    keyed by device path (override entries replace base entries)."""
    merged = MergeDict(base, override)
    merged.merge_scalar('weight')

    def merge_limits(base_limits, override_limits):
        by_path = {entry['path']: entry for entry in base_limits}
        by_path.update((entry['path'], entry) for entry in override_limits)
        return sorted(by_path.values(), key=lambda entry: entry['path'])

    for field in (
            'device_read_bps', 'device_read_iops', 'device_write_bps',
            'device_write_iops', 'weight_device',
    ):
        merged.merge_field(field, merge_limits, default=[])

    return dict(merged)
1237
1238
def merge_logging(base, override):
    """Merge two 'logging' sections. Options are only merged when the
    driver is unchanged; a driver switch discards the base's options."""
    md = MergeDict(base, override)
    md.merge_scalar('driver')
    if base.get('driver') is None or md.get('driver') == base.get('driver'):
        md.merge_mapping('options', lambda m: m or {})
    elif override.get('options'):
        # Different driver: only the override's options make sense.
        md['options'] = override.get('options', {})
    return dict(md)
1247
1248
def legacy_v1_merge_image_or_build(output, base, override):
    """In v1, 'image' and 'build' are mutually exclusive: keep exactly one,
    preferring the override's setting, with 'image' beating 'build'."""
    output.pop('image', None)
    output.pop('build', None)
    for source in (override, base):
        if 'image' in source:
            output['image'] = source['image']
            return
        if 'build' in source:
            output['build'] = source['build']
            return
1260
1261
def merge_environment(base, override):
    """Merge two environment sections; override entries win."""
    merged = parse_environment(base)
    for key, value in parse_environment(override).items():
        merged[key] = value
    return merged
1266
1267
def merge_labels(base, override):
    """Merge two labels sections; override entries win."""
    merged = parse_labels(base)
    for key, value in parse_labels(override).items():
        merged[key] = value
    return merged
1272
1273
def split_kv(kvpair):
    """Split a 'KEY=VALUE' string on the first '='; a bare key gets an
    empty-string value."""
    key, sep, value = kvpair.partition('=')
    if sep:
        return [key, value]
    return key, ''
1279
1280
def parse_dict_or_list(split_func, type_name, arguments):
    """Normalize a config section that may be a list of strings or a
    mapping into a plain dict, using ``split_func`` to split list entries
    into (key, value) pairs. Raise ConfigurationError for other types."""
    if not arguments:
        return {}

    if isinstance(arguments, dict):
        return dict(arguments)

    if isinstance(arguments, list):
        return dict(split_func(entry) for entry in arguments)

    raise ConfigurationError(
        "%s \"%s\" must be a list or mapping," %
        (type_name, arguments)
    )
1295
1296
# Section parsers for config values that accept either list or mapping
# syntax; each one binds parse_dict_or_list to its splitter and name.
parse_build_arguments = functools.partial(parse_dict_or_list, split_env, 'build arguments')
parse_environment = functools.partial(parse_dict_or_list, split_env, 'environment')
parse_labels = functools.partial(parse_dict_or_list, split_kv, 'labels')
# Bare network names map to None (no per-network options).
parse_networks = functools.partial(parse_dict_or_list, lambda k: (k, None), 'networks')
parse_sysctls = functools.partial(parse_dict_or_list, split_kv, 'sysctls')
# Short-form depends_on entries default to the 'service_started' condition.
parse_depends_on = functools.partial(
    parse_dict_or_list, lambda k: (k, {'condition': 'service_started'}), 'depends_on'
)
1305
1306
def parse_flat_dict(d):
    """Return a shallow copy of mapping ``d``; None/empty becomes {}.
    Raise ConfigurationError for any non-mapping value."""
    if not d:
        return {}

    if not isinstance(d, dict):
        raise ConfigurationError("Invalid type: expected mapping")

    return dict(d)
1315
1316
def resolve_env_var(key, val, environment):
    """Resolve one environment entry: an explicit value wins, then a
    lookup in ``environment``, otherwise None."""
    if val is None and environment and key in environment:
        val = environment[key]
    return key, val
1324
1325
def resolve_volume_paths(working_dir, service_dict):
    """Expand every entry of the service's 'volumes' list relative to
    ``working_dir``."""
    resolved = []
    for volume in service_dict['volumes']:
        resolved.append(resolve_volume_path(working_dir, volume))
    return resolved
1331
1332
def resolve_volume_path(working_dir, volume):
    """Expand relative ('.') and user ('~') host paths in one volume entry.

    Long-syntax (dict) bind mounts get their 'source' expanded in place;
    string entries are split, expanded on the host side, and re-joined.
    """
    if isinstance(volume, dict):
        source = volume.get('source', '')
        # Only bind mounts reference host paths worth expanding.
        if source.startswith(('.', '~')) and volume['type'] == 'bind':
            volume['source'] = expand_path(working_dir, source)
        return volume

    container_path, mount_params = split_path_mapping(volume)
    if mount_params is None:
        # Anonymous volume: nothing on the host side to expand.
        return container_path

    host_path, mode = mount_params
    if host_path is None:
        return container_path
    if host_path.startswith('.'):
        host_path = expand_path(working_dir, host_path)
    host_path = os.path.expanduser(host_path)
    suffix = ':' + mode if mode else ''
    return u"{}:{}{}".format(host_path, container_path, suffix)
1352
1353
def normalize_build(service_dict, working_dir, environment):
    """Normalize a service's 'build' section in place to mapping form and
    resolve its build args against the environment."""
    if 'build' not in service_dict:
        return

    raw = service_dict['build']
    if isinstance(raw, six.string_types):
        # Shortcut where specifying a string is treated as the build context.
        build = {'context': raw}
    else:
        build = dict(raw)
        if 'args' in build:
            build['args'] = build_string_dict(
                resolve_build_args(build.get('args'), environment)
            )

    service_dict['build'] = build
1369
1370
def resolve_build_path(working_dir, build_path):
    """Leave URL build contexts untouched; expand filesystem contexts
    against ``working_dir``."""
    return build_path if is_url(build_path) else expand_path(working_dir, build_path)
1375
1376
def is_url(build_path):
    # True when the build context is a remote reference rather than a local
    # path; the accepted prefixes come from the module-level
    # DOCKER_VALID_URL_PREFIXES constant.
    return build_path.startswith(DOCKER_VALID_URL_PREFIXES)
1379
1380
def validate_paths(service_dict):
    """Ensure a local build context exists and is readable; URL contexts
    and build sections without a context are skipped."""
    if 'build' not in service_dict:
        return
    build = service_dict.get('build', {})

    if isinstance(build, six.string_types):
        build_path = build
    elif isinstance(build, dict) and 'context' in build:
        build_path = build['context']
    else:
        # We have a build section but no context, so nothing to validate
        return

    if is_url(build_path):
        return
    if not os.path.exists(build_path) or not os.access(build_path, os.R_OK):
        raise ConfigurationError(
            "build path %s either does not exist, is not accessible, "
            "or is not a valid URL." % build_path)
1400
1401
def merge_path_mappings(base, override):
    """Merge volume/device mapping lists, keyed by container path;
    override entries replace base entries."""
    combined = dict_from_path_mappings(base)
    combined.update(dict_from_path_mappings(override))
    return path_mappings_from_dict(combined)
1406
1407
def dict_from_path_mappings(path_mappings):
    """Index path mappings by container path; None/empty yields {}."""
    if not path_mappings:
        return {}
    return dict(split_path_mapping(mapping) for mapping in path_mappings)
1413
1414
def path_mappings_from_dict(d):
    """Rebuild the list form of path mappings, sorted by container path
    for deterministic output."""
    return [join_path_mapping(item) for item in sorted(d.items())]
1417
1418
def split_path_mapping(volume_path):
    """
    Ascertain if the volume_path contains a host path as well as a container
    path. Using splitdrive so windows absolute paths won't cause issues with
    splitting on ':'.

    Returns (container_path, mount_params), where mount_params is None for
    an anonymous volume, the original dict for long syntax, or a
    (host_path, mode) tuple for 'host:container[:mode]' strings.
    """
    if isinstance(volume_path, dict):
        # Long syntax: key by the mount target, keep the dict as-is.
        return (volume_path.get('target'), volume_path)
    drive, volume_config = splitdrive(volume_path)

    if ':' in volume_config:
        (host, container) = volume_config.split(':', 1)
        # The container side may itself carry a drive (windows) and/or a
        # trailing ':mode' suffix.
        container_drive, container_path = splitdrive(container)
        mode = None
        if ':' in container_path:
            container_path, mode = container_path.rsplit(':', 1)

        return (container_drive + container_path, (drive + host, mode))
    else:
        return (volume_path, None)
1439
1440
def process_security_opt(service_dict):
    """Parse each 'security_opt' entry into a SecurityOpt; the key is left
    untouched when the list is empty or absent."""
    parsed = [
        SecurityOpt.parse(value)
        for value in service_dict.get('security_opt', [])
    ]
    if parsed:
        service_dict['security_opt'] = parsed
    return service_dict
1449
1450
def join_path_mapping(pair):
    """Re-serialize one (container, host) mapping back to its config form."""
    container, host = pair
    if isinstance(host, dict):
        # Long-syntax mounts round-trip unchanged.
        return host
    if host is None:
        # Anonymous volume: just the container path.
        return container
    host_path, mode = host
    joined = host_path + ":" + container
    if mode:
        joined += ":" + mode
    return joined
1463
1464
def expand_path(working_dir, path):
    """Expand '~' in ``path`` and make it absolute relative to ``working_dir``."""
    expanded = os.path.expanduser(path)
    return os.path.abspath(os.path.join(working_dir, expanded))
1467
1468
def merge_list_or_string(base, override):
    """Concatenate two values that may each be a string, a list, or None."""
    merged = list(to_list(base))
    merged.extend(to_list(override))
    return merged
1471
1472
def to_list(value):
    """Normalize a scalar-or-list config value to a list: None becomes [],
    a string becomes a one-element list, anything else passes through."""
    if value is None:
        return []
    if isinstance(value, six.string_types):
        return [value]
    return value
1480
1481
def to_mapping(sequence, key_field):
    """Index the items of ``sequence`` by the attribute named ``key_field``."""
    return dict((getattr(item, key_field), item) for item in sequence)
1484
1485
def has_uppercase(name):
    """True when ``name`` contains at least one ASCII uppercase letter."""
    for char in name:
        if char in string.ascii_uppercase:
            return True
    return False
1488
1489
def load_yaml(filename, encoding=None, binary=True):
    """Load a YAML file, retrying once in text mode as UTF-8 when the
    first attempt fails, and wrapping any remaining failure in a
    ConfigurationError.
    """
    try:
        with io.open(filename, 'rb' if binary else 'r', encoding=encoding) as fh:
            return yaml.safe_load(fh)
    except (IOError, yaml.YAMLError, UnicodeDecodeError) as e:
        if encoding is None:
            # Sometimes the user's locale sets an encoding that doesn't match
            # the YAML files. In such cases, retry once with the "default"
            # UTF-8 encoding
            return load_yaml(filename, encoding='utf-8-sig', binary=False)
        error_name = getattr(e, '__module__', '') + '.' + e.__class__.__name__
        raise ConfigurationError(u"{}: {}".format(error_name, e))
1502