from __future__ import absolute_import
from __future__ import unicode_literals

import itertools
import logging
import os
import re
import sys
from collections import namedtuple
from collections import OrderedDict
from operator import attrgetter

import enum
import six
from docker.errors import APIError
from docker.errors import ImageNotFound
from docker.errors import NotFound
from docker.types import LogConfig
from docker.types import Mount
from docker.utils import version_gte
from docker.utils import version_lt
from docker.utils.ports import build_port_bindings
from docker.utils.ports import split_port
from docker.utils.utils import convert_tmpfs_mounts

from . import __version__
from . import const
from . import progress_stream
from .config import DOCKER_CONFIG_KEYS
from .config import is_url
from .config import merge_environment
from .config import merge_labels
from .config.errors import DependencyError
from .config.types import MountSpec
from .config.types import ServicePort
from .config.types import VolumeSpec
from .const import DEFAULT_TIMEOUT
from .const import IS_WINDOWS_PLATFORM
from .const import LABEL_CONFIG_HASH
from .const import LABEL_CONTAINER_NUMBER
from .const import LABEL_ONE_OFF
from .const import LABEL_PROJECT
from .const import LABEL_SERVICE
from .const import LABEL_SLUG
from .const import LABEL_VERSION
from .const import NANOCPUS_SCALE
from .const import WINDOWS_LONGPATH_PREFIX
from .container import Container
from .errors import HealthCheckFailed
from .errors import NoHealthCheckConfigured
from .errors import OperationFailedError
from .parallel import parallel_execute
from .progress_stream import stream_output
from .progress_stream import StreamOutputError
from .utils import generate_random_id
from .utils import json_hash
from .utils import parse_bytes
from .utils import parse_seconds_float
from .utils import truncate_id
from .utils import unique_everseen


log = logging.getLogger(__name__)


HOST_CONFIG_KEYS = [
    'cap_add',
    'cap_drop',
    'cgroup_parent',
    'cpu_count',
    'cpu_percent',
    'cpu_period',
    'cpu_quota',
    'cpu_rt_period',
    'cpu_rt_runtime',
    'cpu_shares',
    'cpus',
    'cpuset',
    'device_cgroup_rules',
    'devices',
    'dns',
    'dns_search',
    'dns_opt',
    'env_file',
    'extra_hosts',
    'group_add',
    'init',
    'ipc',
    'isolation',
    'read_only',
    'log_driver',
    'log_opt',
    'mem_limit',
    'mem_reservation',
    'memswap_limit',
    'mem_swappiness',
    'oom_kill_disable',
    'oom_score_adj',
    'pid',
    'pids_limit',
    'privileged',
    'restart',
    'runtime',
    'security_opt',
    'shm_size',
    'storage_opt',
    'sysctls',
    'userns_mode',
    'volumes_from',
    'volume_driver',
]

CONDITION_STARTED = 'service_started'
CONDITION_HEALTHY = 'service_healthy'


class BuildError(Exception):
    def __init__(self, service, reason):
        self.service = service
        self.reason = reason


class NeedsBuildError(Exception):
    def __init__(self, service):
        self.service = service


class NoSuchImageError(Exception):
    pass


ServiceName = namedtuple('ServiceName', 'project service number')


ConvergencePlan = namedtuple('ConvergencePlan', 'action containers')


@enum.unique
class ConvergenceStrategy(enum.Enum):
    """Enumeration for all possible convergence strategies. Values refer to
    when containers should be recreated.
    """
    changed = 1
    always = 2
    never = 3

    @property
    def allows_recreate(self):
        return self is not type(self).never


@enum.unique
class ImageType(enum.Enum):
    """Enumeration for the types of images known to compose."""
    none = 0
    local = 1
    all = 2


@enum.unique
class BuildAction(enum.Enum):
    """Enumeration for the possible build actions."""
    none = 0
    force = 1
    skip = 2


class Service(object):
    def __init__(
        self,
        name,
        client=None,
        project='default',
        use_networking=False,
        links=None,
        volumes_from=None,
        network_mode=None,
        networks=None,
        secrets=None,
        scale=None,
        pid_mode=None,
        default_platform=None,
        **options
    ):
        self.name = name
        self.client = client
        self.project = project
        self.use_networking = use_networking
        self.links = links or []
        self.volumes_from = volumes_from or []
        self.network_mode = network_mode or NetworkMode(None)
        self.pid_mode = pid_mode or PidMode(None)
        self.networks = networks or {}
        self.secrets = secrets or []
        self.scale_num = scale or 1
        self.default_platform = default_platform
        self.options = options

    def __repr__(self):
        return '<Service: {}>'.format(self.name)

    def containers(self, stopped=False, one_off=False, filters=None, labels=None):
        if filters is None:
            filters = {}
        filters.update({'label': self.labels(one_off=one_off) + (labels or [])})

        result = list(filter(None, [
            Container.from_ps(self.client, container)
            for container in self.client.containers(
                all=stopped,
                filters=filters)])
        )
        if result:
            return result

        filters.update({'label': self.labels(one_off=one_off, legacy=True) + (labels or [])})
        return list(
            filter(
                lambda c: c.has_legacy_proj_name(self.project), filter(None, [
                    Container.from_ps(self.client, container)
                    for container in self.client.containers(
                        all=stopped,
                        filters=filters)])
            )
        )

    def get_container(self, number=1):
        """Return a :class:`compose.container.Container` for this service. The
        container must be active, and match `number`.
        """
        for container in self.containers(labels=['{0}={1}'.format(LABEL_CONTAINER_NUMBER, number)]):
            return container

        raise ValueError("No container found for %s_%s" % (self.name, number))

    def start(self, **options):
        containers = self.containers(stopped=True)
        for c in containers:
            self.start_container_if_stopped(c, **options)
        return containers

    def show_scale_warnings(self, desired_num):
        if self.custom_container_name and desired_num > 1:
            log.warning('The "%s" service is using the custom container name "%s". '
                        'Docker requires each container to have a unique name. '
                        'Remove the custom name to scale the service.'
                        % (self.name, self.custom_container_name))

        if self.specifies_host_port() and desired_num > 1:
            log.warning('The "%s" service specifies a port on the host. If multiple containers '
                        'for this service are created on a single host, the port will clash.'
                        % self.name)

    def scale(self, desired_num, timeout=None):
        """
        Adjusts the number of containers to the specified number and ensures
        they are running.

        - creates containers until there are at least `desired_num`
        - stops containers until there are at most `desired_num` running
        - starts containers until there are at least `desired_num` running
        - removes all stopped containers
        """

        self.show_scale_warnings(desired_num)

        running_containers = self.containers(stopped=False)
        num_running = len(running_containers)
        for c in running_containers:
            if not c.has_legacy_proj_name(self.project):
                continue
            log.info('Recreating container with legacy name %s' % c.name)
            self.recreate_container(c, timeout, start_new_container=False)

        if desired_num == num_running:
            # do nothing as we already have the desired number
            log.info('Desired container number already achieved')
            return

        if desired_num > num_running:
            all_containers = self.containers(stopped=True)

            if num_running != len(all_containers):
                # we have some stopped containers, check for divergences
                stopped_containers = [
                    c for c in all_containers if not c.is_running
                ]

                # Remove containers that have diverged
                divergent_containers = [
                    c for c in stopped_containers if self._containers_have_diverged([c])
                ]
                for c in divergent_containers:
                    c.remove()

                all_containers = list(set(all_containers) - set(divergent_containers))

            sorted_containers = sorted(all_containers, key=attrgetter('number'))
            self._execute_convergence_start(
                sorted_containers, desired_num, timeout, True, True
            )

        if desired_num < num_running:
            num_to_stop = num_running - desired_num

            sorted_running_containers = sorted(
                running_containers,
                key=attrgetter('number'))

            self._downscale(sorted_running_containers[-num_to_stop:], timeout)

    def create_container(self,
                         one_off=False,
                         previous_container=None,
                         number=None,
                         quiet=False,
                         **override_options):
        """
        Create a container for this service. If the image doesn't exist, attempt to pull
        it.
        """
        # This is only necessary for `scale` and `volumes_from`
        # auto-creating containers to satisfy the dependency.
        self.ensure_image_exists()

        container_options = self._get_container_create_options(
            override_options,
            number or self._next_container_number(one_off=one_off),
            one_off=one_off,
            previous_container=previous_container,
        )

        if 'name' in container_options and not quiet:
            log.info("Creating %s" % container_options['name'])

        try:
            return Container.create(self.client, **container_options)
        except APIError as ex:
            raise OperationFailedError("Cannot create container for service %s: %s" %
                                       (self.name, ex.explanation))

    def ensure_image_exists(self, do_build=BuildAction.none, silent=False):
        if self.can_be_built() and do_build == BuildAction.force:
            self.build()
            return

        try:
            self.image()
            return
        except NoSuchImageError:
            pass

        if not self.can_be_built():
            self.pull(silent=silent)
            return

        if do_build == BuildAction.skip:
            raise NeedsBuildError(self)

        self.build()
        log.warning(
            "Image for service {} was built because it did not already exist. To "
            "rebuild this image you must use `docker-compose build` or "
            "`docker-compose up --build`.".format(self.name))

    def image(self):
        try:
            return self.client.inspect_image(self.image_name)
        except ImageNotFound:
            raise NoSuchImageError("Image '{}' not found".format(self.image_name))

    @property
    def image_name(self):
        return self.options.get('image', '{project}_{s.name}'.format(
            s=self, project=self.project.lstrip('_-')
        ))

    @property
    def platform(self):
        platform = self.options.get('platform')
        if not platform and version_gte(self.client.api_version, '1.35'):
            platform = self.default_platform
        return platform

    def convergence_plan(self, strategy=ConvergenceStrategy.changed):
        containers = self.containers(stopped=True)

        if not containers:
            return ConvergencePlan('create', [])

        if strategy is ConvergenceStrategy.never:
            return ConvergencePlan('start', containers)

        if (
            strategy is ConvergenceStrategy.always or
            self._containers_have_diverged(containers)
        ):
            return ConvergencePlan('recreate', containers)

        stopped = [c for c in containers if not c.is_running]

        if stopped:
            return ConvergencePlan('start', stopped)

        return ConvergencePlan('noop', containers)
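
    # How the plan is consumed: execute_convergence_plan() dispatches 'create'
    # to _execute_convergence_create, 'recreate' to
    # _execute_convergence_recreate, 'start' to _execute_convergence_start,
    # and 'noop' only falls back to a start when the scale is off.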

    def _containers_have_diverged(self, containers):
        config_hash = None

        try:
            config_hash = self.config_hash
        except NoSuchImageError as e:
            log.debug(
                'Service %s has diverged: %s',
                self.name, six.text_type(e),
            )
            return True

        has_diverged = False

        for c in containers:
            if c.has_legacy_proj_name(self.project):
                log.debug('%s has diverged: Legacy project name' % c.name)
                has_diverged = True
                continue
            container_config_hash = c.labels.get(LABEL_CONFIG_HASH, None)
            if container_config_hash != config_hash:
                log.debug(
                    '%s has diverged: %s != %s',
                    c.name, container_config_hash, config_hash,
                )
                has_diverged = True

        return has_diverged

    def _execute_convergence_create(self, scale, detached, start):

        i = self._next_container_number()

        def create_and_start(service, n):
            container = service.create_container(number=n, quiet=True)
            if not detached:
                container.attach_log_stream()
            if start:
                self.start_container(container)
            return container

        containers, errors = parallel_execute(
            [
                ServiceName(self.project, self.name, index)
                for index in range(i, i + scale)
            ],
            lambda service_name: create_and_start(self, service_name.number),
            lambda service_name: self.get_container_name(service_name.service, service_name.number),
            "Creating"
        )
        for error in errors.values():
            raise OperationFailedError(error)

        return containers

    def _execute_convergence_recreate(self, containers, scale, timeout, detached, start,
                                      renew_anonymous_volumes):
        if scale is not None and len(containers) > scale:
            self._downscale(containers[scale:], timeout)
            containers = containers[:scale]

        def recreate(container):
            return self.recreate_container(
                container, timeout=timeout, attach_logs=not detached,
                start_new_container=start, renew_anonymous_volumes=renew_anonymous_volumes
            )
        containers, errors = parallel_execute(
            containers,
            recreate,
            lambda c: c.name,
            "Recreating",
        )
        for error in errors.values():
            raise OperationFailedError(error)

        if scale is not None and len(containers) < scale:
            containers.extend(self._execute_convergence_create(
                scale - len(containers), detached, start
            ))
        return containers

    def _execute_convergence_start(self, containers, scale, timeout, detached, start):
        if scale is not None and len(containers) > scale:
            self._downscale(containers[scale:], timeout)
            containers = containers[:scale]
        if start:
            _, errors = parallel_execute(
                containers,
                lambda c: self.start_container_if_stopped(c, attach_logs=not detached, quiet=True),
                lambda c: c.name,
                "Starting",
            )

            for error in errors.values():
                raise OperationFailedError(error)

        if scale is not None and len(containers) < scale:
            containers.extend(self._execute_convergence_create(
                scale - len(containers), detached, start
            ))
        return containers

    def _downscale(self, containers, timeout=None):
        def stop_and_remove(container):
            container.stop(timeout=self.stop_timeout(timeout))
            container.remove()

        parallel_execute(
            containers,
            stop_and_remove,
            lambda c: c.name,
            "Stopping and removing",
        )

    def execute_convergence_plan(self, plan, timeout=None, detached=False,
                                 start=True, scale_override=None,
                                 rescale=True, reset_container_image=False,
                                 renew_anonymous_volumes=False):
        (action, containers) = plan
        scale = scale_override if scale_override is not None else self.scale_num
        containers = sorted(containers, key=attrgetter('number'))

        self.show_scale_warnings(scale)

        if action == 'create':
            return self._execute_convergence_create(
                scale, detached, start
            )

        # The create action always needs an initial scale, but otherwise
        # we set scale to None in no-rescale scenarios (`run` dependencies)
        if not rescale:
            scale = None

        if action == 'recreate':
            if reset_container_image:
                # Updating the image ID on the container object lets us recover old volumes if
                # the new image uses them as well
                img_id = self.image()['Id']
                for c in containers:
                    c.reset_image(img_id)
            return self._execute_convergence_recreate(
                containers, scale, timeout, detached, start,
                renew_anonymous_volumes,
            )

        if action == 'start':
            return self._execute_convergence_start(
                containers, scale, timeout, detached, start
            )

        if action == 'noop':
            if scale != len(containers):
                return self._execute_convergence_start(
                    containers, scale, timeout, detached, start
                )
            for c in containers:
                log.info("%s is up-to-date" % c.name)

            return containers

        raise Exception("Invalid action: {}".format(action))

    def recreate_container(self, container, timeout=None, attach_logs=False, start_new_container=True,
                           renew_anonymous_volumes=False):
        """Recreate a container.

        The original container is renamed to a temporary name so that data
        volumes can be copied to the new container, before the original
        container is removed.
        """

        container.stop(timeout=self.stop_timeout(timeout))
        container.rename_to_tmp_name()
        new_container = self.create_container(
            previous_container=container if not renew_anonymous_volumes else None,
            number=container.number,
            quiet=True,
        )
        if attach_logs:
            new_container.attach_log_stream()
        if start_new_container:
            self.start_container(new_container)
        container.remove()
        return new_container

    def stop_timeout(self, timeout):
        if timeout is not None:
            return timeout
        timeout = parse_seconds_float(self.options.get('stop_grace_period'))
        if timeout is not None:
            return timeout
        return DEFAULT_TIMEOUT
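
    # Worked example of the precedence above: an explicit `timeout` argument
    # always wins; otherwise a configured `stop_grace_period` such as "1m30s"
    # parses to 90.0 seconds; DEFAULT_TIMEOUT is the final fallback.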

    def start_container_if_stopped(self, container, attach_logs=False, quiet=False):
        if not container.is_running:
            if not quiet:
                log.info("Starting %s" % container.name)
            if attach_logs:
                container.attach_log_stream()
            return self.start_container(container)

    def start_container(self, container, use_network_aliases=True):
        self.connect_container_to_networks(container, use_network_aliases)
        try:
            container.start()
        except APIError as ex:
            raise OperationFailedError("Cannot start service %s: %s" % (self.name, ex.explanation))
        return container

    @property
    def prioritized_networks(self):
        return OrderedDict(
            sorted(
                self.networks.items(),
                key=lambda t: t[1].get('priority') or 0, reverse=True
            )
        )

    def connect_container_to_networks(self, container, use_network_aliases=True):
        connected_networks = container.get('NetworkSettings.Networks')

        for network, netdefs in self.prioritized_networks.items():
            if network in connected_networks:
                if short_id_alias_exists(container, network):
                    continue
                self.client.disconnect_container_from_network(container.id, network)

            aliases = self._get_aliases(netdefs, container) if use_network_aliases else []

            self.client.connect_container_to_network(
                container.id, network,
                aliases=aliases,
                ipv4_address=netdefs.get('ipv4_address', None),
                ipv6_address=netdefs.get('ipv6_address', None),
                links=self._get_links(False),
                link_local_ips=netdefs.get('link_local_ips', None),
            )

    def remove_duplicate_containers(self, timeout=None):
        for c in self.duplicate_containers():
            log.info('Removing %s' % c.name)
            c.stop(timeout=self.stop_timeout(timeout))
            c.remove()

    def duplicate_containers(self):
        containers = sorted(
            self.containers(stopped=True),
            key=lambda c: c.get('Created'),
        )

        numbers = set()

        for c in containers:
            if c.number in numbers:
                yield c
            else:
                numbers.add(c.number)

    @property
    def config_hash(self):
        return json_hash(self.config_dict())

    def config_dict(self):
        def image_id():
            try:
                return self.image()['Id']
            except NoSuchImageError:
                return None

        return {
            'options': self.options,
            'image_id': image_id(),
            'links': self.get_link_names(),
            'net': self.network_mode.id,
            'networks': self.networks,
            'volumes_from': [
                (v.source.name, v.mode)
                for v in self.volumes_from if isinstance(v.source, Service)
            ],
        }

    def get_dependency_names(self):
        net_name = self.network_mode.service_name
        pid_namespace = self.pid_mode.service_name
        return (
            self.get_linked_service_names() +
            self.get_volumes_from_names() +
            ([net_name] if net_name else []) +
            ([pid_namespace] if pid_namespace else []) +
            list(self.options.get('depends_on', {}).keys())
        )

    def get_dependency_configs(self):
        net_name = self.network_mode.service_name
        pid_namespace = self.pid_mode.service_name

        configs = dict(
            [(name, None) for name in self.get_linked_service_names()]
        )
        configs.update(dict(
            [(name, None) for name in self.get_volumes_from_names()]
        ))
        configs.update({net_name: None} if net_name else {})
        configs.update({pid_namespace: None} if pid_namespace else {})
        configs.update(self.options.get('depends_on', {}))
        for svc, config in self.options.get('depends_on', {}).items():
            if config['condition'] == CONDITION_STARTED:
                configs[svc] = lambda s: True
            elif config['condition'] == CONDITION_HEALTHY:
                configs[svc] = lambda s: s.is_healthy()
            else:
                # The config schema already prevents this, but it might be
                # bypassed if Compose is called programmatically.
                raise ValueError(
                    'depends_on condition "{}" is invalid.'.format(
                        config['condition']
                    )
                )

        return configs

    def get_linked_service_names(self):
        return [service.name for (service, _) in self.links]

    def get_link_names(self):
        return [(service.name, alias) for service, alias in self.links]

    def get_volumes_from_names(self):
        return [s.source.name for s in self.volumes_from if isinstance(s.source, Service)]

    def _next_container_number(self, one_off=False):
        if one_off:
            return None
        containers = itertools.chain(
            self._fetch_containers(
                all=True,
                filters={'label': self.labels(one_off=False)}
            ), self._fetch_containers(
                all=True,
                filters={'label': self.labels(one_off=False, legacy=True)}
            )
        )
        numbers = [c.number for c in containers if c.number is not None]
        return 1 if not numbers else max(numbers) + 1
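
    # Example: if containers numbered [1, 2, 5] exist for this service
    # (running or stopped, under either label scheme), the next number is 6;
    # numbering is max(existing) + 1, so gaps are never reused.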

    def _fetch_containers(self, **fetch_options):
        # Account for containers that might have been removed since we fetched
        # the list.
        def soft_inspect(container):
            try:
                return Container.from_id(self.client, container['Id'])
            except NotFound:
                return None

        return filter(None, [
            soft_inspect(container)
            for container in self.client.containers(**fetch_options)
        ])

    def _get_aliases(self, network, container=None):
        return list(
            {self.name} |
            ({container.short_id} if container else set()) |
            set(network.get('aliases', ()))
        )

    def build_default_networking_config(self):
        if not self.networks:
            return {}

        network = self.networks[self.network_mode.id]
        endpoint = {
            'Aliases': self._get_aliases(network),
            'IPAMConfig': {},
        }

        if network.get('ipv4_address'):
            endpoint['IPAMConfig']['IPv4Address'] = network.get('ipv4_address')
        if network.get('ipv6_address'):
            endpoint['IPAMConfig']['IPv6Address'] = network.get('ipv6_address')

        return {"EndpointsConfig": {self.network_mode.id: endpoint}}

    def _get_links(self, link_to_self):
        links = {}

        for service, link_name in self.links:
            for container in service.containers():
                links[link_name or service.name] = container.name
                links[container.name] = container.name
                links[container.name_without_project] = container.name

        if link_to_self:
            for container in self.containers():
                links[self.name] = container.name
                links[container.name] = container.name
                links[container.name_without_project] = container.name

        for external_link in self.options.get('external_links') or []:
            if ':' not in external_link:
                link_name = external_link
            else:
                external_link, link_name = external_link.split(':')
            links[link_name] = external_link

        return [
            (alias, container_name)
            for (container_name, alias) in links.items()
        ]

    def _get_volumes_from(self):
        return [build_volume_from(spec) for spec in self.volumes_from]

    def _get_container_create_options(
            self,
            override_options,
            number,
            one_off=False,
            previous_container=None):
        add_config_hash = (not one_off and not override_options)
        slug = generate_random_id() if one_off else None

        container_options = dict(
            (k, self.options[k])
            for k in DOCKER_CONFIG_KEYS if k in self.options)
        override_volumes = override_options.pop('volumes', [])
        container_options.update(override_options)

        if not container_options.get('name'):
            container_options['name'] = self.get_container_name(self.name, number, slug)

        container_options.setdefault('detach', True)

        # If a qualified hostname was given, split it into an
        # unqualified hostname and a domainname unless domainname
        # was also given explicitly. This matches behavior
        # until Docker Engine 1.11.0 - Docker API 1.23.
        if (version_lt(self.client.api_version, '1.23') and
                'hostname' in container_options and
                'domainname' not in container_options and
                '.' in container_options['hostname']):
            parts = container_options['hostname'].partition('.')
            container_options['hostname'] = parts[0]
            container_options['domainname'] = parts[2]

        if (version_gte(self.client.api_version, '1.25') and
                'stop_grace_period' in self.options):
            container_options['stop_timeout'] = self.stop_timeout(None)

        if 'ports' in container_options or 'expose' in self.options:
            container_options['ports'] = build_container_ports(
                formatted_ports(container_options.get('ports', [])),
                self.options)

        if 'volumes' in container_options or override_volumes:
            container_options['volumes'] = list(set(
                container_options.get('volumes', []) + override_volumes
            ))

        container_options['environment'] = merge_environment(
            self._parse_proxy_config(),
            merge_environment(
                self.options.get('environment'),
                override_options.get('environment')
            )
        )

        container_options['labels'] = merge_labels(
            self.options.get('labels'),
            override_options.get('labels'))

        container_options, override_options = self._build_container_volume_options(
            previous_container, container_options, override_options
        )

        container_options['image'] = self.image_name

        container_options['labels'] = build_container_labels(
            container_options.get('labels', {}),
            self.labels(one_off=one_off),
            number,
            self.config_hash if add_config_hash else None,
            slug
        )

        # Delete options which are only used in HostConfig
        for key in HOST_CONFIG_KEYS:
            container_options.pop(key, None)

        container_options['host_config'] = self._get_container_host_config(
            override_options,
            one_off=one_off)

        networking_config = self.build_default_networking_config()
        if networking_config:
            container_options['networking_config'] = networking_config

        container_options['environment'] = format_environment(
            container_options['environment'])
        return container_options

    def _build_container_volume_options(self, previous_container, container_options, override_options):
        container_volumes = []
        container_mounts = []
        if 'volumes' in container_options:
            container_volumes = [
                v for v in container_options.get('volumes') if isinstance(v, VolumeSpec)
            ]
            container_mounts = [v for v in container_options.get('volumes') if isinstance(v, MountSpec)]

        binds, affinity = merge_volume_bindings(
            container_volumes, self.options.get('tmpfs') or [], previous_container,
            container_mounts
        )
        container_options['environment'].update(affinity)

        container_options['volumes'] = dict((v.internal, {}) for v in container_volumes or {})
        if version_gte(self.client.api_version, '1.30'):
            override_options['mounts'] = [build_mount(v) for v in container_mounts] or None
        else:
            # Workaround for 3.2 format
            override_options['tmpfs'] = self.options.get('tmpfs') or []
            for m in container_mounts:
                if m.is_tmpfs:
                    override_options['tmpfs'].append(m.target)
                else:
                    binds.append(m.legacy_repr())
                    container_options['volumes'][m.target] = {}

        secret_volumes = self.get_secret_volumes()
        if secret_volumes:
            if version_lt(self.client.api_version, '1.30'):
                binds.extend(v.legacy_repr() for v in secret_volumes)
                container_options['volumes'].update(
                    (v.target, {}) for v in secret_volumes
                )
            else:
                override_options['mounts'] = override_options.get('mounts') or []
                override_options['mounts'].extend([build_mount(v) for v in secret_volumes])

        # Remove possible duplicates (see e.g. https://github.com/docker/compose/issues/5885).
        # unique_everseen preserves order. (see https://github.com/docker/compose/issues/6091).
        override_options['binds'] = list(unique_everseen(binds))
        return container_options, override_options

    def _get_container_host_config(self, override_options, one_off=False):
        options = dict(self.options, **override_options)

        logging_dict = options.get('logging', None)
        blkio_config = convert_blkio_config(options.get('blkio_config', None))
        log_config = get_log_config(logging_dict)
        init_path = None
        if isinstance(options.get('init'), six.string_types):
            init_path = options.get('init')
            options['init'] = True

        security_opt = [
            o.value for o in options.get('security_opt')
        ] if options.get('security_opt') else None

        nano_cpus = None
        if 'cpus' in options:
            nano_cpus = int(options.get('cpus') * NANOCPUS_SCALE)

        return self.client.create_host_config(
            links=self._get_links(link_to_self=one_off),
            port_bindings=build_port_bindings(
                formatted_ports(options.get('ports', []))
            ),
            binds=options.get('binds'),
            volumes_from=self._get_volumes_from(),
            privileged=options.get('privileged', False),
            network_mode=self.network_mode.mode,
            devices=options.get('devices'),
            dns=options.get('dns'),
            dns_opt=options.get('dns_opt'),
            dns_search=options.get('dns_search'),
            restart_policy=options.get('restart'),
            runtime=options.get('runtime'),
            cap_add=options.get('cap_add'),
            cap_drop=options.get('cap_drop'),
            mem_limit=options.get('mem_limit'),
            mem_reservation=options.get('mem_reservation'),
            memswap_limit=options.get('memswap_limit'),
            ulimits=build_ulimits(options.get('ulimits')),
            log_config=log_config,
            extra_hosts=options.get('extra_hosts'),
            read_only=options.get('read_only'),
            pid_mode=self.pid_mode.mode,
            security_opt=security_opt,
            ipc_mode=options.get('ipc'),
            cgroup_parent=options.get('cgroup_parent'),
            cpu_quota=options.get('cpu_quota'),
            shm_size=options.get('shm_size'),
            sysctls=options.get('sysctls'),
            pids_limit=options.get('pids_limit'),
            tmpfs=options.get('tmpfs'),
            oom_kill_disable=options.get('oom_kill_disable'),
            oom_score_adj=options.get('oom_score_adj'),
            mem_swappiness=options.get('mem_swappiness'),
            group_add=options.get('group_add'),
            userns_mode=options.get('userns_mode'),
            init=options.get('init', None),
            init_path=init_path,
            isolation=options.get('isolation'),
            cpu_count=options.get('cpu_count'),
            cpu_percent=options.get('cpu_percent'),
            nano_cpus=nano_cpus,
            volume_driver=options.get('volume_driver'),
            cpuset_cpus=options.get('cpuset'),
            cpu_shares=options.get('cpu_shares'),
            storage_opt=options.get('storage_opt'),
            blkio_weight=blkio_config.get('weight'),
            blkio_weight_device=blkio_config.get('weight_device'),
            device_read_bps=blkio_config.get('device_read_bps'),
            device_read_iops=blkio_config.get('device_read_iops'),
            device_write_bps=blkio_config.get('device_write_bps'),
            device_write_iops=blkio_config.get('device_write_iops'),
            mounts=options.get('mounts'),
            device_cgroup_rules=options.get('device_cgroup_rules'),
            cpu_period=options.get('cpu_period'),
            cpu_rt_period=options.get('cpu_rt_period'),
            cpu_rt_runtime=options.get('cpu_rt_runtime'),
        )

    def get_secret_volumes(self):
        def build_spec(secret):
            target = secret['secret'].target
            if target is None:
                target = '{}/{}'.format(const.SECRETS_PATH, secret['secret'].source)
            elif not os.path.isabs(target):
                target = '{}/{}'.format(const.SECRETS_PATH, target)

            return MountSpec('bind', secret['file'], target, read_only=True)

        return [build_spec(secret) for secret in self.secrets]

    def build(self, no_cache=False, pull=False, force_rm=False, memory=None, build_args_override=None,
              gzip=False):
        log.info('Building %s' % self.name)

        build_opts = self.options.get('build', {})

        build_args = build_opts.get('args', {}).copy()
        if build_args_override:
            build_args.update(build_args_override)

        for k, v in self._parse_proxy_config().items():
            build_args.setdefault(k, v)

        path = rewrite_build_path(build_opts.get('context'))
        if self.platform and version_lt(self.client.api_version, '1.35'):
            raise OperationFailedError(
                'Platform-targeted builds are not supported for API versions < 1.35'
            )

        build_output = self.client.build(
            path=path,
            tag=self.image_name,
            rm=True,
            forcerm=force_rm,
            pull=pull,
            nocache=no_cache,
            dockerfile=build_opts.get('dockerfile', None),
            cache_from=build_opts.get('cache_from', None),
            labels=build_opts.get('labels', None),
            buildargs=build_args,
            network_mode=build_opts.get('network', None),
            target=build_opts.get('target', None),
            shmsize=parse_bytes(build_opts.get('shm_size')) if build_opts.get('shm_size') else None,
            extra_hosts=build_opts.get('extra_hosts', None),
            container_limits={
                'memory': parse_bytes(memory) if memory else None
            },
            gzip=gzip,
            isolation=build_opts.get('isolation', self.options.get('isolation', None)),
            platform=self.platform,
        )

        try:
            all_events = list(stream_output(build_output, sys.stdout))
        except StreamOutputError as e:
            raise BuildError(self, six.text_type(e))

        # Ensure the HTTP connection is not reused for another
        # streaming command, as the Docker daemon can sometimes
        # complain about it
        self.client.close()

        image_id = None

        for event in all_events:
            if 'stream' in event:
                match = re.search(r'Successfully built ([0-9a-f]+)', event.get('stream', ''))
                if match:
                    image_id = match.group(1)

        if image_id is None:
            raise BuildError(self, event if all_events else 'Unknown')

        return image_id

    def can_be_built(self):
        return 'build' in self.options

    def labels(self, one_off=False, legacy=False):
        proj_name = self.project if not legacy else re.sub(r'[_-]', '', self.project)
        return [
            '{0}={1}'.format(LABEL_PROJECT, proj_name),
            '{0}={1}'.format(LABEL_SERVICE, self.name),
            '{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False"),
        ]
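
    # Example (label values shown for illustration): for service "web" in
    # project "myproj", labels() yields filters like
    #   ['com.docker.compose.project=myproj',
    #    'com.docker.compose.service=web',
    #    'com.docker.compose.oneoff=False']
    # With legacy=True the project name is stripped of '_' and '-' so the
    # filters match containers created by older Compose releases.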

    @property
    def custom_container_name(self):
        return self.options.get('container_name')

    def get_container_name(self, service_name, number, slug=None):
        if self.custom_container_name and slug is None:
            return self.custom_container_name

        container_name = build_container_name(
            self.project, service_name, number, slug,
        )
        ext_links_origins = [l.split(':')[0] for l in self.options.get('external_links', [])]
        if container_name in ext_links_origins:
            raise DependencyError(
                'Service {0} has a self-referential external link: {1}'.format(
                    self.name, container_name
                )
            )
        return container_name

    def remove_image(self, image_type):
        if not image_type or image_type == ImageType.none:
            return False
        if image_type == ImageType.local and self.options.get('image'):
            return False

        log.info("Removing image %s", self.image_name)
        try:
            self.client.remove_image(self.image_name)
            return True
        except ImageNotFound:
            log.warning("Image %s not found.", self.image_name)
            return False
        except APIError as e:
            log.error("Failed to remove image for service %s: %s", self.name, e)
            return False

    def specifies_host_port(self):
        def has_host_port(binding):
            if isinstance(binding, dict):
                external_bindings = binding.get('published')
            else:
                _, external_bindings = split_port(binding)

            # there are no external bindings
            if external_bindings is None:
                return False

            # we only need to check the first binding from the range
            external_binding = external_bindings[0]

            # non-tuple binding means there is a host port specified
            if not isinstance(external_binding, tuple):
                return True

            # extract actual host port from tuple of (host_ip, host_port)
            _, host_port = external_binding
            if host_port is not None:
                return True

            return False

        return any(has_host_port(binding) for binding in self.options.get('ports', []))

    def _do_pull(self, repo, pull_kwargs, silent, ignore_pull_failures):
        try:
            output = self.client.pull(repo, **pull_kwargs)
            if silent:
                with open(os.devnull, 'w') as devnull:
                    for event in stream_output(output, devnull):
                        yield event
            else:
                for event in stream_output(output, sys.stdout):
                    yield event
        except (StreamOutputError, NotFound) as e:
            if not ignore_pull_failures:
                raise
            else:
                log.error(six.text_type(e))

    def pull(self, ignore_pull_failures=False, silent=False, stream=False):
        if 'image' not in self.options:
            return

        repo, tag, separator = parse_repository_tag(self.options['image'])
        kwargs = {
            'tag': tag or 'latest',
            'stream': True,
            'platform': self.platform,
        }
        if not silent:
            log.info('Pulling %s (%s%s%s)...' % (self.name, repo, separator, tag))

        if kwargs['platform'] and version_lt(self.client.api_version, '1.35'):
            raise OperationFailedError(
                'Platform-targeted pulls are not supported for API versions < 1.35'
            )

        event_stream = self._do_pull(repo, kwargs, silent, ignore_pull_failures)
        if stream:
            return event_stream
        return progress_stream.get_digest_from_pull(event_stream)

    def push(self, ignore_push_failures=False):
        if 'image' not in self.options or 'build' not in self.options:
            return

        repo, tag, separator = parse_repository_tag(self.options['image'])
        tag = tag or 'latest'
        log.info('Pushing %s (%s%s%s)...' % (self.name, repo, separator, tag))
        output = self.client.push(repo, tag=tag, stream=True)

        try:
            return progress_stream.get_digest_from_push(
                stream_output(output, sys.stdout))
        except StreamOutputError as e:
            if not ignore_push_failures:
                raise
            else:
                log.error(six.text_type(e))

    def is_healthy(self):
        """Check that all containers for this service report healthy.

        Returns False if at least one healthcheck is still pending.
        If an unhealthy container is detected, raise a HealthCheckFailed
        exception.
        """
        result = True
        for ctnr in self.containers():
            ctnr.inspect()
            status = ctnr.get('State.Health.Status')
            if status is None:
                raise NoHealthCheckConfigured(self.name)
            elif status == 'starting':
                result = False
            elif status == 'unhealthy':
                raise HealthCheckFailed(ctnr.short_id)
        return result

    def _parse_proxy_config(self):
        client = self.client
        if 'proxies' not in client._general_configs:
            return {}
        docker_host = getattr(client, '_original_base_url', client.base_url)
        proxy_config = client._general_configs['proxies'].get(
            docker_host, client._general_configs['proxies'].get('default')
        ) or {}

        permitted = {
            'ftpProxy': 'FTP_PROXY',
            'httpProxy': 'HTTP_PROXY',
            'httpsProxy': 'HTTPS_PROXY',
            'noProxy': 'NO_PROXY',
        }

        result = {}

        for k, v in proxy_config.items():
            if k not in permitted:
                continue
            result[permitted[k]] = result[permitted[k].lower()] = v

        return result
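
    # Example: assuming a client config (~/.docker/config.json) containing
    #   {"proxies": {"default": {"httpProxy": "http://proxy:3128"}}}
    # this returns {'HTTP_PROXY': 'http://proxy:3128',
    #               'http_proxy': 'http://proxy:3128'}; each variable is
    # emitted in both upper- and lower-case form.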


def short_id_alias_exists(container, network):
    aliases = container.get(
        'NetworkSettings.Networks.{net}.Aliases'.format(net=network)) or ()
    return container.short_id in aliases


class PidMode(object):
    def __init__(self, mode):
        self._mode = mode

    @property
    def mode(self):
        return self._mode

    @property
    def service_name(self):
        return None


class ServicePidMode(PidMode):
    def __init__(self, service):
        self.service = service

    @property
    def service_name(self):
        return self.service.name

    @property
    def mode(self):
        containers = self.service.containers()
        if containers:
            return 'container:' + containers[0].id

        log.warning(
            "Service %s is trying to reuse the PID namespace "
            "of another service that is not running." % (self.service_name)
        )
        return None


class ContainerPidMode(PidMode):
    def __init__(self, container):
        self.container = container
        self._mode = 'container:{}'.format(container.id)


class NetworkMode(object):
    """A `standard` network mode (e.g. host, bridge)."""

    service_name = None

    def __init__(self, network_mode):
        self.network_mode = network_mode

    @property
    def id(self):
        return self.network_mode

    mode = id


class ContainerNetworkMode(object):
    """A network mode that uses a container's network stack."""

    service_name = None

    def __init__(self, container):
        self.container = container

    @property
    def id(self):
        return self.container.id

    @property
    def mode(self):
        return 'container:' + self.container.id


class ServiceNetworkMode(object):
    """A network mode that uses a service's network stack."""

    def __init__(self, service):
        self.service = service

    @property
    def id(self):
        return self.service.name

    service_name = id

    @property
    def mode(self):
        containers = self.service.containers()
        if containers:
            return 'container:' + containers[0].id

        log.warning("Service %s is trying to reuse the network stack "
                    "of another service that is not running." % (self.id))
        return None


# Names


def build_container_name(project, service, number, slug=None):
    bits = [project.lstrip('-_'), service]
    if slug:
        bits.extend(['run', truncate_id(slug)])
    else:
        bits.append(str(number))
    return '_'.join(bits)
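
# Examples:
#   build_container_name('myproj', 'web', 1)            -> 'myproj_web_1'
#   build_container_name('myproj', 'web', None, slug=s) -> 'myproj_web_run_' + truncate_id(s)
# A leading '-' or '_' on the project name is stripped first.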


# Images

def parse_repository_tag(repo_path):
    """Splits an image identifier into base image path, tag/digest
    and its separator.

    Example:

    >>> parse_repository_tag('user/repo@sha256:digest')
    ('user/repo', 'sha256:digest', '@')
    >>> parse_repository_tag('user/repo:v1')
    ('user/repo', 'v1', ':')
    """
    tag_separator = ":"
    digest_separator = "@"

    if digest_separator in repo_path:
        repo, tag = repo_path.rsplit(digest_separator, 1)
        return repo, tag, digest_separator

    repo, tag = repo_path, ""
    if tag_separator in repo_path:
        repo, tag = repo_path.rsplit(tag_separator, 1)
        if "/" in tag:
            repo, tag = repo_path, ""

    return repo, tag, tag_separator
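
# The untagged case falls through to the default separator:
#   parse_repository_tag('user/repo') == ('user/repo', '', ':')
# and a ':' that belongs to a registry host (e.g. 'localhost:5000/repo') is
# not mistaken for a tag, because the candidate tag would contain a '/'.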


# Volumes


def merge_volume_bindings(volumes, tmpfs, previous_container, mounts):
    """
    Return a list of volume bindings for a container. Container data volumes
    are replaced by those from the previous container.
    Anonymous mounts are updated in place.
    """
    affinity = {}

    volume_bindings = OrderedDict(
        build_volume_binding(volume)
        for volume in volumes
        if volume.external
    )

    if previous_container:
        old_volumes, old_mounts = get_container_data_volumes(
            previous_container, volumes, tmpfs, mounts
        )
        warn_on_masked_volume(volumes, old_volumes, previous_container.service)
        volume_bindings.update(
            build_volume_binding(volume) for volume in old_volumes
        )

        if old_volumes or old_mounts:
            affinity = {'affinity:container': '=' + previous_container.id}

    return list(volume_bindings.values()), affinity
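
# The 'affinity:container' environment entry above is a scheduling hint for
# classic Docker Swarm, keeping the replacement container on the same node as
# the volumes it inherits from the previous container.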


def get_container_data_volumes(container, volumes_option, tmpfs_option, mounts_option):
    """
    Find the container data volumes that are in `volumes_option`, and return
    a mapping of volume bindings for those volumes.
    Anonymous volume mounts are updated in place instead.
    """
    volumes = []
    volumes_option = volumes_option or []

    container_mounts = dict(
        (mount['Destination'], mount)
        for mount in container.get('Mounts') or {}
    )

    image_volumes = [
        VolumeSpec.parse(volume)
        for volume in
        container.image_config['ContainerConfig'].get('Volumes') or {}
    ]

    for volume in set(volumes_option + image_volumes):
        # No need to preserve host volumes
        if volume.external:
            continue

        # Attempting to rebind tmpfs volumes breaks: https://github.com/docker/compose/issues/4751
        if volume.internal in convert_tmpfs_mounts(tmpfs_option).keys():
            continue

        mount = container_mounts.get(volume.internal)

        # New volume, doesn't exist in the old container
        if not mount:
            continue

        # Volume was previously a host volume, now it's a container volume
        if not mount.get('Name'):
            continue

        # Volume (probably an image volume) is overridden by a mount in the service's config
        # and would cause a duplicate mountpoint error
        if volume.internal in [m.target for m in mounts_option]:
            continue

        # Copy existing volume from old container
        volume = volume._replace(external=mount['Name'])
        volumes.append(volume)

    updated_mounts = False
    for mount in mounts_option:
        if mount.type != 'volume':
            continue

        ctnr_mount = container_mounts.get(mount.target)
        if not ctnr_mount or not ctnr_mount.get('Name'):
            continue

        mount.source = ctnr_mount['Name']
        updated_mounts = True

    return volumes, updated_mounts


def warn_on_masked_volume(volumes_option, container_volumes, service):
    container_volumes = dict(
        (volume.internal, volume.external)
        for volume in container_volumes)

    for volume in volumes_option:
        if (
            volume.external and
            volume.internal in container_volumes and
            container_volumes.get(volume.internal) != volume.external
        ):
            log.warning((
                "Service \"{service}\" is using volume \"{volume}\" from the "
                "previous container. Host mapping \"{host_path}\" has no effect. "
                "Remove the existing containers (with `docker-compose rm {service}`) "
                "to use the host volume mapping."
            ).format(
                service=service,
                volume=volume.internal,
                host_path=volume.external))


def build_volume_binding(volume_spec):
    return volume_spec.internal, volume_spec.repr()


def build_volume_from(volume_from_spec):
    """
    volume_from can be either a service or a container. We want to return the
    container.id and format it into a string complete with the mode.
    """
    if isinstance(volume_from_spec.source, Service):
        containers = volume_from_spec.source.containers(stopped=True)
        if not containers:
            return "{}:{}".format(
                volume_from_spec.source.create_container().id,
                volume_from_spec.mode)

        container = containers[0]
        return "{}:{}".format(container.id, volume_from_spec.mode)
    elif isinstance(volume_from_spec.source, Container):
        return "{}:{}".format(volume_from_spec.source.id, volume_from_spec.mode)


def build_mount(mount_spec):
    kwargs = {}
    if mount_spec.options:
        for option, sdk_name in mount_spec.options_map[mount_spec.type].items():
            if option in mount_spec.options:
                kwargs[sdk_name] = mount_spec.options[option]

    return Mount(
        type=mount_spec.type, target=mount_spec.target, source=mount_spec.source,
        read_only=mount_spec.read_only, consistency=mount_spec.consistency, **kwargs
    )

# Labels


def build_container_labels(label_options, service_labels, number, config_hash, slug):
    labels = dict(label_options or {})
    labels.update(label.split('=', 1) for label in service_labels)
    if number is not None:
        labels[LABEL_CONTAINER_NUMBER] = str(number)
    if slug is not None:
        labels[LABEL_SLUG] = slug
    labels[LABEL_VERSION] = __version__

    if config_hash:
        log.debug("Added config hash: %s" % config_hash)
        labels[LABEL_CONFIG_HASH] = config_hash

    return labels


# Ulimits


def build_ulimits(ulimit_config):
    if not ulimit_config:
        return None
    ulimits = []
    for limit_name, soft_hard_values in six.iteritems(ulimit_config):
        if isinstance(soft_hard_values, six.integer_types):
            ulimits.append({'name': limit_name, 'soft': soft_hard_values, 'hard': soft_hard_values})
        elif isinstance(soft_hard_values, dict):
            ulimit_dict = {'name': limit_name}
            ulimit_dict.update(soft_hard_values)
            ulimits.append(ulimit_dict)

    return ulimits
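
# Examples:
#   build_ulimits({'nofile': 65535})
#       -> [{'name': 'nofile', 'soft': 65535, 'hard': 65535}]
#   build_ulimits({'nofile': {'soft': 20000, 'hard': 40000}})
#       -> [{'name': 'nofile', 'soft': 20000, 'hard': 40000}]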


def get_log_config(logging_dict):
    log_driver = logging_dict.get('driver', "") if logging_dict else ""
    log_options = logging_dict.get('options', None) if logging_dict else None
    return LogConfig(
        type=log_driver,
        config=log_options
    )


# TODO: remove once fix is available in docker-py
def format_environment(environment):
    def format_env(key, value):
        if value is None:
            return key
        if isinstance(value, six.binary_type):
            value = value.decode('utf-8')
        return '{key}={value}'.format(key=key, value=value)
    return [format_env(*item) for item in environment.items()]
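
# Example: format_environment({'FOO': 'bar', 'EMPTY': None}) returns
# ['FOO=bar', 'EMPTY']; a None value is rendered as a bare key.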


# Ports
def formatted_ports(ports):
    result = []
    for port in ports:
        if isinstance(port, ServicePort):
            result.append(port.legacy_repr())
        else:
            result.append(port)
    return result


def build_container_ports(container_ports, options):
    ports = []
    all_ports = container_ports + options.get('expose', [])
    for port_range in all_ports:
        internal_range, _ = split_port(port_range)
        for port in internal_range:
            port = str(port)
            if '/' in port:
                port = tuple(port.split('/'))
            ports.append(port)
    return ports
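
# Example (assuming docker-py's split_port semantics): with container_ports
# ['8080:80'] and options {'expose': ['9000-9001/udp']}, the result is
# ['80', ('9000', 'udp'), ('9001', 'udp')]: only the container-side ports are
# kept, with any protocol split out into a tuple.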


def convert_blkio_config(blkio_config):
    result = {}
    if blkio_config is None:
        return result

    result['weight'] = blkio_config.get('weight')
    for field in [
        "device_read_bps", "device_read_iops", "device_write_bps",
        "device_write_iops", "weight_device",
    ]:
        if field not in blkio_config:
            continue
        arr = []
        for item in blkio_config[field]:
            arr.append(dict([(k.capitalize(), v) for k, v in item.items()]))
        result[field] = arr
    return result
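
# Example: convert_blkio_config(
#     {'weight': 300, 'device_read_bps': [{'path': '/dev/sda', 'rate': 1024}]})
# returns {'weight': 300, 'device_read_bps': [{'Path': '/dev/sda', 'Rate': 1024}]};
# keys are capitalized to match the Engine API field names.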


def rewrite_build_path(path):
    # python2 os.stat() doesn't support unicode on some UNIX platforms, so we
    # encode the path to a bytestring to be safe
    if not six.PY3 and not IS_WINDOWS_PLATFORM:
        path = path.encode('utf8')

    if IS_WINDOWS_PLATFORM and not is_url(path) and not path.startswith(WINDOWS_LONGPATH_PREFIX):
        path = WINDOWS_LONGPATH_PREFIX + os.path.normpath(path)

    return path