// Package daemon exposes the functions that occur on the host server
// on which the Docker daemon is running.
//
// In implementing the various functions of the daemon, there is often
// a method-specific struct for configuring the runtime behavior.
package daemon // import "github.com/docker/docker/daemon"

import (
	"context"
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"time"

	"google.golang.org/grpc"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/defaults"
	"github.com/containerd/containerd/pkg/dialer"
	"github.com/docker/docker/api/types"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/builder"
	"github.com/docker/docker/container"
	"github.com/docker/docker/daemon/config"
	"github.com/docker/docker/daemon/discovery"
	"github.com/docker/docker/daemon/events"
	"github.com/docker/docker/daemon/exec"
	"github.com/docker/docker/daemon/images"
	"github.com/docker/docker/daemon/logger"
	"github.com/docker/docker/daemon/network"
	"github.com/docker/docker/errdefs"
	"github.com/sirupsen/logrus"
	// register graph drivers
	_ "github.com/docker/docker/daemon/graphdriver/register"
	"github.com/docker/docker/daemon/stats"
	dmetadata "github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/dockerversion"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/libcontainerd"
	"github.com/docker/docker/migrate/v1"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/locker"
	"github.com/docker/docker/pkg/plugingetter"
	"github.com/docker/docker/pkg/sysinfo"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/truncindex"
	"github.com/docker/docker/plugin"
	pluginexec "github.com/docker/docker/plugin/executor/containerd"
	refstore "github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/runconfig"
	volumesservice "github.com/docker/docker/volume/service"
	"github.com/docker/libnetwork"
	"github.com/docker/libnetwork/cluster"
	nwconfig "github.com/docker/libnetwork/config"
	"github.com/pkg/errors"
)

// ContainersNamespace is the name of the namespace used for users' containers
const ContainersNamespace = "moby"

var (
	errSystemNotSupported = errors.New("the Docker daemon is not supported on this platform")
)
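
// All containers created by the daemon live in the ContainersNamespace
// namespace inside containerd. An external containerd client can observe
// them by dialing the same containerd instance with that namespace set; a
// minimal sketch, assuming the default containerd socket path:
//
//	cli, err := containerd.New("/run/containerd/containerd.sock",
//		containerd.WithDefaultNamespace(ContainersNamespace))
//	if err == nil {
//		defer cli.Close()
//		containers, _ := cli.Containers(context.Background())
//		_ = containers // inspect the daemon's containers here
//	}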

// Daemon holds information about the Docker daemon.
type Daemon struct {
	ID                    string
	repository            string
	containers            container.Store
	containersReplica     container.ViewDB
	execCommands          *exec.Store
	imageService          *images.ImageService
	idIndex               *truncindex.TruncIndex
	configStore           *config.Config
	statsCollector        *stats.Collector
	defaultLogConfig      containertypes.LogConfig
	RegistryService       registry.Service
	EventsService         *events.Events
	netController         libnetwork.NetworkController
	volumes               *volumesservice.VolumesService
	discoveryWatcher      discovery.Reloader
	root                  string
	seccompEnabled        bool
	apparmorEnabled       bool
	shutdown              bool
	idMapping             *idtools.IdentityMapping
	// TODO: move graphDrivers field to an InfoService
	graphDrivers map[string]string // By operating system

	PluginStore           *plugin.Store // TODO: remove
	pluginManager         *plugin.Manager
	linkIndex             *linkIndex
	containerdCli         *containerd.Client
	containerd            libcontainerd.Client
	defaultIsolation      containertypes.Isolation // Default isolation mode on Windows
	clusterProvider       cluster.Provider
	cluster               Cluster
	genericResources      []swarm.GenericResource
	metricsPluginListener net.Listener

	machineMemory uint64

	seccompProfile     []byte
	seccompProfilePath string

	diskUsageRunning int32
	pruneRunning     int32
	hosts            map[string]bool // hosts stores the addresses the daemon is listening on
	startupDone      chan struct{}

	attachmentStore       network.AttachmentStore
	attachableNetworkLock *locker.Locker
}

// StoreHosts stores the addresses the daemon is listening on
func (daemon *Daemon) StoreHosts(hosts []string) {
	if daemon.hosts == nil {
		daemon.hosts = make(map[string]bool)
	}
	for _, h := range hosts {
		daemon.hosts[h] = true
	}
}

// HasExperimental returns whether the experimental features of the daemon are enabled or not
func (daemon *Daemon) HasExperimental() bool {
	return daemon.configStore != nil && daemon.configStore.Experimental
}

func (daemon *Daemon) restore() error {
	containers := make(map[string]*container.Container)

	logrus.Info("Loading containers: start.")

	dir, err := ioutil.ReadDir(daemon.repository)
	if err != nil {
		return err
	}

	for _, v := range dir {
		id := v.Name()
		container, err := daemon.load(id)
		if err != nil {
			logrus.Errorf("Failed to load container %v: %v", id, err)
			continue
		}
		if !system.IsOSSupported(container.OS) {
			logrus.Errorf("Failed to load container %v: %s (%q)", id, system.ErrNotSupportedOperatingSystem, container.OS)
			continue
		}
		// Ignore the container if it does not support the current driver being used by the graph
		currentDriverForContainerOS := daemon.graphDrivers[container.OS]
		if (container.Driver == "" && currentDriverForContainerOS == "aufs") || container.Driver == currentDriverForContainerOS {
			rwlayer, err := daemon.imageService.GetLayerByID(container.ID, container.OS)
			if err != nil {
				logrus.Errorf("Failed to load container mount %v: %v", id, err)
				continue
			}
			container.RWLayer = rwlayer
			logrus.Debugf("Loaded container %v, isRunning: %v", container.ID, container.IsRunning())

			containers[container.ID] = container
		} else {
			logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
		}
	}

	removeContainers := make(map[string]*container.Container)
	restartContainers := make(map[*container.Container]chan struct{})
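
	// Bookkeeping for the rest of restore: removeContainers collects
	// AutoRemove containers to delete once registration is done,
	// restartContainers maps each container to restart to a channel that
	// is closed when it has started, and activeSandboxes records the
	// network sandboxes of still-running containers so the network
	// controller can reclaim them when it is initialized below.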
	activeSandboxes := make(map[string]interface{})
	for id, c := range containers {
		if err := daemon.registerName(c); err != nil {
			logrus.Errorf("Failed to register container name %s: %s", c.ID, err)
			delete(containers, id)
			continue
		}
		if err := daemon.Register(c); err != nil {
			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
			delete(containers, id)
			continue
		}

		// The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver.
		// We should rewrite it to use the daemon defaults.
		// Fixes https://github.com/docker/docker/issues/22536
		if c.HostConfig.LogConfig.Type == "" {
			if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil {
				logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err)
				continue
			}
		}
	}

	var (
		wg      sync.WaitGroup
		mapLock sync.Mutex
	)
	for _, c := range containers {
		wg.Add(1)
		go func(c *container.Container) {
			defer wg.Done()
			daemon.backportMountSpec(c)
			if err := daemon.checkpointAndSave(c); err != nil {
				logrus.WithError(err).WithField("container", c.ID).Error("error saving backported mountspec to disk")
			}

			daemon.setStateCounter(c)

			logrus.WithFields(logrus.Fields{
				"container": c.ID,
				"running":   c.IsRunning(),
				"paused":    c.IsPaused(),
			}).Debug("restoring container")

			var (
				err      error
				alive    bool
				ec       uint32
				exitedAt time.Time
			)

			alive, _, err = daemon.containerd.Restore(context.Background(), c.ID, c.InitializeStdio)
			if err != nil && !errdefs.IsNotFound(err) {
				logrus.Errorf("Failed to restore container %s with containerd: %s", c.ID, err)
				return
			}
			if !alive {
				ec, exitedAt, err = daemon.containerd.DeleteTask(context.Background(), c.ID)
				if err != nil && !errdefs.IsNotFound(err) {
					logrus.WithError(err).Errorf("Failed to delete container %s from containerd", c.ID)
					return
				}
			} else if !daemon.configStore.LiveRestoreEnabled {
				if err := daemon.kill(c, c.StopSignal()); err != nil && !errdefs.IsNotFound(err) {
					logrus.WithError(err).WithField("container", c.ID).Error("error shutting down container")
					return
				}
			}

			if c.IsRunning() || c.IsPaused() {
				c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking

				if c.IsPaused() && alive {
					s, err := daemon.containerd.Status(context.Background(), c.ID)
					if err != nil {
						logrus.WithError(err).WithField("container", c.ID).
							Errorf("Failed to get container status")
					} else {
						logrus.WithField("container", c.ID).WithField("state", s).
							Info("restored container paused")
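						// Reconcile the paused state recorded on disk with
						// what containerd reports: the container may have
						// been resumed or may have exited while the daemon
						// was down.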
						switch s {
						case libcontainerd.StatusPaused, libcontainerd.StatusPausing:
							// nothing to do
						case libcontainerd.StatusStopped:
							alive = false
						case libcontainerd.StatusUnknown:
							logrus.WithField("container", c.ID).
								Error("Unknown status for container during restore")
						default:
							// running
							c.Lock()
							c.Paused = false
							daemon.setStateCounter(c)
							if err := c.CheckpointTo(daemon.containersReplica); err != nil {
								logrus.WithError(err).WithField("container", c.ID).
									Error("Failed to update stopped container state")
							}
							c.Unlock()
						}
					}
				}

				if !alive {
					c.Lock()
					c.SetStopped(&container.ExitStatus{ExitCode: int(ec), ExitedAt: exitedAt})
					daemon.Cleanup(c)
					if err := c.CheckpointTo(daemon.containersReplica); err != nil {
						logrus.Errorf("Failed to update stopped container %s state: %v", c.ID, err)
					}
					c.Unlock()
				}

				// we call Mount and then Unmount to get BaseFs of the container
				if err := daemon.Mount(c); err != nil {
					// The mount is unlikely to fail. However, in case mount fails
					// the container should be allowed to restore here. Some functionalities
					// (like docker exec -u user) might be missing but the container is able to be
					// stopped/restarted/removed.
					// See #29365 for related information.
					// The error is only logged here.
					logrus.Warnf("Failed to mount container on getting BaseFs path %v: %v", c.ID, err)
				} else {
					if err := daemon.Unmount(c); err != nil {
						logrus.Warnf("Failed to unmount container on getting BaseFs path %v: %v", c.ID, err)
					}
				}

				c.ResetRestartManager(false)
				if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
					options, err := daemon.buildSandboxOptions(c)
					if err != nil {
						logrus.Warnf("Failed to build sandbox options to restore container %s: %v", c.ID, err)
					}
					mapLock.Lock()
					activeSandboxes[c.NetworkSettings.SandboxID] = options
					mapLock.Unlock()
				}
			}

			// Get the list of containers we need to restart.
			// Do not autostart containers which have endpoints in a
			// swarm-scope network yet, since the cluster is not initialized
			// at this point. They are started later, once the cluster is
			// initialized (see RestartSwarmContainers).
			if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore {
				mapLock.Lock()
				restartContainers[c] = make(chan struct{})
				mapLock.Unlock()
			} else if c.HostConfig != nil && c.HostConfig.AutoRemove {
				mapLock.Lock()
				removeContainers[c.ID] = c
				mapLock.Unlock()
			}

			c.Lock()
			if c.RemovalInProgress {
				// We probably crashed in the middle of a removal, reset
				// the flag.
				//
				// We DO NOT remove the container here as we do not
				// know if the user had requested for either the
				// associated volumes, network links or both to also
				// be removed. So we put the container in the "dead"
				// state and leave further processing up to them.
				logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
				c.RemovalInProgress = false
				c.Dead = true
				if err := c.CheckpointTo(daemon.containersReplica); err != nil {
					logrus.Errorf("Failed to update RemovalInProgress container %s state: %v", c.ID, err)
				}
			}
			c.Unlock()
		}(c)
	}
	wg.Wait()

	daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes)
	if err != nil {
		return fmt.Errorf("Error initializing network controller: %v", err)
	}

	// Now that all the containers are registered, register the links
	for _, c := range containers {
		if err := daemon.registerLinks(c, c.HostConfig); err != nil {
			logrus.Errorf("failed to register link for container %s: %v", c.ID, err)
		}
	}

	group := sync.WaitGroup{}
	for c, notifier := range restartContainers {
		group.Add(1)

		go func(c *container.Container, chNotify chan struct{}) {
			defer group.Done()

			logrus.Debugf("Starting container %s", c.ID)

			// ignore errors here as this is a best effort to wait for children to be
			// running before we try to start the container
			children := daemon.children(c)
			timeout := time.After(5 * time.Second)
			for _, child := range children {
				if notifier, exists := restartContainers[child]; exists {
					select {
					case <-notifier:
					case <-timeout:
					}
				}
			}

			// Make sure networks are available before starting
			daemon.waitForNetworks(c)
			if err := daemon.containerStart(c, "", "", true); err != nil {
				logrus.Errorf("Failed to start container %s: %s", c.ID, err)
			}
			close(chNotify)
		}(c, notifier)
	}
	group.Wait()

	removeGroup := sync.WaitGroup{}
	for id := range removeContainers {
		removeGroup.Add(1)
		go func(cid string) {
			defer removeGroup.Done()
			if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
				logrus.Errorf("Failed to remove container %s: %s", cid, err)
			}
		}(id)
	}
	removeGroup.Wait()

	// Any containers that were started above would already have had this done;
	// however, we need to now prepare the mountpoints for the rest of the containers as well.
	// This shouldn't cause any issue running on the containers that already had this run.
	// This must be run after any containers with a restart policy so that containerized plugins
	// can have a chance to be running before we try to initialize them.
	for _, c := range containers {
		// If the container has a restart policy, do not prepare the
		// mountpoints, since that was already done while restarting. This
		// speeds up daemon start when a restarting container has a volume
		// and the volume driver is not available.
		if _, ok := restartContainers[c]; ok {
			continue
		} else if _, ok := removeContainers[c.ID]; ok {
			// container is automatically removed, skip it.
			continue
		}

		group.Add(1)
		go func(c *container.Container) {
			defer group.Done()
			if err := daemon.prepareMountPoints(c); err != nil {
				logrus.Error(err)
			}
		}(c)
	}
	group.Wait()

	logrus.Info("Loading containers: done.")

	return nil
}
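
// Note: with live restore enabled, restore leaves running containers
// attached to their existing containerd tasks; without it, any surviving
// tasks are killed above and the containers' final state is checkpointed
// before the daemon continues starting up.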

// RestartSwarmContainers restarts any autostart container which has a
// swarm endpoint.
func (daemon *Daemon) RestartSwarmContainers() {
	group := sync.WaitGroup{}
	for _, c := range daemon.List() {
		if !c.IsRunning() && !c.IsPaused() {
			// Autostart all the containers which have a swarm endpoint,
			// now that the cluster is initialized.
			if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore {
				group.Add(1)
				go func(c *container.Container) {
					defer group.Done()
					if err := daemon.containerStart(c, "", "", true); err != nil {
						logrus.Error(err)
					}
				}(c)
			}
		}
	}
	group.Wait()
}

// waitForNetworks is used during daemon initialization when starting up containers.
// It ensures that all of a container's networks are available before the daemon tries to start the container.
// In practice it just makes sure the discovery service is available for containers which use a network that requires discovery.
func (daemon *Daemon) waitForNetworks(c *container.Container) {
	if daemon.discoveryWatcher == nil {
		return
	}

	// Make sure if the container has a network that requires discovery that the discovery service is available before starting
	for netName := range c.NetworkSettings.Networks {
		// If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready
		// Most likely this is because the K/V store used for discovery is in a container and needs to be started
		if _, err := daemon.netController.NetworkByName(netName); err != nil {
			if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
				continue
			}

			// use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host
			// FIXME: why is this slow???
			logrus.Debugf("Container %s waiting for network to be ready", c.Name)
			select {
			case <-daemon.discoveryWatcher.ReadyCh():
			case <-time.After(60 * time.Second):
			}
			return
		}
	}
}

func (daemon *Daemon) children(c *container.Container) map[string]*container.Container {
	return daemon.linkIndex.children(c)
}
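
// children and parents walk the two directions of the legacy --link graph.
// As a hypothetical example, for containers created with
//
//	docker run -d --name db redis
//	docker run -d --name web --link db:database nginx
//
// children(web) contains the db container keyed by the alias path
// "/web/database" (see registerLink below), and parents(db) contains web.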

// parents returns the names of the parent containers of the container
// with the given name.
func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container {
	return daemon.linkIndex.parents(c)
}

func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
	fullName := path.Join(parent.Name, alias)
	if err := daemon.containersReplica.ReserveName(fullName, child.ID); err != nil {
		if err == container.ErrNameReserved {
			logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err)
			return nil
		}
		return err
	}
	daemon.linkIndex.link(parent, child, fullName)
	return nil
}

// DaemonJoinsCluster informs the daemon that it has joined the cluster and
// provides the handler to query the cluster component.
func (daemon *Daemon) DaemonJoinsCluster(clusterProvider cluster.Provider) {
	daemon.setClusterProvider(clusterProvider)
}

// DaemonLeavesCluster informs the daemon that it has left the cluster.
func (daemon *Daemon) DaemonLeavesCluster() {
	// Daemon is in charge of removing the attachable networks with
	// connected containers when the node leaves the swarm
	daemon.clearAttachableNetworks()
	// We no longer need the cluster provider, stop it now so that
	// the network agent will stop listening to cluster events.
	daemon.setClusterProvider(nil)
	// Wait for the networking cluster agent to stop
	daemon.netController.AgentStopWait()
	// Daemon is in charge of removing the ingress network when the
	// node leaves the swarm. Wait for job to be done or timeout.
	// This is called also on graceful daemon shutdown. We need to
	// wait, because the ingress release has to happen before the
	// network controller is stopped.
	if done, err := daemon.ReleaseIngress(); err == nil {
		select {
		case <-done:
		case <-time.After(5 * time.Second):
			logrus.Warn("timeout while waiting for ingress network removal")
		}
	} else {
		logrus.Warnf("failed to initiate ingress network removal: %v", err)
	}

	daemon.attachmentStore.ClearAttachments()
}

// setClusterProvider sets a component for querying the current cluster state.
func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) {
	daemon.clusterProvider = clusterProvider
	daemon.netController.SetClusterProvider(clusterProvider)
	daemon.attachableNetworkLock = locker.New()
}

// IsSwarmCompatible verifies if the current daemon
// configuration is compatible with the swarm mode
func (daemon *Daemon) IsSwarmCompatible() error {
	if daemon.configStore == nil {
		return nil
	}
	return daemon.configStore.IsSwarmCompatible()
}

// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.Store) (daemon *Daemon, err error) {
	setDefaultMtu(config)

	registryService, err := registry.NewService(config.ServiceOptions)
	if err != nil {
		return nil, err
	}

	// Ensure that we have a correct root key limit for launching containers.
	if err := ModifyRootKeyLimit(); err != nil {
		logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err)
	}

	// Ensure we have compatible and valid configuration options
	if err := verifyDaemonSettings(config); err != nil {
		return nil, err
	}

	// Do we have a disabled network?
	config.DisableBridge = isBridgeNetworkDisabled(config)

	// Set up the resolv.conf
	setupResolvConf(config)

	// Verify the platform is supported as a daemon
	if !platformSupported {
		return nil, errSystemNotSupported
	}

	// Validate platform-specific requirements
	if err := checkSystem(); err != nil {
		return nil, err
	}

	idMapping, err := setupRemappedRoot(config)
	if err != nil {
		return nil, err
	}
	rootIDs := idMapping.RootPair()
	if err := setupDaemonProcess(config); err != nil {
		return nil, err
	}

	// set up the tmpDir to use a canonical path
	tmp, err := prepareTempDir(config.Root, rootIDs)
	if err != nil {
		return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
	}
	realTmp, err := getRealPath(tmp)
	if err != nil {
		return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
	}
	if runtime.GOOS == "windows" {
		if _, err := os.Stat(realTmp); err != nil && os.IsNotExist(err) {
			if err := system.MkdirAll(realTmp, 0700, ""); err != nil {
				return nil, fmt.Errorf("Unable to create the TempDir (%s): %s", realTmp, err)
			}
		}
		os.Setenv("TEMP", realTmp)
		os.Setenv("TMP", realTmp)
	} else {
		os.Setenv("TMPDIR", realTmp)
	}

	d := &Daemon{
		configStore: config,
		PluginStore: pluginStore,
		startupDone: make(chan struct{}),
	}

	// Ensure the daemon is properly shutdown if there is a failure during
	// initialization
	defer func() {
		if err != nil {
			if err := d.Shutdown(); err != nil {
				logrus.Error(err)
			}
		}
	}()

	if err := d.setGenericResources(config); err != nil {
		return nil, err
	}

	// Set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
	// on Windows to dump Go routine stacks
	stackDumpDir := config.Root
	if execRoot := config.GetExecRoot(); execRoot != "" {
		stackDumpDir = execRoot
	}
	d.setupDumpStackTrap(stackDumpDir)

	if err := d.setupSeccompProfile(); err != nil {
		return nil, err
	}

	// Set the default isolation mode (only applicable on Windows)
	if err := d.setDefaultIsolation(); err != nil {
		return nil, fmt.Errorf("error setting default isolation mode: %v", err)
	}

	if err := configureMaxThreads(config); err != nil {
		logrus.Warnf("Failed to configure golang's threads limit: %v", err)
	}

	if err := ensureDefaultAppArmorProfile(); err != nil {
		logrus.Errorf(err.Error())
	}

	daemonRepo := filepath.Join(config.Root, "containers")
	if err := idtools.MkdirAllAndChown(daemonRepo, 0700, rootIDs); err != nil {
		return nil, err
	}

	// Create the directory where we'll store the runtime scripts (i.e. in
	// order to support runtimeArgs)
	daemonRuntimes := filepath.Join(config.Root, "runtimes")
	if err := system.MkdirAll(daemonRuntimes, 0700, ""); err != nil {
		return nil, err
	}
	if err := d.loadRuntimes(); err != nil {
		return nil, err
	}

	if runtime.GOOS == "windows" {
		if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0, ""); err != nil {
			return nil, err
		}
	}

	// On Windows we don't support the environment variable, or a user supplied graphdriver,
	// as Windows has no choice in terms of which graphdrivers to use. It's a case of
	// running Windows containers on Windows (windowsfilter) or running Linux containers on
	// Windows (lcow).
	// Unix platforms, however, run a single graphdriver for all containers, and it can
	// be set through an environment variable, a daemon start parameter, or chosen through
	// initialization of the layerstore through driver priority order, for example.
	d.graphDrivers = make(map[string]string)
	layerStores := make(map[string]layer.Store)
	if runtime.GOOS == "windows" {
		d.graphDrivers[runtime.GOOS] = "windowsfilter"
		if system.LCOWSupported() {
			d.graphDrivers["linux"] = "lcow"
		}
	} else {
		driverName := os.Getenv("DOCKER_DRIVER")
		if driverName == "" {
			driverName = config.GraphDriver
		} else {
			logrus.Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", driverName)
		}
		d.graphDrivers[runtime.GOOS] = driverName // May still be empty. Layerstore init determines instead.
	}

	d.RegistryService = registryService
	logger.RegisterPluginGetter(d.PluginStore)

	metricsSockPath, err := d.listenMetricsSock()
	if err != nil {
		return nil, err
	}
	registerMetricsPluginCallback(d.PluginStore, metricsSockPath)

	gopts := []grpc.DialOption{
		grpc.WithInsecure(),
		grpc.WithBackoffMaxDelay(3 * time.Second),
		grpc.WithDialer(dialer.Dialer),

		// TODO(stevvooe): We may need to allow configuration of this on the client.
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)),
		grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
	}
	if config.ContainerdAddr != "" {
		d.containerdCli, err = containerd.New(config.ContainerdAddr, containerd.WithDefaultNamespace(ContainersNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
		if err != nil {
			return nil, errors.Wrapf(err, "failed to dial %q", config.ContainerdAddr)
		}
	}

	createPluginExec := func(m *plugin.Manager) (plugin.Executor, error) {
		var pluginCli *containerd.Client

		// Windows is not currently using containerd, keep the
		// client as nil
		if config.ContainerdAddr != "" {
			pluginCli, err = containerd.New(config.ContainerdAddr, containerd.WithDefaultNamespace(pluginexec.PluginNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
			if err != nil {
				return nil, errors.Wrapf(err, "failed to dial %q", config.ContainerdAddr)
			}
		}

		return pluginexec.New(ctx, getPluginExecRoot(config.Root), pluginCli, m)
	}
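
	// Note: the daemon and the plugin executor each dial the same containerd
	// address but use different containerd namespaces (ContainersNamespace
	// vs. pluginexec.PluginNamespace), so containers and plugins stay
	// isolated from each other inside a single containerd instance.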

	// Plugin system initialization should happen before restore. Do not change order.
	d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{
		Root:               filepath.Join(config.Root, "plugins"),
		ExecRoot:           getPluginExecRoot(config.Root),
		Store:              d.PluginStore,
		CreateExecutor:     createPluginExec,
		RegistryService:    registryService,
		LiveRestoreEnabled: config.LiveRestoreEnabled,
		LogPluginEvent:     d.LogPluginEvent, // todo: make private
		AuthzMiddleware:    config.AuthzMiddleware,
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't create plugin manager")
	}

	if err := d.setupDefaultLogConfig(); err != nil {
		return nil, err
	}

	for operatingSystem, gd := range d.graphDrivers {
		layerStores[operatingSystem], err = layer.NewStoreFromOptions(layer.StoreOptions{
			Root:                      config.Root,
			MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
			GraphDriver:               gd,
			GraphDriverOptions:        config.GraphOptions,
			IDMapping:                 idMapping,
			PluginGetter:              d.PluginStore,
			ExperimentalEnabled:       config.Experimental,
			OS:                        operatingSystem,
		})
		if err != nil {
			return nil, err
		}
	}

	// As layerstore initialization may set the driver
	for os := range d.graphDrivers {
		d.graphDrivers[os] = layerStores[os].DriverName()
	}

	// Configure and validate the kernel's security support. Note this is a Linux/FreeBSD
	// operation only, so it is safe to pass *just* the runtime OS graphdriver.
	if err := configureKernelSecuritySupport(config, d.graphDrivers[runtime.GOOS]); err != nil {
		return nil, err
	}

	imageRoot := filepath.Join(config.Root, "image", d.graphDrivers[runtime.GOOS])
	ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
	if err != nil {
		return nil, err
	}

	lgrMap := make(map[string]image.LayerGetReleaser)
	for os, ls := range layerStores {
		lgrMap[os] = ls
	}
	imageStore, err := image.NewImageStore(ifs, lgrMap)
	if err != nil {
		return nil, err
	}

	d.volumes, err = volumesservice.NewVolumeService(config.Root, d.PluginStore, rootIDs, d)
	if err != nil {
		return nil, err
	}

	trustKey, err := loadOrCreateTrustKey(config.TrustKeyPath)
	if err != nil {
		return nil, err
	}

	trustDir := filepath.Join(config.Root, "trust")

	if err := system.MkdirAll(trustDir, 0700, ""); err != nil {
		return nil, err
	}
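
	// At this point the on-disk layout under config.Root (typically
	// /var/lib/docker on Linux) includes, among others: containers/,
	// image/<driver>/ (e.g. image/overlay2/), plugins/, runtimes/, trust/
	// and volumes/.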

	// We have a single tag/reference store for the daemon globally. However, it's
	// stored under the graphdriver. On host platforms which only support a single
	// container OS, but multiple selectable graphdrivers, this means depending on which
	// graphdriver is chosen, the global reference store is under there. For
	// platforms which support multiple container operating systems, this is slightly
	// more problematic, as it's unclear where the global ref store should be located.
	// Fortunately, for Windows, which is currently the only daemon supporting multiple
	// container operating systems, the list of graphdrivers available isn't user
	// configurable. For backwards compatibility, we just put it under the
	// windowsfilter directory regardless.
	refStoreLocation := filepath.Join(imageRoot, `repositories.json`)
	rs, err := refstore.NewReferenceStore(refStoreLocation)
	if err != nil {
		return nil, fmt.Errorf("Couldn't create reference store repository: %s", err)
	}

	distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution"))
	if err != nil {
		return nil, err
	}

	// No content-addressability migration on Windows as it never supported pre-CA
	if runtime.GOOS != "windows" {
		migrationStart := time.Now()
		if err := v1.Migrate(config.Root, d.graphDrivers[runtime.GOOS], layerStores[runtime.GOOS], imageStore, rs, distributionMetadataStore); err != nil {
			logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
		}
		logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())
	}

	// Discovery is only enabled when the daemon is launched with an address to advertise. When
	// initialized, the daemon is registered and we can store the discovery backend as it's read-only
	if err := d.initDiscovery(config); err != nil {
		return nil, err
	}

	sysInfo := sysinfo.New(false)
	// Check if Devices cgroup is mounted; it is a hard requirement for
	// container security on Linux.
	if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled {
		return nil, errors.New("Devices cgroup isn't mounted")
	}

	d.ID = trustKey.PublicKey().KeyID()
	d.repository = daemonRepo
	d.containers = container.NewMemoryStore()
	if d.containersReplica, err = container.NewViewDB(); err != nil {
		return nil, err
	}
	d.execCommands = exec.NewStore()
	d.idIndex = truncindex.NewTruncIndex([]string{})
	d.statsCollector = d.newStatsCollector(1 * time.Second)

	d.EventsService = events.New()
	d.root = config.Root
	d.idMapping = idMapping
	d.seccompEnabled = sysInfo.Seccomp
	d.apparmorEnabled = sysInfo.AppArmor

	d.linkIndex = newLinkIndex()

	// TODO: imageStore, distributionMetadataStore, and ReferenceStore are only
	// used above to run migration. They could be initialized in ImageService
	// if migration is called from daemon/images. layerStore might move as well.
	d.imageService = images.NewImageService(images.ImageServiceConfig{
		ContainerStore:            d.containers,
		DistributionMetadataStore: distributionMetadataStore,
		EventsService:             d.EventsService,
		ImageStore:                imageStore,
		LayerStores:               layerStores,
		MaxConcurrentDownloads:    *config.MaxConcurrentDownloads,
		MaxConcurrentUploads:      *config.MaxConcurrentUploads,
		ReferenceStore:            rs,
		RegistryService:           registryService,
		TrustKey:                  trustKey,
	})

	go d.execCommandGC()

	d.containerd, err = libcontainerd.NewClient(ctx, d.containerdCli, filepath.Join(config.ExecRoot, "containerd"), ContainersNamespace, d)
	if err != nil {
		return nil, err
	}

	if err := d.restore(); err != nil {
		return nil, err
	}
	close(d.startupDone)

	// FIXME: this method never returns an error
	info, _ := d.SystemInfo()

	engineInfo.WithValues(
		dockerversion.Version,
		dockerversion.GitCommit,
		info.Architecture,
		info.Driver,
		info.KernelVersion,
		info.OperatingSystem,
		info.OSType,
		info.ID,
	).Set(1)
	engineCpus.Set(float64(info.NCPU))
	engineMemory.Set(float64(info.MemTotal))

	gd := ""
	for os, driver := range d.graphDrivers {
		if len(gd) > 0 {
			gd += ", "
		}
		gd += driver
		if len(d.graphDrivers) > 1 {
			gd = fmt.Sprintf("%s (%s)", gd, os)
		}
	}
	logrus.WithFields(logrus.Fields{
		"version":        dockerversion.Version,
		"commit":         dockerversion.GitCommit,
		"graphdriver(s)": gd,
	}).Info("Docker daemon")

	return d, nil
}

// DistributionServices returns services controlling daemon storage
func (daemon *Daemon) DistributionServices() images.DistributionServices {
	return daemon.imageService.DistributionServices()
}

func (daemon *Daemon) waitForStartupDone() {
	<-daemon.startupDone
}

func (daemon *Daemon) shutdownContainer(c *container.Container) error {
	stopTimeout := c.StopTimeout()

	// If the container fails to exit within stopTimeout seconds of SIGTERM, use force
	if err := daemon.containerStop(c, stopTimeout); err != nil {
		return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err)
	}

	// Wait without timeout for the container to exit.
	// Ignore the result.
	<-c.Wait(context.Background(), container.WaitConditionNotRunning)
	return nil
}

// ShutdownTimeout returns the timeout (in seconds) before containers are forcibly
// killed during shutdown. The default timeout can be configured both on the daemon
// and per container, and the longest timeout will be used. A grace-period of
// 5 seconds is added to the configured timeout.
//
// A negative (-1) timeout means "indefinitely", which means that containers
// are not forcibly killed, and the daemon shuts down after all containers exit.
func (daemon *Daemon) ShutdownTimeout() int {
	shutdownTimeout := daemon.configStore.ShutdownTimeout
	if shutdownTimeout < 0 {
		return -1
	}
	if daemon.containers == nil {
		return shutdownTimeout
	}

	graceTimeout := 5
	for _, c := range daemon.containers.List() {
		stopTimeout := c.StopTimeout()
		if stopTimeout < 0 {
			return -1
		}
		if stopTimeout+graceTimeout > shutdownTimeout {
			shutdownTimeout = stopTimeout + graceTimeout
		}
	}
	return shutdownTimeout
}
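
// For example, with a daemon-level shutdown timeout of 15 and two containers
// whose stop timeouts are 10 and 25, ShutdownTimeout returns 30: the longest
// per-container timeout (25) plus the 5-second grace period exceeds both the
// daemon default (15) and 10+5.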

// Shutdown stops the daemon.
func (daemon *Daemon) Shutdown() error {
	daemon.shutdown = true

	// Keep mounts and networking running on daemon shutdown if
	// we are to keep containers running and restore them.
	if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil {
		// check if there are any running containers, if none we should do some cleanup
		if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil {
			// metrics plugins still need some cleanup
			daemon.cleanupMetricsPlugins()
			return nil
		}
	}

	if daemon.containers != nil {
		logrus.Debugf("daemon configured with a %d seconds minimum shutdown timeout", daemon.configStore.ShutdownTimeout)
		logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.ShutdownTimeout())
		daemon.containers.ApplyAll(func(c *container.Container) {
			if !c.IsRunning() {
				return
			}
			logrus.Debugf("stopping %s", c.ID)
			if err := daemon.shutdownContainer(c); err != nil {
				logrus.Errorf("Stop container error: %v", err)
				return
			}
			if mountid, err := daemon.imageService.GetLayerMountID(c.ID, c.OS); err == nil {
				daemon.cleanupMountsByID(mountid)
			}
			logrus.Debugf("container stopped %s", c.ID)
		})
	}

	if daemon.volumes != nil {
		if err := daemon.volumes.Shutdown(); err != nil {
			logrus.Errorf("Error shutting down volume store: %v", err)
		}
	}

	if daemon.imageService != nil {
		daemon.imageService.Cleanup()
	}

	// If we are part of a cluster, clean up cluster's stuff
	if daemon.clusterProvider != nil {
		logrus.Debugf("start clean shutdown of cluster resources...")
		daemon.DaemonLeavesCluster()
	}

	daemon.cleanupMetricsPlugins()

	// Shutdown plugins after containers and layerstore. Don't change the order.
	daemon.pluginShutdown()

	// trigger libnetwork Stop only if it's initialized
	if daemon.netController != nil {
		daemon.netController.Stop()
	}

	if daemon.containerdCli != nil {
		daemon.containerdCli.Close()
	}

	return daemon.cleanupMounts()
}

// Mount sets container.BaseFS
// (is it not set coming in? why is it unset?)
func (daemon *Daemon) Mount(container *container.Container) error {
	if container.RWLayer == nil {
		return errors.New("RWLayer of container " + container.ID + " is unexpectedly nil")
	}
	dir, err := container.RWLayer.Mount(container.GetMountLabel())
	if err != nil {
		return err
	}
	logrus.Debugf("container mounted via layerStore: %v", dir)

	if container.BaseFS != nil && container.BaseFS.Path() != dir.Path() {
		// The mount path reported by the graph driver should always be trusted on Windows, since the
		// volume path for a given mounted layer may change over time. This should only be an error
		// on non-Windows operating systems.
		if runtime.GOOS != "windows" {
			daemon.Unmount(container)
			return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
				daemon.imageService.GraphDriverForOS(container.OS), container.ID, container.BaseFS, dir)
		}
	}
	container.BaseFS = dir // TODO: combine these fields
	return nil
}

// Unmount unsets the container base filesystem
func (daemon *Daemon) Unmount(container *container.Container) error {
	if container.RWLayer == nil {
		return errors.New("RWLayer of container " + container.ID + " is unexpectedly nil")
	}
	if err := container.RWLayer.Unmount(); err != nil {
		logrus.Errorf("Error unmounting container %s: %s", container.ID, err)
		return err
	}

	return nil
}

// Subnets returns the IPv4 and IPv6 subnets of networks that are managed by Docker.
func (daemon *Daemon) Subnets() ([]net.IPNet, []net.IPNet) {
	var v4Subnets []net.IPNet
	var v6Subnets []net.IPNet

	managedNetworks := daemon.netController.Networks()

	for _, managedNetwork := range managedNetworks {
		v4infos, v6infos := managedNetwork.Info().IpamInfo()
		for _, info := range v4infos {
			if info.IPAMData.Pool != nil {
				v4Subnets = append(v4Subnets, *info.IPAMData.Pool)
			}
		}
		for _, info := range v6infos {
			if info.IPAMData.Pool != nil {
				v6Subnets = append(v6Subnets, *info.IPAMData.Pool)
			}
		}
	}

	return v4Subnets, v6Subnets
}

// prepareTempDir prepares and returns the default directory to use
// for temporary files.
// If it doesn't exist, it is created. If it exists, its content is removed.
func prepareTempDir(rootDir string, rootIdentity idtools.Identity) (string, error) {
	var tmpDir string
	if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
		tmpDir = filepath.Join(rootDir, "tmp")
		newName := tmpDir + "-old"
		if err := os.Rename(tmpDir, newName); err == nil {
			go func() {
				if err := os.RemoveAll(newName); err != nil {
					logrus.Warnf("failed to delete old tmp directory: %s", newName)
				}
			}()
		} else if !os.IsNotExist(err) {
			logrus.Warnf("failed to rename %s for background deletion: %s. Deleting synchronously", tmpDir, err)
			if err := os.RemoveAll(tmpDir); err != nil {
				logrus.Warnf("failed to delete old tmp directory: %s", tmpDir)
			}
		}
	}
	// We don't remove the content of tmpDir if it's not the default;
	// it may hold things that do not belong to us.
	return tmpDir, idtools.MkdirAllAndChown(tmpDir, 0700, rootIdentity)
}

func (daemon *Daemon) setGenericResources(conf *config.Config) error {
	genericResources, err := config.ParseGenericResources(conf.NodeGenericResources)
	if err != nil {
		return err
	}

	daemon.genericResources = genericResources

	return nil
}

func setDefaultMtu(conf *config.Config) {
	// do nothing if the config does not have the default 0 value.
	if conf.Mtu != 0 {
		return
	}
	conf.Mtu = config.DefaultNetworkMtu
}

// IsShuttingDown tells whether the daemon is shutting down or not
func (daemon *Daemon) IsShuttingDown() bool {
	return daemon.shutdown
}
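
// A daemon started with cluster options, e.g. the (historical) flags
//
//	dockerd --cluster-store=consul://localhost:8500 --cluster-advertise=eth0:2376
//
// resolves the advertise address from the interface name and registers
// itself with the configured discovery backend in initDiscovery below.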

// initDiscovery initializes the discovery watcher for this daemon.
func (daemon *Daemon) initDiscovery(conf *config.Config) error {
	advertise, err := config.ParseClusterAdvertiseSettings(conf.ClusterStore, conf.ClusterAdvertise)
	if err != nil {
		if err == discovery.ErrDiscoveryDisabled {
			return nil
		}
		return err
	}

	conf.ClusterAdvertise = advertise
	discoveryWatcher, err := discovery.Init(conf.ClusterStore, conf.ClusterAdvertise, conf.ClusterOpts)
	if err != nil {
		return fmt.Errorf("discovery initialization failed (%v)", err)
	}

	daemon.discoveryWatcher = discoveryWatcher
	return nil
}

func isBridgeNetworkDisabled(conf *config.Config) bool {
	return conf.BridgeConfig.Iface == config.DisableNetworkBridge
}

func (daemon *Daemon) networkOptions(dconfig *config.Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
	options := []nwconfig.Option{}
	if dconfig == nil {
		return options, nil
	}

	options = append(options, nwconfig.OptionExperimental(dconfig.Experimental))
	options = append(options, nwconfig.OptionDataDir(dconfig.Root))
	options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot()))

	dd := runconfig.DefaultDaemonNetworkMode()
	dn := runconfig.DefaultDaemonNetworkMode().NetworkName()
	options = append(options, nwconfig.OptionDefaultDriver(string(dd)))
	options = append(options, nwconfig.OptionDefaultNetwork(dn))

	if strings.TrimSpace(dconfig.ClusterStore) != "" {
		kv := strings.Split(dconfig.ClusterStore, "://")
		if len(kv) != 2 {
			return nil, errors.New("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
		}
		options = append(options, nwconfig.OptionKVProvider(kv[0]))
		options = append(options, nwconfig.OptionKVProviderURL(kv[1]))
	}
	if len(dconfig.ClusterOpts) > 0 {
		options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts))
	}

	if daemon.discoveryWatcher != nil {
		options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher))
	}

	if dconfig.ClusterAdvertise != "" {
		options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise))
	}

	options = append(options, nwconfig.OptionLabels(dconfig.Labels))
	options = append(options, driverOptions(dconfig)...)

	if len(dconfig.NetworkConfig.DefaultAddressPools.Value()) > 0 {
		options = append(options, nwconfig.OptionDefaultAddressPoolConfig(dconfig.NetworkConfig.DefaultAddressPools.Value()))
	}

	if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 {
		options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes))
	}

	if pg != nil {
		options = append(options, nwconfig.OptionPluginGetter(pg))
	}

	options = append(options, nwconfig.OptionNetworkControlPlaneMTU(dconfig.NetworkControlPlaneMTU))

	return options, nil
}

// GetCluster returns the cluster
func (daemon *Daemon) GetCluster() Cluster {
	return daemon.cluster
}

// SetCluster sets the cluster
func (daemon *Daemon) SetCluster(cluster Cluster) {
	daemon.cluster = cluster
}
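
// Cluster store notation accepted by networkOptions above: a value of the
// form KV-PROVIDER://KV-URL. "consul://localhost:8500", for example, splits
// into the KV provider "consul" and the provider URL "localhost:8500".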

func (daemon *Daemon) pluginShutdown() {
	manager := daemon.pluginManager
	// Check for a valid manager object. In error conditions, daemon init can
	// fail and shutdown may be called before the plugin manager is initialized.
	if manager != nil {
		manager.Shutdown()
	}
}

// PluginManager returns the current pluginManager associated with the daemon
func (daemon *Daemon) PluginManager() *plugin.Manager { // set up before daemon to avoid this method
	return daemon.pluginManager
}

// PluginGetter returns the current pluginStore associated with the daemon
func (daemon *Daemon) PluginGetter() *plugin.Store {
	return daemon.PluginStore
}

// CreateDaemonRoot creates the root for the daemon
func CreateDaemonRoot(config *config.Config) error {
	// get the canonical path to the Docker root directory
	var realRoot string
	if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
		realRoot = config.Root
	} else {
		realRoot, err = getRealPath(config.Root)
		if err != nil {
			return fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
		}
	}

	idMapping, err := setupRemappedRoot(config)
	if err != nil {
		return err
	}
	return setupDaemonRoot(config, realRoot, idMapping.RootPair())
}

// checkpointAndSave grabs a container lock to safely call container.CheckpointTo
func (daemon *Daemon) checkpointAndSave(container *container.Container) error {
	container.Lock()
	defer container.Unlock()
	if err := container.CheckpointTo(daemon.containersReplica); err != nil {
		return fmt.Errorf("Error saving container state: %v", err)
	}
	return nil
}

// fixMemorySwappiness clears the swappiness value on the server side, because
// the CLI sends -1 when it wants to unset it.
func fixMemorySwappiness(resources *containertypes.Resources) {
	if resources.MemorySwappiness != nil && *resources.MemorySwappiness == -1 {
		resources.MemorySwappiness = nil
	}
}

// GetAttachmentStore returns the current attachment store associated with the daemon
func (daemon *Daemon) GetAttachmentStore() *network.AttachmentStore {
	return &daemon.attachmentStore
}

// IdentityMapping returns the uid/gid mapping or a SID (in the case of Windows) for the builder
func (daemon *Daemon) IdentityMapping() *idtools.IdentityMapping {
	return daemon.idMapping
}

// ImageService returns the Daemon's ImageService
func (daemon *Daemon) ImageService() *images.ImageService {
	return daemon.imageService
}

// BuilderBackend returns the backend used by the builder
func (daemon *Daemon) BuilderBackend() builder.Backend {
	return struct {
		*Daemon
		*images.ImageService
	}{daemon, daemon.imageService}
}
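
// A minimal sketch of how a caller wires the pieces above together (error
// handling elided; cfg and pluginStore are assumed to be populated by the
// caller):
//
//	if err := CreateDaemonRoot(cfg); err != nil {
//		return err
//	}
//	d, err := NewDaemon(context.Background(), cfg, pluginStore)
//	if err != nil {
//		return err
//	}
//	defer d.Shutdown()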