1/* 2Package libnetwork provides the basic functionality and extension points to 3create network namespaces and allocate interfaces for containers to use. 4 5 networkType := "bridge" 6 7 // Create a new controller instance 8 driverOptions := options.Generic{} 9 genericOption := make(map[string]interface{}) 10 genericOption[netlabel.GenericData] = driverOptions 11 controller, err := libnetwork.New(config.OptionDriverConfig(networkType, genericOption)) 12 if err != nil { 13 return 14 } 15 16 // Create a network for containers to join. 17 // NewNetwork accepts Variadic optional arguments that libnetwork and Drivers can make use of 18 network, err := controller.NewNetwork(networkType, "network1", "") 19 if err != nil { 20 return 21 } 22 23 // For each new container: allocate IP and interfaces. The returned network 24 // settings will be used for container infos (inspect and such), as well as 25 // iptables rules for port publishing. This info is contained or accessible 26 // from the returned endpoint. 27 ep, err := network.CreateEndpoint("Endpoint1") 28 if err != nil { 29 return 30 } 31 32 // Create the sandbox for the container. 33 // NewSandbox accepts Variadic optional arguments which libnetwork can use. 34 sbx, err := controller.NewSandbox("container1", 35 libnetwork.OptionHostname("test"), 36 libnetwork.OptionDomainname("docker.io")) 37 38 // A sandbox can join the endpoint via the join api. 
	err = ep.Join(sbx)
	if err != nil {
		return
	}
*/
package libnetwork

import (
	"fmt"
	"net"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"time"

	"github.com/docker/docker/pkg/discovery"
	"github.com/docker/docker/pkg/plugingetter"
	"github.com/docker/docker/pkg/plugins"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/libnetwork/cluster"
	"github.com/docker/libnetwork/config"
	"github.com/docker/libnetwork/datastore"
	"github.com/docker/libnetwork/diagnostic"
	"github.com/docker/libnetwork/discoverapi"
	"github.com/docker/libnetwork/driverapi"
	"github.com/docker/libnetwork/drvregistry"
	"github.com/docker/libnetwork/hostdiscovery"
	"github.com/docker/libnetwork/ipamapi"
	"github.com/docker/libnetwork/netlabel"
	"github.com/docker/libnetwork/options"
	"github.com/docker/libnetwork/osl"
	"github.com/docker/libnetwork/types"
	"github.com/moby/locker"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// NetworkController provides the interface for controller instance which manages
// networks.
type NetworkController interface {
	// ID provides a unique identity for the controller.
	ID() string

	// BuiltinDrivers returns the list of builtin network drivers.
	BuiltinDrivers() []string

	// BuiltinIPAMDrivers returns the list of builtin ipam drivers.
	BuiltinIPAMDrivers() []string

	// Config returns the bootup configuration for the controller.
	Config() config.Config

	// NewNetwork creates a new network of the given type. The options
	// parameter carries network specific options.
	NewNetwork(networkType, name string, id string, options ...NetworkOption) (Network, error)

	// Networks returns the list of Network(s) managed by this controller.
	Networks() []Network

	// WalkNetworks uses the provided function to walk the Network(s) managed by this controller.
	WalkNetworks(walker NetworkWalker)

	// NetworkByName returns the Network which has the passed name.
	// If not found, the error ErrNoSuchNetwork is returned.
	NetworkByName(name string) (Network, error)

	// NetworkByID returns the Network which has the passed id.
	// If not found, the error ErrNoSuchNetwork is returned.
	NetworkByID(id string) (Network, error)

	// NewSandbox creates a new network sandbox for the passed container id.
	NewSandbox(containerID string, options ...SandboxOption) (Sandbox, error)

	// Sandboxes returns the list of Sandbox(s) managed by this controller.
	Sandboxes() []Sandbox

	// WalkSandboxes uses the provided function to walk the Sandbox(s) managed by this controller.
	WalkSandboxes(walker SandboxWalker)

	// SandboxByID returns the Sandbox which has the passed id.
	// If not found, a types.NotFoundError is returned.
	SandboxByID(id string) (Sandbox, error)

	// SandboxDestroy destroys a sandbox given a container ID.
	SandboxDestroy(id string) error

	// Stop stops the network controller.
	Stop()

	// ReloadConfiguration updates the controller configuration.
	ReloadConfiguration(cfgOptions ...config.Option) error

	// SetClusterProvider sets the cluster provider.
	SetClusterProvider(provider cluster.Provider)

	// AgentInitWait waits for agent initialization to complete in the
	// libnetwork controller.
	AgentInitWait()

	// AgentStopWait waits for the agent to stop, if it is running.
	AgentStopWait()

	// SetKeys configures the encryption key for gossip and overlay data path.
	SetKeys(keys []*types.EncryptionKey) error

	// StartDiagnostic starts the network diagnostic server on the given port.
	StartDiagnostic(port int)
	// StopDiagnostic stops the network diagnostic server.
	StopDiagnostic()
	// IsDiagnosticEnabled returns true if the diagnostic server is enabled.
	IsDiagnosticEnabled() bool
}

// NetworkWalker is a client provided function which will be used to walk the Networks.
// When the function returns true, the walk will stop.
type NetworkWalker func(nw Network) bool

// SandboxWalker is a client provided function which will be used to walk the Sandboxes.
// When the function returns true, the walk will stop.
type SandboxWalker func(sb Sandbox) bool

// sandboxTable maps sandbox IDs to their sandbox objects.
type sandboxTable map[string]*sandbox

// controller is the concrete implementation of NetworkController.
// The embedded sync.Mutex guards the mutable fields below; the struct must
// therefore never be copied (always use *controller).
type controller struct {
	id              string                      // immutable controller identity, set once in New
	drvRegistry     *drvregistry.DrvRegistry    // registry of network and IPAM drivers
	sandboxes       sandboxTable                // active sandboxes keyed by sandbox ID
	cfg             *config.Config              // bootup configuration; replaced by ReloadConfiguration
	stores          []datastore.DataStore
	discovery       hostdiscovery.HostDiscovery // host discovery watcher, nil until initDiscovery
	extKeyListener  net.Listener
	watchCh         chan *endpoint
	unWatchCh       chan *endpoint
	svcRecords      map[string]svcInfo
	nmap            map[string]*netWatch
	serviceBindings map[serviceKey]*service
	defOsSbox       osl.Sandbox // shared default OS sandbox, created at most once via sboxOnce
	ingressSandbox  *sandbox    // the single ingress sandbox, if any
	sboxOnce        sync.Once
	agent           *agent
	networkLocker   *locker.Locker // per-network-ID locks used during network creation
	// agentInitDone is closed by agentInitComplete when agent init finishes;
	// agentStopDone is closed by agentStopComplete when the agent stops.
	agentInitDone          chan struct{}
	agentStopDone          chan struct{}
	keys                   []*types.EncryptionKey
	clusterConfigAvailable bool
	DiagnosticServer       *diagnostic.Server
	sync.Mutex
}

// initializer pairs a driver init function with the network type it registers.
type initializer struct {
	fn    drvregistry.InitFunc
	ntype string
}

// New creates a new instance of network controller.
func New(cfgOptions ...config.Option) (NetworkController, error) {
	c := &controller{
		id:               stringid.GenerateRandomID(),
		cfg:              config.ParseConfigOptions(cfgOptions...),
		sandboxes:        sandboxTable{},
		svcRecords:       make(map[string]svcInfo),
		serviceBindings:  make(map[serviceKey]*service),
		agentInitDone:    make(chan struct{}),
		networkLocker:    locker.New(),
		DiagnosticServer: diagnostic.New(),
	}
	c.DiagnosticServer.Init()

	if err := c.initStores(); err != nil {
		return nil, err
	}

	drvRegistry, err := drvregistry.New(c.getStore(datastore.LocalScope), c.getStore(datastore.GlobalScope), c.RegisterDriver, nil, c.cfg.PluginGetter)
	if err != nil {
		return nil, err
	}

	// Register the builtin network drivers.
	for _, i := range getInitializers(c.cfg.Daemon.Experimental) {
		var dcfg map[string]interface{}

		// External plugins don't need config passed through daemon. They can
		// bootstrap themselves
		if i.ntype != "remote" {
			dcfg = c.makeDriverConfig(i.ntype)
		}

		if err := drvRegistry.AddDriver(i.ntype, i.fn, dcfg); err != nil {
			return nil, err
		}
	}

	if err = initIPAMDrivers(drvRegistry, nil, c.getStore(datastore.GlobalScope), c.cfg.Daemon.DefaultAddressPool); err != nil {
		return nil, err
	}

	c.drvRegistry = drvRegistry

	// NOTE(review): c.cfg is dereferenced unconditionally above, so the
	// c.cfg != nil guard here looks redundant — confirm before removing.
	if c.cfg != nil && c.cfg.Cluster.Watcher != nil {
		if err := c.initDiscovery(c.cfg.Cluster.Watcher); err != nil {
			// Failing to initialize discovery is a bad situation to be in.
			// But it cannot fail creating the Controller
			logrus.Errorf("Failed to Initialize Discovery : %v", err)
		}
	}

	c.WalkNetworks(populateSpecial)

	// Reserve pools first before doing cleanup. Otherwise the
	// cleanups of endpoint/network and sandbox below will
	// generate many unnecessary warnings
	c.reservePools()

	// Cleanup resources
	c.sandboxCleanup(c.cfg.ActiveSandboxes)
	c.cleanupLocalEndpoints()
	c.networkCleanup()

	if err := c.startExternalKeyListener(); err != nil {
		return nil, err
	}

	setupArrangeUserFilterRule(c)
	return c, nil
}

// SetClusterProvider sets the cluster provider and, when the provider actually
// changes, spawns the cluster-agent event loop goroutine.
func (c *controller) SetClusterProvider(provider cluster.Provider) {
	var sameProvider bool
	c.Lock()
	// Avoids to spawn multiple goroutine for the same cluster provider
	if c.cfg.Daemon.ClusterProvider == provider {
		// If the cluster provider is already set, there is already a go routine spawned
		// that is listening for events, so nothing to do here
		sameProvider = true
	} else {
		c.cfg.Daemon.ClusterProvider = provider
	}
	c.Unlock()

	if provider == nil || sameProvider {
		return
	}
	// We don't want to spawn a new go routine if the previous one did not exit yet
	c.AgentStopWait()
	go c.clusterAgentInit()
}

// isValidClusteringIP reports whether addr is a non-empty, non-loopback,
// non-unspecified IP string. (net.ParseIP returns nil for an invalid string;
// IsLoopback/IsUnspecified on a nil IP return false, so invalid input yields
// true here — presumably callers validate the address elsewhere; confirm.)
func isValidClusteringIP(addr string) bool {
	return addr != "" && !net.ParseIP(addr).IsLoopback() && !net.ParseIP(addr).IsUnspecified()
}

// libnetwork side of agent depends on the keys. On the first receipt of
// keys setup the agent.
// For subsequent key set handle the key change.
func (c *controller) SetKeys(keys []*types.EncryptionKey) error {
	// Validate: only the gossip and IPSec subsystems are recognized, and each
	// must receive exactly keyringSize keys.
	subsysKeys := make(map[string]int)
	for _, key := range keys {
		if key.Subsystem != subsysGossip &&
			key.Subsystem != subsysIPSec {
			return fmt.Errorf("key received for unrecognized subsystem")
		}
		subsysKeys[key.Subsystem]++
	}
	for s, count := range subsysKeys {
		if count != keyringSize {
			return fmt.Errorf("incorrect number of keys for subsystem %v", s)
		}
	}

	agent := c.getAgent()

	// No agent yet: stash the keys for the agent setup to pick up later.
	if agent == nil {
		c.Lock()
		c.keys = keys
		c.Unlock()
		return nil
	}
	return c.handleKeyChange(keys)
}

// getAgent returns the current agent under the controller lock.
func (c *controller) getAgent() *agent {
	c.Lock()
	defer c.Unlock()
	return c.agent
}

// clusterAgentInit is the long-running event loop that reacts to cluster
// provider events; it returns only when the node leaves the cluster.
func (c *controller) clusterAgentInit() {
	clusterProvider := c.cfg.Daemon.ClusterProvider
	var keysAvailable bool
	for {
		eventType := <-clusterProvider.ListenClusterEvents()
		// The events: EventSocketChange, EventNodeReady and EventNetworkKeysAvailable are not ordered
		// when all the condition for the agent initialization are met then proceed with it
		switch eventType {
		case cluster.EventNetworkKeysAvailable:
			// Validates that the keys are actually available before starting the initialization
			// This will handle old spurious messages left on the channel
			c.Lock()
			keysAvailable = c.keys != nil
			c.Unlock()
			fallthrough
		case cluster.EventSocketChange, cluster.EventNodeReady:
			if keysAvailable && !c.isDistributedControl() {
				c.agentOperationStart()
				if err := c.agentSetup(clusterProvider); err != nil {
					c.agentStopComplete()
				} else {
					c.agentInitComplete()
				}
			}
		case cluster.EventNodeLeave:
			c.agentOperationStart()
			c.Lock()
			c.keys = nil
			c.Unlock()

			// We are leaving the cluster. Make sure we
			// close the gossip so that we stop all
			// incoming gossip updates before cleaning up
			// any remaining service bindings. But before
			// deleting the networks since the networks
			// should still be present when cleaning up
			// service bindings
			c.agentClose()
			c.cleanupServiceDiscovery("")
			c.cleanupServiceBindings("")

			c.agentStopComplete()

			return
		}
	}
}

// AgentInitWait waits for agent initialization to be completed in the controller.
func (c *controller) AgentInitWait() {
	c.Lock()
	agentInitDone := c.agentInitDone
	c.Unlock()

	if agentInitDone != nil {
		<-agentInitDone
	}
}

// AgentStopWait waits for the Agent stop to be completed in the controller.
func (c *controller) AgentStopWait() {
	c.Lock()
	agentStopDone := c.agentStopDone
	c.Unlock()
	if agentStopDone != nil {
		<-agentStopDone
	}
}

// agentOperationStart marks the start of an Agent Init or Agent Stop by
// (re)arming both completion channels.
func (c *controller) agentOperationStart() {
	c.Lock()
	if c.agentInitDone == nil {
		c.agentInitDone = make(chan struct{})
	}
	if c.agentStopDone == nil {
		c.agentStopDone = make(chan struct{})
	}
	c.Unlock()
}

// agentInitComplete notifies the successful completion of the Agent
// initialization by closing agentInitDone (releasing AgentInitWait callers).
func (c *controller) agentInitComplete() {
	c.Lock()
	if c.agentInitDone != nil {
		close(c.agentInitDone)
		c.agentInitDone = nil
	}
	c.Unlock()
}

// agentStopComplete notifies the successful completion of the Agent stop by
// closing agentStopDone (releasing AgentStopWait callers).
func (c *controller) agentStopComplete() {
	c.Lock()
	if c.agentStopDone != nil {
		close(c.agentStopDone)
		c.agentStopDone = nil
	}
	c.Unlock()
}

// makeDriverConfig assembles the configuration map handed to the driver of
// the given network type, merging matching daemon labels, the daemon's
// per-driver config and the valid datastore scope client configs.
func (c *controller) makeDriverConfig(ntype string) map[string]interface{} {
	if c.cfg == nil {
		return nil
	}

	config := make(map[string]interface{})

	for _, label := range c.cfg.Daemon.Labels {
		// Only labels addressed to this driver are forwarded.
		if !strings.HasPrefix(netlabel.Key(label), netlabel.DriverPrefix+"."+ntype) {
			continue
		}

		config[netlabel.Key(label)] = netlabel.Value(label)
	}

	drvCfg, ok := c.cfg.Daemon.DriverCfg[ntype]
	if ok {
		for k, v := range drvCfg.(map[string]interface{}) {
			config[k] = v
		}
	}

	for k, v := range c.cfg.Scopes {
		if !v.IsValid() {
			continue
		}
		config[netlabel.MakeKVClient(k)] = discoverapi.DatastoreConfigData{
			Scope:    k,
			Provider: v.Client.Provider,
			Address:  v.Client.Address,
			Config:   v.Client.Config,
		}
	}

	return config
}

// procReloadConfig is a capacity-1 semaphore serializing ReloadConfiguration calls.
var procReloadConfig = make(chan (bool), 1)

// ReloadConfiguration updates the controller configuration. Only additions of
// datastore client configurations are accepted; removing or modifying an
// existing one is refused.
func (c *controller) ReloadConfiguration(cfgOptions ...config.Option) error {
	procReloadConfig <- true
	defer func() { <-procReloadConfig }()

	// For now we accept the configuration reload only as a mean to provide a global store config after boot.
	// Refuse the configuration if it alters an existing datastore client configuration.
	update := false
	cfg := config.ParseConfigOptions(cfgOptions...)

	for s := range c.cfg.Scopes {
		if _, ok := cfg.Scopes[s]; !ok {
			return types.ForbiddenErrorf("cannot accept new configuration because it removes an existing datastore client")
		}
	}
	for s, nSCfg := range cfg.Scopes {
		if eSCfg, ok := c.cfg.Scopes[s]; ok {
			if eSCfg.Client.Provider != nSCfg.Client.Provider ||
				eSCfg.Client.Address != nSCfg.Client.Address {
				return types.ForbiddenErrorf("cannot accept new configuration because it modifies an existing datastore client")
			}
		} else {
			// Brand new scope: initialize its store.
			if err := c.initScopedStore(s, nSCfg); err != nil {
				return err
			}
			update = true
		}
	}
	if !update {
		return nil
	}

	c.Lock()
	c.cfg = cfg
	c.Unlock()

	// Pick the first valid non-local scope config and broadcast it to the
	// IPAM and network drivers.
	var dsConfig *discoverapi.DatastoreConfigData
	for scope, sCfg := range cfg.Scopes {
		if scope == datastore.LocalScope || !sCfg.IsValid() {
			continue
		}
		dsConfig = &discoverapi.DatastoreConfigData{
			Scope:    scope,
			Provider: sCfg.Client.Provider,
			Address:  sCfg.Client.Address,
			Config:   sCfg.Client.Config,
		}
		break
	}
	if dsConfig == nil {
		return nil
	}

	c.drvRegistry.WalkIPAMs(func(name string, driver ipamapi.Ipam, cap *ipamapi.Capability) bool {
		err := driver.DiscoverNew(discoverapi.DatastoreConfig, *dsConfig)
		if err != nil {
			logrus.Errorf("Failed to set datastore in driver %s: %v", name, err)
		}
		return false
	})

	c.drvRegistry.WalkDrivers(func(name string, driver driverapi.Driver, capability driverapi.Capability) bool {
		err := driver.DiscoverNew(discoverapi.DatastoreConfig, *dsConfig)
		if err != nil {
			logrus.Errorf("Failed to set datastore in driver %s: %v", name, err)
		}
		return false
	})

	if c.discovery == nil && c.cfg.Cluster.Watcher != nil {
		if err := c.initDiscovery(c.cfg.Cluster.Watcher); err != nil {
			logrus.Errorf("Failed to Initialize Discovery after configuration update: %v", err)
		}
	}

	return nil
}
// ID returns the controller's unique identity.
func (c *controller) ID() string {
	return c.id
}

// BuiltinDrivers returns the list of builtin network drivers.
func (c *controller) BuiltinDrivers() []string {
	drivers := []string{}
	c.drvRegistry.WalkDrivers(func(name string, driver driverapi.Driver, capability driverapi.Capability) bool {
		if driver.IsBuiltIn() {
			drivers = append(drivers, name)
		}
		return false
	})
	return drivers
}

// BuiltinIPAMDrivers returns the list of builtin ipam drivers.
func (c *controller) BuiltinIPAMDrivers() []string {
	drivers := []string{}
	c.drvRegistry.WalkIPAMs(func(name string, driver ipamapi.Ipam, cap *ipamapi.Capability) bool {
		if driver.IsBuiltIn() {
			drivers = append(drivers, name)
		}
		return false
	})
	return drivers
}

// validateHostDiscoveryConfig reports whether both a cluster discovery URL
// and a cluster address are configured.
func (c *controller) validateHostDiscoveryConfig() bool {
	if c.cfg == nil || c.cfg.Cluster.Discovery == "" || c.cfg.Cluster.Address == "" {
		return false
	}
	return true
}

// clusterHostID returns the host part of the configured cluster address
// ("" when unconfigured).
func (c *controller) clusterHostID() string {
	c.Lock()
	defer c.Unlock()
	if c.cfg == nil || c.cfg.Cluster.Address == "" {
		return ""
	}
	addr := strings.Split(c.cfg.Cluster.Address, ":")
	return addr[0]
}

// isNodeAlive reports whether host discovery currently lists the given node.
func (c *controller) isNodeAlive(node string) bool {
	if c.discovery == nil {
		return false
	}

	nodes := c.discovery.Fetch()
	for _, n := range nodes {
		if n.String() == node {
			return true
		}
	}

	return false
}

// initDiscovery starts host discovery on the given watcher and wires the
// controller's join/leave callbacks.
func (c *controller) initDiscovery(watcher discovery.Watcher) error {
	if c.cfg == nil {
		return fmt.Errorf("discovery initialization requires a valid configuration")
	}

	c.discovery = hostdiscovery.NewHostDiscovery(watcher)
	return c.discovery.Watch(c.activeCallback, c.hostJoinCallback, c.hostLeaveCallback)
}

func (c *controller) activeCallback() {
	ds := c.getStore(datastore.GlobalScope)
	if ds != nil && !ds.Active() {
		ds.RestartWatch()
	}
}

func (c *controller) hostJoinCallback(nodes []net.IP) {
	c.processNodeDiscovery(nodes, true)
}

func (c *controller) hostLeaveCallback(nodes []net.IP) {
	c.processNodeDiscovery(nodes, false)
}

// processNodeDiscovery fans a node join/leave notification out to every
// registered network driver.
func (c *controller) processNodeDiscovery(nodes []net.IP, add bool) {
	c.drvRegistry.WalkDrivers(func(name string, driver driverapi.Driver, capability driverapi.Capability) bool {
		c.pushNodeDiscovery(driver, capability, nodes, add)
		return false
	})
}

// pushNodeDiscovery notifies a single global-scope driver of node joins or
// leaves, marking the entry matching this host's own address as Self.
func (c *controller) pushNodeDiscovery(d driverapi.Driver, cap driverapi.Capability, nodes []net.IP, add bool) {
	var self net.IP
	if c.cfg != nil {
		addr := strings.Split(c.cfg.Cluster.Address, ":")
		self = net.ParseIP(addr[0])
		// if external kvstore is not configured, try swarm-mode config
		if self == nil {
			if agent := c.getAgent(); agent != nil {
				self = net.ParseIP(agent.advertiseAddr)
			}
		}
	}

	if d == nil || cap.ConnectivityScope != datastore.GlobalScope || nodes == nil {
		return
	}

	for _, node := range nodes {
		nodeData := discoverapi.NodeDiscoveryData{Address: node.String(), Self: node.Equal(self)}
		var err error
		if add {
			err = d.DiscoverNew(discoverapi.NodeDiscovery, nodeData)
		} else {
			err = d.DiscoverDelete(discoverapi.NodeDiscovery, nodeData)
		}
		if err != nil {
			logrus.Debugf("discovery notification error: %v", err)
		}
	}
}

// Config returns a copy of the bootup configuration (zero value when unset).
func (c *controller) Config() config.Config {
	c.Lock()
	defer c.Unlock()
	if c.cfg == nil {
		return config.Config{}
	}
	return *c.cfg
}

func (c *controller) isManager() bool {
	c.Lock()
	defer c.Unlock()
	if c.cfg == nil || c.cfg.Daemon.ClusterProvider == nil {
		return false
	}
	return c.cfg.Daemon.ClusterProvider.IsManager()
}

func (c *controller) isAgent() bool {
	c.Lock()
	defer c.Unlock()
	if c.cfg == nil || c.cfg.Daemon.ClusterProvider == nil {
		return false
	}
	return c.cfg.Daemon.ClusterProvider.IsAgent()
}

// isDistributedControl reports whether this node is neither a swarm manager
// nor a swarm agent.
func (c *controller) isDistributedControl() bool {
	return !c.isManager() && !c.isAgent()
}

func (c *controller) GetPluginGetter() plugingetter.PluginGetter {
	return c.drvRegistry.GetPluginGetter()
}

// RegisterDriver is invoked by the driver registry for each new driver; it
// replays current discovery nodes to the driver and notifies the agent.
func (c *controller) RegisterDriver(networkType string, driver driverapi.Driver, capability driverapi.Capability) error {
	c.Lock()
	hd := c.discovery
	c.Unlock()

	if hd != nil {
		c.pushNodeDiscovery(driver, capability, hd.Fetch(), true)
	}

	c.agentDriverNotify(driver)
	return nil
}

// XXX This should be made driver agnostic. See comment below.
const overlayDSROptionString = "dsr"

// NewNetwork creates a new network of the specified network type. The options
// are network specific and modeled in a generic way.
func (c *controller) NewNetwork(networkType, name string, id string, options ...NetworkOption) (Network, error) {
	var (
		cap            *driverapi.Capability
		err            error
		t              *network
		skipCfgEpCount bool
	)

	// When an explicit ID is requested, serialize on it and reject duplicates.
	if id != "" {
		c.networkLocker.Lock(id)
		defer c.networkLocker.Unlock(id)

		if _, err = c.NetworkByID(id); err == nil {
			return nil, NetworkNameError(id)
		}
	}

	if !config.IsValidName(name) {
		return nil, ErrInvalidName(name)
	}

	if id == "" {
		id = stringid.GenerateRandomID()
	}

	defaultIpam := defaultIpamForNetworkType(networkType)
	// Construct the network object
	network := &network{
		name:             name,
		networkType:      networkType,
		generic:          map[string]interface{}{netlabel.GenericData: make(map[string]string)},
		ipamType:         defaultIpam,
		id:               id,
		created:          time.Now(),
		ctrlr:            c,
		persist:          true,
		drvOnce:          &sync.Once{},
		loadBalancerMode: loadBalancerModeDefault,
	}

	network.processOptions(options...)
	if err = network.validateConfiguration(); err != nil {
		return nil, err
	}

	// Reset network types, force local scope and skip allocation and
	// plumbing for configuration networks.
	// Reset of the config-only
	// network drivers is needed so that this special network is not
	// usable by old engine versions.
	if network.configOnly {
		network.scope = datastore.LocalScope
		network.networkType = "null"
		goto addToStore
	}

	_, cap, err = network.resolveDriver(network.networkType, true)
	if err != nil {
		return nil, err
	}

	if network.scope == datastore.LocalScope && cap.DataScope == datastore.GlobalScope {
		return nil, types.ForbiddenErrorf("cannot downgrade network scope for %s networks", networkType)

	}
	if network.ingress && cap.DataScope != datastore.GlobalScope {
		return nil, types.ForbiddenErrorf("Ingress network can only be global scope network")
	}

	// At this point the network scope is still unknown if not set by user
	if (cap.DataScope == datastore.GlobalScope || network.scope == datastore.SwarmScope) &&
		!c.isDistributedControl() && !network.dynamic {
		if c.isManager() {
			// For non-distributed controlled environment, globalscoped non-dynamic networks are redirected to Manager
			return nil, ManagerRedirectError(name)
		}
		return nil, types.ForbiddenErrorf("Cannot create a multi-host network from a worker node. Please create the network from a manager node.")
	}

	if network.scope == datastore.SwarmScope && c.isDistributedControl() {
		return nil, types.ForbiddenErrorf("cannot create a swarm scoped network when swarm is not active")
	}

	// Make sure we have a driver available for this network type
	// before we allocate anything.
	if _, err := network.driver(true); err != nil {
		return nil, err
	}

	// From this point on, we need the network specific configuration,
	// which may come from a configuration-only network
	if network.configFrom != "" {
		t, err = c.getConfigNetwork(network.configFrom)
		if err != nil {
			return nil, types.NotFoundErrorf("configuration network %q does not exist", network.configFrom)
		}
		if err = t.applyConfigurationTo(network); err != nil {
			return nil, types.InternalErrorf("Failed to apply configuration: %v", err)
		}
		network.generic[netlabel.Internal] = network.internal
		// On overall success, bump the config network's refcount so it
		// cannot be removed while this network references it.
		defer func() {
			if err == nil && !skipCfgEpCount {
				if err := t.getEpCnt().IncEndpointCnt(); err != nil {
					logrus.Warnf("Failed to update reference count for configuration network %q on creation of network %q: %v",
						t.Name(), network.Name(), err)
				}
			}
		}()
	}

	err = network.ipamAllocate()
	if err != nil {
		return nil, err
	}
	// Each step below registers a rollback that fires only if a later step
	// sets err; the deferred funcs run in reverse order of registration.
	defer func() {
		if err != nil {
			network.ipamRelease()
		}
	}()

	err = c.addNetwork(network)
	if err != nil {
		if _, ok := err.(types.MaskableError); ok {
			// This error can be ignored and set this boolean
			// value to skip a refcount increment for configOnly networks
			skipCfgEpCount = true
		} else {
			return nil, err
		}
	}
	defer func() {
		if err != nil {
			if e := network.deleteNetwork(); e != nil {
				logrus.Warnf("couldn't roll back driver network on network %s creation failure: %v", network.name, err)
			}
		}
	}()

	// XXX If the driver type is "overlay" check the options for DSR
	// being set. If so, set the network's load balancing mode to DSR.
	// This should really be done in a network option, but due to
	// time pressure to get this in without adding changes to moby,
	// swarm and CLI, it is being implemented as a driver-specific
	// option. Unfortunately, drivers can't influence the core
	// "libnetwork.network" data type. Hence we need this hack code
	// to implement in this manner.
	if gval, ok := network.generic[netlabel.GenericData]; ok && network.networkType == "overlay" {
		optMap := gval.(map[string]string)
		if _, ok := optMap[overlayDSROptionString]; ok {
			network.loadBalancerMode = loadBalancerModeDSR
		}
	}

addToStore:
	// First store the endpoint count, then the network. To avoid to
	// end up with a datastore containing a network and not an epCnt,
	// in case of an ungraceful shutdown during this function call.
	epCnt := &endpointCnt{n: network}
	if err = c.updateToStore(epCnt); err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			if e := c.deleteFromStore(epCnt); e != nil {
				logrus.Warnf("could not rollback from store, epCnt %v on failure (%v): %v", epCnt, err, e)
			}
		}
	}()

	network.epCnt = epCnt
	if err = c.updateToStore(network); err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			if e := c.deleteFromStore(network); e != nil {
				logrus.Warnf("could not rollback from store, network %v on failure (%v): %v", network, err, e)
			}
		}
	}()

	if network.configOnly {
		return network, nil
	}

	joinCluster(network)
	defer func() {
		if err != nil {
			network.cancelDriverWatches()
			if e := network.leaveCluster(); e != nil {
				logrus.Warnf("Failed to leave agent cluster on network %s on failure (%v): %v", network.name, err, e)
			}
		}
	}()

	if network.hasLoadBalancerEndpoint() {
		if err = network.createLoadBalancerSandbox(); err != nil {
			return nil, err
		}
	}

	if !c.isDistributedControl() {
		c.Lock()
		arrangeIngressFilterRule()
		c.Unlock()
	}
	arrangeUserFilterRule()

	return network, nil
}

// joinCluster is a NetworkWalker that joins a (non config-only) network into
// the agent cluster and installs its driver watches. It always returns false
// so that a walk visits every network.
var joinCluster NetworkWalker = func(nw Network) bool {
	n := nw.(*network)
	if n.configOnly {
		return false
	}
	if err := n.joinCluster(); err != nil {
		logrus.Errorf("Failed to join network %s (%s) into agent cluster: %v", n.Name(), n.ID(), err)
	}
	n.addDriverWatches()
	return false
}

// reservePools replays IPAM pool and endpoint-address reservations for
// networks loaded from the local store, for IPAM drivers whose capabilities
// require request replay. Failures are logged and skipped, never fatal.
func (c *controller) reservePools() {
	networks, err := c.getNetworksForScope(datastore.LocalScope)
	if err != nil {
		logrus.Warnf("Could not retrieve networks from local store during ipam allocation for existing networks: %v", err)
		return
	}

	for _, n := range networks {
		if n.configOnly {
			continue
		}
		if !doReplayPoolReserve(n) {
			continue
		}
		// Construct pseudo configs for the auto IP case
		autoIPv4 := (len(n.ipamV4Config) == 0 || (len(n.ipamV4Config) == 1 && n.ipamV4Config[0].PreferredPool == "")) && len(n.ipamV4Info) > 0
		autoIPv6 := (len(n.ipamV6Config) == 0 || (len(n.ipamV6Config) == 1 && n.ipamV6Config[0].PreferredPool == "")) && len(n.ipamV6Info) > 0
		if autoIPv4 {
			n.ipamV4Config = []*IpamConf{{PreferredPool: n.ipamV4Info[0].Pool.String()}}
		}
		if n.enableIPv6 && autoIPv6 {
			n.ipamV6Config = []*IpamConf{{PreferredPool: n.ipamV6Info[0].Pool.String()}}
		}
		// Account current network gateways
		for i, c := range n.ipamV4Config {
			if c.Gateway == "" && n.ipamV4Info[i].Gateway != nil {
				c.Gateway = n.ipamV4Info[i].Gateway.IP.String()
			}
		}
		if n.enableIPv6 {
			for i, c := range n.ipamV6Config {
				if c.Gateway == "" && n.ipamV6Info[i].Gateway != nil {
					c.Gateway = n.ipamV6Info[i].Gateway.IP.String()
				}
			}
		}
		// Reserve pools
		if err := n.ipamAllocate(); err != nil {
			logrus.Warnf("Failed to allocate ipam pool(s) for network %q (%s): %v", n.Name(), n.ID(), err)
		}
		// Reserve existing endpoints' addresses
		ipam, _, err := n.getController().getIPAMDriver(n.ipamType)
		if err != nil {
			logrus.Warnf("Failed to retrieve ipam driver for network %q (%s) during address reservation", n.Name(), n.ID())
			continue
		}
		epl, err := n.getEndpointsFromStore()
		if err != nil {
			logrus.Warnf("Failed to retrieve list of current endpoints on network %q (%s)", n.Name(), n.ID())
			continue
		}
		for _, ep := range epl {
			if ep.Iface() == nil {
				logrus.Warnf("endpoint interface is empty for %q (%s)", ep.Name(), ep.ID())
				continue
			}
			if err := ep.assignAddress(ipam, true, ep.Iface().AddressIPv6() != nil); err != nil {
				logrus.Warnf("Failed to reserve current address for endpoint %q (%s) on network %q (%s)",
					ep.Name(), ep.ID(), n.Name(), n.ID())
			}
		}
	}
}

// doReplayPoolReserve reports whether the network's IPAM driver requires its
// pool/address requests to be replayed after a restart.
func doReplayPoolReserve(n *network) bool {
	_, caps, err := n.getController().getIPAMDriver(n.ipamType)
	if err != nil {
		logrus.Warnf("Failed to retrieve ipam driver for network %q (%s): %v", n.Name(), n.ID(), err)
		return false
	}
	return caps.RequiresRequestReplay
}

// addNetwork asks the network's driver to create the network, then starts
// the embedded DNS resolver for it.
func (c *controller) addNetwork(n *network) error {
	d, err := n.driver(true)
	if err != nil {
		return err
	}

	// Create the network
	if err := d.CreateNetwork(n.id, n.generic, n, n.getIPData(4), n.getIPData(6)); err != nil {
		return err
	}

	n.startResolver()

	return nil
}

// Networks returns the list of Network(s) managed by this controller,
// excluding networks that are being deleted.
func (c *controller) Networks() []Network {
	var list []Network

	for _, n := range c.getNetworksFromStore() {
		if n.inDelete {
			continue
		}
		list = append(list, n)
	}

	return list
}

// WalkNetworks uses the provided function to walk the Network(s) managed by
// this controller; the walk stops when walker returns true.
func (c *controller) WalkNetworks(walker NetworkWalker) {
	for _, n := range c.Networks() {
		if walker(n) {
			return
		}
	}
}

// NetworkByName returns the Network which has the passed name.
// If not found, the error ErrNoSuchNetwork is returned.
func (c *controller) NetworkByName(name string) (Network, error) {
	if name == "" {
		return nil, ErrInvalidName(name)
	}
	var n Network

	s := func(current Network) bool {
		if current.Name() == name {
			n = current
			return true
		}
		return false
	}

	c.WalkNetworks(s)

	if n == nil {
1058 return nil, ErrNoSuchNetwork(name) 1059 } 1060 1061 return n, nil 1062} 1063 1064func (c *controller) NetworkByID(id string) (Network, error) { 1065 if id == "" { 1066 return nil, ErrInvalidID(id) 1067 } 1068 1069 n, err := c.getNetworkFromStore(id) 1070 if err != nil { 1071 return nil, ErrNoSuchNetwork(id) 1072 } 1073 1074 return n, nil 1075} 1076 1077// NewSandbox creates a new sandbox for the passed container id 1078func (c *controller) NewSandbox(containerID string, options ...SandboxOption) (Sandbox, error) { 1079 if containerID == "" { 1080 return nil, types.BadRequestErrorf("invalid container ID") 1081 } 1082 1083 var sb *sandbox 1084 c.Lock() 1085 for _, s := range c.sandboxes { 1086 if s.containerID == containerID { 1087 // If not a stub, then we already have a complete sandbox. 1088 if !s.isStub { 1089 sbID := s.ID() 1090 c.Unlock() 1091 return nil, types.ForbiddenErrorf("container %s is already present in sandbox %s", containerID, sbID) 1092 } 1093 1094 // We already have a stub sandbox from the 1095 // store. Make use of it so that we don't lose 1096 // the endpoints from store but reset the 1097 // isStub flag. 1098 sb = s 1099 sb.isStub = false 1100 break 1101 } 1102 } 1103 c.Unlock() 1104 1105 sandboxID := stringid.GenerateRandomID() 1106 if runtime.GOOS == "windows" { 1107 sandboxID = containerID 1108 } 1109 1110 // Create sandbox and process options first. Key generation depends on an option 1111 if sb == nil { 1112 sb = &sandbox{ 1113 id: sandboxID, 1114 containerID: containerID, 1115 endpoints: []*endpoint{}, 1116 epPriority: map[string]int{}, 1117 populatedEndpoints: map[string]struct{}{}, 1118 config: containerConfig{}, 1119 controller: c, 1120 extDNS: []extDNSEntry{}, 1121 } 1122 } 1123 1124 sb.processOptions(options...) 
1125 1126 c.Lock() 1127 if sb.ingress && c.ingressSandbox != nil { 1128 c.Unlock() 1129 return nil, types.ForbiddenErrorf("ingress sandbox already present") 1130 } 1131 1132 if sb.ingress { 1133 c.ingressSandbox = sb 1134 sb.config.hostsPath = filepath.Join(c.cfg.Daemon.DataDir, "/network/files/hosts") 1135 sb.config.resolvConfPath = filepath.Join(c.cfg.Daemon.DataDir, "/network/files/resolv.conf") 1136 sb.id = "ingress_sbox" 1137 } else if sb.loadBalancerNID != "" { 1138 sb.id = "lb_" + sb.loadBalancerNID 1139 } 1140 c.Unlock() 1141 1142 var err error 1143 defer func() { 1144 if err != nil { 1145 c.Lock() 1146 if sb.ingress { 1147 c.ingressSandbox = nil 1148 } 1149 c.Unlock() 1150 } 1151 }() 1152 1153 if err = sb.setupResolutionFiles(); err != nil { 1154 return nil, err 1155 } 1156 1157 if sb.config.useDefaultSandBox { 1158 c.sboxOnce.Do(func() { 1159 c.defOsSbox, err = osl.NewSandbox(sb.Key(), false, false) 1160 }) 1161 1162 if err != nil { 1163 c.sboxOnce = sync.Once{} 1164 return nil, fmt.Errorf("failed to create default sandbox: %v", err) 1165 } 1166 1167 sb.osSbox = c.defOsSbox 1168 } 1169 1170 if sb.osSbox == nil && !sb.config.useExternalKey { 1171 if sb.osSbox, err = osl.NewSandbox(sb.Key(), !sb.config.useDefaultSandBox, false); err != nil { 1172 return nil, fmt.Errorf("failed to create new osl sandbox: %v", err) 1173 } 1174 } 1175 1176 if sb.osSbox != nil { 1177 // Apply operating specific knobs on the load balancer sandbox 1178 sb.osSbox.ApplyOSTweaks(sb.oslTypes) 1179 } 1180 1181 c.Lock() 1182 c.sandboxes[sb.id] = sb 1183 c.Unlock() 1184 defer func() { 1185 if err != nil { 1186 c.Lock() 1187 delete(c.sandboxes, sb.id) 1188 c.Unlock() 1189 } 1190 }() 1191 1192 err = sb.storeUpdate() 1193 if err != nil { 1194 return nil, fmt.Errorf("failed to update the store state of sandbox: %v", err) 1195 } 1196 1197 return sb, nil 1198} 1199 1200func (c *controller) Sandboxes() []Sandbox { 1201 c.Lock() 1202 defer c.Unlock() 1203 1204 list := make([]Sandbox, 0, 
len(c.sandboxes)) 1205 for _, s := range c.sandboxes { 1206 // Hide stub sandboxes from libnetwork users 1207 if s.isStub { 1208 continue 1209 } 1210 1211 list = append(list, s) 1212 } 1213 1214 return list 1215} 1216 1217func (c *controller) WalkSandboxes(walker SandboxWalker) { 1218 for _, sb := range c.Sandboxes() { 1219 if walker(sb) { 1220 return 1221 } 1222 } 1223} 1224 1225func (c *controller) SandboxByID(id string) (Sandbox, error) { 1226 if id == "" { 1227 return nil, ErrInvalidID(id) 1228 } 1229 c.Lock() 1230 s, ok := c.sandboxes[id] 1231 c.Unlock() 1232 if !ok { 1233 return nil, types.NotFoundErrorf("sandbox %s not found", id) 1234 } 1235 return s, nil 1236} 1237 1238// SandboxDestroy destroys a sandbox given a container ID 1239func (c *controller) SandboxDestroy(id string) error { 1240 var sb *sandbox 1241 c.Lock() 1242 for _, s := range c.sandboxes { 1243 if s.containerID == id { 1244 sb = s 1245 break 1246 } 1247 } 1248 c.Unlock() 1249 1250 // It is not an error if sandbox is not available 1251 if sb == nil { 1252 return nil 1253 } 1254 1255 return sb.Delete() 1256} 1257 1258// SandboxContainerWalker returns a Sandbox Walker function which looks for an existing Sandbox with the passed containerID 1259func SandboxContainerWalker(out *Sandbox, containerID string) SandboxWalker { 1260 return func(sb Sandbox) bool { 1261 if sb.ContainerID() == containerID { 1262 *out = sb 1263 return true 1264 } 1265 return false 1266 } 1267} 1268 1269// SandboxKeyWalker returns a Sandbox Walker function which looks for an existing Sandbox with the passed key 1270func SandboxKeyWalker(out *Sandbox, key string) SandboxWalker { 1271 return func(sb Sandbox) bool { 1272 if sb.Key() == key { 1273 *out = sb 1274 return true 1275 } 1276 return false 1277 } 1278} 1279 1280func (c *controller) loadDriver(networkType string) error { 1281 var err error 1282 1283 if pg := c.GetPluginGetter(); pg != nil { 1284 _, err = pg.Get(networkType, driverapi.NetworkPluginEndpointType, 
plugingetter.Lookup) 1285 } else { 1286 _, err = plugins.Get(networkType, driverapi.NetworkPluginEndpointType) 1287 } 1288 1289 if err != nil { 1290 if errors.Cause(err) == plugins.ErrNotFound { 1291 return types.NotFoundErrorf(err.Error()) 1292 } 1293 return err 1294 } 1295 1296 return nil 1297} 1298 1299func (c *controller) loadIPAMDriver(name string) error { 1300 var err error 1301 1302 if pg := c.GetPluginGetter(); pg != nil { 1303 _, err = pg.Get(name, ipamapi.PluginEndpointType, plugingetter.Lookup) 1304 } else { 1305 _, err = plugins.Get(name, ipamapi.PluginEndpointType) 1306 } 1307 1308 if err != nil { 1309 if errors.Cause(err) == plugins.ErrNotFound { 1310 return types.NotFoundErrorf(err.Error()) 1311 } 1312 return err 1313 } 1314 1315 return nil 1316} 1317 1318func (c *controller) getIPAMDriver(name string) (ipamapi.Ipam, *ipamapi.Capability, error) { 1319 id, cap := c.drvRegistry.IPAM(name) 1320 if id == nil { 1321 // Might be a plugin name. Try loading it 1322 if err := c.loadIPAMDriver(name); err != nil { 1323 return nil, nil, err 1324 } 1325 1326 // Now that we resolved the plugin, try again looking up the registry 1327 id, cap = c.drvRegistry.IPAM(name) 1328 if id == nil { 1329 return nil, nil, types.BadRequestErrorf("invalid ipam driver: %q", name) 1330 } 1331 } 1332 1333 return id, cap, nil 1334} 1335 1336func (c *controller) Stop() { 1337 c.closeStores() 1338 c.stopExternalKeyListener() 1339 osl.GC() 1340} 1341 1342// StartDiagnostic start the network dias mode 1343func (c *controller) StartDiagnostic(port int) { 1344 c.Lock() 1345 if !c.DiagnosticServer.IsDiagnosticEnabled() { 1346 c.DiagnosticServer.EnableDiagnostic("127.0.0.1", port) 1347 } 1348 c.Unlock() 1349} 1350 1351// StopDiagnostic start the network dias mode 1352func (c *controller) StopDiagnostic() { 1353 c.Lock() 1354 if c.DiagnosticServer.IsDiagnosticEnabled() { 1355 c.DiagnosticServer.DisableDiagnostic() 1356 } 1357 c.Unlock() 1358} 1359 1360// IsDiagnosticEnabled returns true if 
the dias is enabled 1361func (c *controller) IsDiagnosticEnabled() bool { 1362 c.Lock() 1363 defer c.Unlock() 1364 return c.DiagnosticServer.IsDiagnosticEnabled() 1365} 1366 1367func (c *controller) iptablesEnabled() bool { 1368 c.Lock() 1369 defer c.Unlock() 1370 1371 if c.cfg == nil { 1372 return false 1373 } 1374 // parse map cfg["bridge"]["generic"]["EnableIPTable"] 1375 cfgBridge, ok := c.cfg.Daemon.DriverCfg["bridge"].(map[string]interface{}) 1376 if !ok { 1377 return false 1378 } 1379 cfgGeneric, ok := cfgBridge[netlabel.GenericData].(options.Generic) 1380 if !ok { 1381 return false 1382 } 1383 enabled, ok := cfgGeneric["EnableIPTables"].(bool) 1384 if !ok { 1385 // unless user explicitly stated, assume iptable is enabled 1386 enabled = true 1387 } 1388 return enabled 1389} 1390