1package command 2 3import ( 4 "context" 5 "crypto/sha256" 6 "encoding/base64" 7 "encoding/hex" 8 "fmt" 9 "io" 10 "io/ioutil" 11 "net" 12 "net/http" 13 "net/url" 14 "os" 15 "path/filepath" 16 "runtime" 17 "sort" 18 "strconv" 19 "strings" 20 "sync" 21 "time" 22 23 "github.com/hashicorp/vault/helper/metricsutil" 24 25 metrics "github.com/armon/go-metrics" 26 "github.com/armon/go-metrics/circonus" 27 "github.com/armon/go-metrics/datadog" 28 "github.com/armon/go-metrics/prometheus" 29 "github.com/hashicorp/errwrap" 30 log "github.com/hashicorp/go-hclog" 31 multierror "github.com/hashicorp/go-multierror" 32 sockaddr "github.com/hashicorp/go-sockaddr" 33 "github.com/hashicorp/vault/audit" 34 "github.com/hashicorp/vault/command/server" 35 serverseal "github.com/hashicorp/vault/command/server/seal" 36 "github.com/hashicorp/vault/helper/builtinplugins" 37 gatedwriter "github.com/hashicorp/vault/helper/gated-writer" 38 "github.com/hashicorp/vault/helper/namespace" 39 "github.com/hashicorp/vault/helper/reload" 40 vaulthttp "github.com/hashicorp/vault/http" 41 "github.com/hashicorp/vault/sdk/helper/jsonutil" 42 "github.com/hashicorp/vault/sdk/helper/logging" 43 "github.com/hashicorp/vault/sdk/helper/mlock" 44 "github.com/hashicorp/vault/sdk/helper/parseutil" 45 "github.com/hashicorp/vault/sdk/logical" 46 "github.com/hashicorp/vault/sdk/physical" 47 "github.com/hashicorp/vault/sdk/version" 48 "github.com/hashicorp/vault/vault" 49 vaultseal "github.com/hashicorp/vault/vault/seal" 50 shamirseal "github.com/hashicorp/vault/vault/seal/shamir" 51 "github.com/mitchellh/cli" 52 testing "github.com/mitchellh/go-testing-interface" 53 "github.com/posener/complete" 54 "google.golang.org/grpc/grpclog" 55) 56 57var _ cli.Command = (*ServerCommand)(nil) 58var _ cli.CommandAutocomplete = (*ServerCommand)(nil) 59 60var memProfilerEnabled = false 61 62const storageMigrationLock = "core/migration" 63 64type ServerCommand struct { 65 *BaseCommand 66 67 AuditBackends map[string]audit.Factory 68 
CredentialBackends map[string]logical.Factory 69 LogicalBackends map[string]logical.Factory 70 PhysicalBackends map[string]physical.Factory 71 72 ShutdownCh chan struct{} 73 SighupCh chan struct{} 74 SigUSR2Ch chan struct{} 75 76 WaitGroup *sync.WaitGroup 77 78 logWriter io.Writer 79 logGate *gatedwriter.Writer 80 logger log.Logger 81 82 cleanupGuard sync.Once 83 84 reloadFuncsLock *sync.RWMutex 85 reloadFuncs *map[string][]reload.ReloadFunc 86 startedCh chan (struct{}) // for tests 87 reloadedCh chan (struct{}) // for tests 88 89 // new stuff 90 flagConfigs []string 91 flagLogLevel string 92 flagLogFormat string 93 flagDev bool 94 flagDevRootTokenID string 95 flagDevListenAddr string 96 flagDevNoStoreToken bool 97 flagDevPluginDir string 98 flagDevPluginInit bool 99 flagDevHA bool 100 flagDevLatency int 101 flagDevLatencyJitter int 102 flagDevLeasedKV bool 103 flagDevKVV1 bool 104 flagDevSkipInit bool 105 flagDevThreeNode bool 106 flagDevFourCluster bool 107 flagDevTransactional bool 108 flagDevAutoSeal bool 109 flagTestVerifyOnly bool 110 flagCombineLogs bool 111 flagTestServerConfig bool 112 flagDevConsul bool 113} 114 115type ServerListener struct { 116 net.Listener 117 config map[string]interface{} 118 maxRequestSize int64 119 maxRequestDuration time.Duration 120} 121 122func (c *ServerCommand) Synopsis() string { 123 return "Start a Vault server" 124} 125 126func (c *ServerCommand) Help() string { 127 helpText := ` 128Usage: vault server [options] 129 130 This command starts a Vault server that responds to API requests. By default, 131 Vault will start in a "sealed" state. The Vault cluster must be initialized 132 before use, usually by the "vault operator init" command. Each Vault server must 133 also be unsealed using the "vault operator unseal" command or the API before the 134 server can respond to requests. 
135 136 Start a server with a configuration file: 137 138 $ vault server -config=/etc/vault/config.hcl 139 140 Run in "dev" mode: 141 142 $ vault server -dev -dev-root-token-id="root" 143 144 For a full list of examples, please see the documentation. 145 146` + c.Flags().Help() 147 return strings.TrimSpace(helpText) 148} 149 150func (c *ServerCommand) Flags() *FlagSets { 151 set := c.flagSet(FlagSetHTTP) 152 153 f := set.NewFlagSet("Command Options") 154 155 f.StringSliceVar(&StringSliceVar{ 156 Name: "config", 157 Target: &c.flagConfigs, 158 Completion: complete.PredictOr( 159 complete.PredictFiles("*.hcl"), 160 complete.PredictFiles("*.json"), 161 complete.PredictDirs("*"), 162 ), 163 Usage: "Path to a configuration file or directory of configuration " + 164 "files. This flag can be specified multiple times to load multiple " + 165 "configurations. If the path is a directory, all files which end in " + 166 ".hcl or .json are loaded.", 167 }) 168 169 f.StringVar(&StringVar{ 170 Name: "log-level", 171 Target: &c.flagLogLevel, 172 Default: notSetValue, 173 EnvVar: "VAULT_LOG_LEVEL", 174 Completion: complete.PredictSet("trace", "debug", "info", "warn", "err"), 175 Usage: "Log verbosity level. Supported values (in order of detail) are " + 176 "\"trace\", \"debug\", \"info\", \"warn\", and \"err\".", 177 }) 178 179 f.StringVar(&StringVar{ 180 Name: "log-format", 181 Target: &c.flagLogFormat, 182 Default: notSetValue, 183 // EnvVar can't be just "VAULT_LOG_FORMAT", because more than one env var name is supported 184 // for backwards compatibility reasons. 185 // See github.com/hashicorp/vault/sdk/helper/logging.ParseEnvLogFormat() 186 Completion: complete.PredictSet("standard", "json"), 187 Usage: `Log format. Supported values are "standard" and "json".`, 188 }) 189 190 f = set.NewFlagSet("Dev Options") 191 192 f.BoolVar(&BoolVar{ 193 Name: "dev", 194 Target: &c.flagDev, 195 Usage: "Enable development mode. 
In this mode, Vault runs in-memory and " + 196 "starts unsealed. As the name implies, do not run \"dev\" mode in " + 197 "production.", 198 }) 199 200 f.StringVar(&StringVar{ 201 Name: "dev-root-token-id", 202 Target: &c.flagDevRootTokenID, 203 Default: "", 204 EnvVar: "VAULT_DEV_ROOT_TOKEN_ID", 205 Usage: "Initial root token. This only applies when running in \"dev\" " + 206 "mode.", 207 }) 208 209 f.StringVar(&StringVar{ 210 Name: "dev-listen-address", 211 Target: &c.flagDevListenAddr, 212 Default: "127.0.0.1:8200", 213 EnvVar: "VAULT_DEV_LISTEN_ADDRESS", 214 Usage: "Address to bind to in \"dev\" mode.", 215 }) 216 f.BoolVar(&BoolVar{ 217 Name: "dev-no-store-token", 218 Target: &c.flagDevNoStoreToken, 219 Default: false, 220 Usage: "Do not persist the dev root token to the token helper " + 221 "(usually the local filesystem) for use in future requests. " + 222 "The token will only be displayed in the command output.", 223 }) 224 225 // Internal-only flags to follow. 226 // 227 // Why hello there little source code reader! Welcome to the Vault source 228 // code. The remaining options are intentionally undocumented and come with 229 // no warranty or backwards-compatibility promise. Do not use these flags 230 // in production. Do not build automation using these flags. Unless you are 231 // developing against Vault, you should not need any of these flags. 
232 233 f.StringVar(&StringVar{ 234 Name: "dev-plugin-dir", 235 Target: &c.flagDevPluginDir, 236 Default: "", 237 Completion: complete.PredictDirs("*"), 238 Hidden: true, 239 }) 240 241 f.BoolVar(&BoolVar{ 242 Name: "dev-plugin-init", 243 Target: &c.flagDevPluginInit, 244 Default: true, 245 Hidden: true, 246 }) 247 248 f.BoolVar(&BoolVar{ 249 Name: "dev-ha", 250 Target: &c.flagDevHA, 251 Default: false, 252 Hidden: true, 253 }) 254 255 f.BoolVar(&BoolVar{ 256 Name: "dev-transactional", 257 Target: &c.flagDevTransactional, 258 Default: false, 259 Hidden: true, 260 }) 261 262 f.IntVar(&IntVar{ 263 Name: "dev-latency", 264 Target: &c.flagDevLatency, 265 Hidden: true, 266 }) 267 268 f.IntVar(&IntVar{ 269 Name: "dev-latency-jitter", 270 Target: &c.flagDevLatencyJitter, 271 Hidden: true, 272 }) 273 274 f.BoolVar(&BoolVar{ 275 Name: "dev-leased-kv", 276 Target: &c.flagDevLeasedKV, 277 Default: false, 278 Hidden: true, 279 }) 280 281 f.BoolVar(&BoolVar{ 282 Name: "dev-kv-v1", 283 Target: &c.flagDevKVV1, 284 Default: false, 285 Hidden: true, 286 }) 287 288 f.BoolVar(&BoolVar{ 289 Name: "dev-auto-seal", 290 Target: &c.flagDevAutoSeal, 291 Default: false, 292 Hidden: true, 293 }) 294 295 f.BoolVar(&BoolVar{ 296 Name: "dev-skip-init", 297 Target: &c.flagDevSkipInit, 298 Default: false, 299 Hidden: true, 300 }) 301 302 f.BoolVar(&BoolVar{ 303 Name: "dev-three-node", 304 Target: &c.flagDevThreeNode, 305 Default: false, 306 Hidden: true, 307 }) 308 309 f.BoolVar(&BoolVar{ 310 Name: "dev-four-cluster", 311 Target: &c.flagDevFourCluster, 312 Default: false, 313 Hidden: true, 314 }) 315 316 f.BoolVar(&BoolVar{ 317 Name: "dev-consul", 318 Target: &c.flagDevConsul, 319 Default: false, 320 Hidden: true, 321 }) 322 323 // TODO: should the below flags be public? 
324 f.BoolVar(&BoolVar{ 325 Name: "combine-logs", 326 Target: &c.flagCombineLogs, 327 Default: false, 328 Hidden: true, 329 }) 330 331 f.BoolVar(&BoolVar{ 332 Name: "test-verify-only", 333 Target: &c.flagTestVerifyOnly, 334 Default: false, 335 Hidden: true, 336 }) 337 338 f.BoolVar(&BoolVar{ 339 Name: "test-server-config", 340 Target: &c.flagTestServerConfig, 341 Default: false, 342 Hidden: true, 343 }) 344 345 // End internal-only flags. 346 347 return set 348} 349 350func (c *ServerCommand) AutocompleteArgs() complete.Predictor { 351 return complete.PredictNothing 352} 353 354func (c *ServerCommand) AutocompleteFlags() complete.Flags { 355 return c.Flags().Completions() 356} 357 358func (c *ServerCommand) Run(args []string) int { 359 f := c.Flags() 360 361 if err := f.Parse(args); err != nil { 362 c.UI.Error(err.Error()) 363 return 1 364 } 365 366 // Automatically enable dev mode if other dev flags are provided. 367 if c.flagDevHA || c.flagDevTransactional || c.flagDevLeasedKV || c.flagDevThreeNode || c.flagDevFourCluster || c.flagDevAutoSeal || c.flagDevKVV1 { 368 c.flagDev = true 369 } 370 371 // Validation 372 if !c.flagDev { 373 switch { 374 case len(c.flagConfigs) == 0: 375 c.UI.Error("Must specify at least one config path using -config") 376 return 1 377 case c.flagDevRootTokenID != "": 378 c.UI.Warn(wrapAtLength( 379 "You cannot specify a custom root token ID outside of \"dev\" mode. 
" + 380 "Your request has been ignored.")) 381 c.flagDevRootTokenID = "" 382 } 383 } 384 385 // Load the configuration 386 var config *server.Config 387 if c.flagDev { 388 var devStorageType string 389 switch { 390 case c.flagDevConsul: 391 devStorageType = "consul" 392 case c.flagDevHA && c.flagDevTransactional: 393 devStorageType = "inmem_transactional_ha" 394 case !c.flagDevHA && c.flagDevTransactional: 395 devStorageType = "inmem_transactional" 396 case c.flagDevHA && !c.flagDevTransactional: 397 devStorageType = "inmem_ha" 398 default: 399 devStorageType = "inmem" 400 } 401 config = server.DevConfig(devStorageType) 402 if c.flagDevListenAddr != "" { 403 config.Listeners[0].Config["address"] = c.flagDevListenAddr 404 } 405 } 406 for _, path := range c.flagConfigs { 407 current, err := server.LoadConfig(path) 408 if err != nil { 409 c.UI.Error(fmt.Sprintf("Error loading configuration from %s: %s", path, err)) 410 return 1 411 } 412 413 if config == nil { 414 config = current 415 } else { 416 config = config.Merge(current) 417 } 418 } 419 420 // Ensure at least one config was found. 421 if config == nil { 422 c.UI.Output(wrapAtLength( 423 "No configuration files found. Please provide configurations with the " + 424 "-config flag. If you are supplying the path to a directory, please " + 425 "ensure the directory contains files with the .hcl or .json " + 426 "extension.")) 427 return 1 428 } 429 430 // Create a logger. We wrap it in a gated writer so that it doesn't 431 // start logging too early. 
432 c.logGate = &gatedwriter.Writer{Writer: os.Stderr} 433 c.logWriter = c.logGate 434 if c.flagCombineLogs { 435 c.logWriter = os.Stdout 436 } 437 var level log.Level 438 var logLevelWasNotSet bool 439 logLevelString := c.flagLogLevel 440 c.flagLogLevel = strings.ToLower(strings.TrimSpace(c.flagLogLevel)) 441 switch c.flagLogLevel { 442 case notSetValue, "": 443 logLevelWasNotSet = true 444 logLevelString = "info" 445 level = log.Info 446 case "trace": 447 level = log.Trace 448 case "debug": 449 level = log.Debug 450 case "notice", "info": 451 level = log.Info 452 case "warn", "warning": 453 level = log.Warn 454 case "err", "error": 455 level = log.Error 456 default: 457 c.UI.Error(fmt.Sprintf("Unknown log level: %s", c.flagLogLevel)) 458 return 1 459 } 460 461 logFormat := logging.UnspecifiedFormat 462 if c.flagLogFormat != notSetValue { 463 var err error 464 logFormat, err = logging.ParseLogFormat(c.flagLogFormat) 465 if err != nil { 466 c.UI.Error(err.Error()) 467 return 1 468 } 469 } 470 if logFormat == logging.UnspecifiedFormat { 471 logFormat = logging.ParseEnvLogFormat() 472 } 473 if logFormat == logging.UnspecifiedFormat { 474 var err error 475 logFormat, err = logging.ParseLogFormat(config.LogFormat) 476 if err != nil { 477 c.UI.Error(err.Error()) 478 return 1 479 } 480 } 481 482 if c.flagDevThreeNode || c.flagDevFourCluster { 483 c.logger = log.New(&log.LoggerOptions{ 484 Mutex: &sync.Mutex{}, 485 Output: c.logWriter, 486 Level: log.Trace, 487 }) 488 } else { 489 c.logger = log.New(&log.LoggerOptions{ 490 Output: c.logWriter, 491 Level: level, 492 // Note that if logFormat is either unspecified or standard, then 493 // the resulting logger's format will be standard. 
494 JSONFormat: logFormat == logging.JSONFormat, 495 }) 496 } 497 498 allLoggers := []log.Logger{c.logger} 499 500 // adjust log level based on config setting 501 if config.LogLevel != "" && logLevelWasNotSet { 502 configLogLevel := strings.ToLower(strings.TrimSpace(config.LogLevel)) 503 logLevelString = configLogLevel 504 switch configLogLevel { 505 case "trace": 506 c.logger.SetLevel(log.Trace) 507 case "debug": 508 c.logger.SetLevel(log.Debug) 509 case "notice", "info", "": 510 c.logger.SetLevel(log.Info) 511 case "warn", "warning": 512 c.logger.SetLevel(log.Warn) 513 case "err", "error": 514 c.logger.SetLevel(log.Error) 515 default: 516 c.UI.Error(fmt.Sprintf("Unknown log level: %s", config.LogLevel)) 517 return 1 518 } 519 } 520 521 // create GRPC logger 522 namedGRPCLogFaker := c.logger.Named("grpclogfaker") 523 allLoggers = append(allLoggers, namedGRPCLogFaker) 524 grpclog.SetLogger(&grpclogFaker{ 525 logger: namedGRPCLogFaker, 526 log: os.Getenv("VAULT_GRPC_LOGGING") != "", 527 }) 528 529 if memProfilerEnabled { 530 c.startMemProfiler() 531 } 532 533 // Ensure that a backend is provided 534 if config.Storage == nil { 535 c.UI.Output("A storage backend must be specified") 536 return 1 537 } 538 539 if config.DefaultMaxRequestDuration != 0 { 540 vault.DefaultMaxRequestDuration = config.DefaultMaxRequestDuration 541 } 542 543 // If mlockall(2) isn't supported, show a warning. We disable this in dev 544 // because it is quite scary to see when first using Vault. We also disable 545 // this if the user has explicitly disabled mlock in configuration. 546 if !c.flagDev && !config.DisableMlock && !mlock.Supported() { 547 c.UI.Warn(wrapAtLength( 548 "WARNING! mlock is not supported on this system! An mlockall(2)-like " + 549 "syscall to prevent memory from being swapped to disk is not " + 550 "supported on this system. For better security, only run Vault on " + 551 "systems where this call is supported. 
If you are running Vault " + 552 "in a Docker container, provide the IPC_LOCK cap to the container.")) 553 } 554 555 metricsHelper, err := c.setupTelemetry(config) 556 if err != nil { 557 c.UI.Error(fmt.Sprintf("Error initializing telemetry: %s", err)) 558 return 1 559 } 560 561 // Initialize the backend 562 factory, exists := c.PhysicalBackends[config.Storage.Type] 563 if !exists { 564 c.UI.Error(fmt.Sprintf("Unknown storage type %s", config.Storage.Type)) 565 return 1 566 } 567 if config.Storage.Type == "raft" && len(config.ClusterAddr) == 0 { 568 c.UI.Error("Cluster address must be set when using raft storage") 569 return 1 570 } 571 namedStorageLogger := c.logger.Named("storage." + config.Storage.Type) 572 allLoggers = append(allLoggers, namedStorageLogger) 573 backend, err := factory(config.Storage.Config, namedStorageLogger) 574 if err != nil { 575 c.UI.Error(fmt.Sprintf("Error initializing storage of type %s: %s", config.Storage.Type, err)) 576 return 1 577 } 578 579 // Prevent server startup if migration is active 580 if c.storageMigrationActive(backend) { 581 return 1 582 } 583 584 infoKeys := make([]string, 0, 10) 585 info := make(map[string]string) 586 info["log level"] = logLevelString 587 infoKeys = append(infoKeys, "log level") 588 589 var barrierSeal vault.Seal 590 var unwrapSeal vault.Seal 591 592 var sealConfigError error 593 if c.flagDevAutoSeal { 594 barrierSeal = vault.NewAutoSeal(vaultseal.NewTestSeal(nil)) 595 } else { 596 // Handle the case where no seal is provided 597 switch len(config.Seals) { 598 case 0: 599 config.Seals = append(config.Seals, &server.Seal{Type: vaultseal.Shamir}) 600 case 1: 601 // If there's only one seal and it's disabled assume they want to 602 // migrate to a shamir seal and simply didn't provide it 603 if config.Seals[0].Disabled { 604 config.Seals = append(config.Seals, &server.Seal{Type: vaultseal.Shamir}) 605 } 606 } 607 for _, configSeal := range config.Seals { 608 sealType := vaultseal.Shamir 609 if 
!configSeal.Disabled && os.Getenv("VAULT_SEAL_TYPE") != "" { 610 sealType = os.Getenv("VAULT_SEAL_TYPE") 611 configSeal.Type = sealType 612 } else { 613 sealType = configSeal.Type 614 } 615 616 var seal vault.Seal 617 sealLogger := c.logger.Named(sealType) 618 allLoggers = append(allLoggers, sealLogger) 619 seal, sealConfigError = serverseal.ConfigureSeal(configSeal, &infoKeys, &info, sealLogger, vault.NewDefaultSeal(shamirseal.NewSeal(c.logger.Named("shamir")))) 620 if sealConfigError != nil { 621 if !errwrap.ContainsType(sealConfigError, new(logical.KeyNotFoundError)) { 622 c.UI.Error(fmt.Sprintf( 623 "Error parsing Seal configuration: %s", sealConfigError)) 624 return 1 625 } 626 } 627 if seal == nil { 628 c.UI.Error(fmt.Sprintf( 629 "After configuring seal nil returned, seal type was %s", sealType)) 630 return 1 631 } 632 633 if configSeal.Disabled { 634 unwrapSeal = seal 635 } else { 636 barrierSeal = seal 637 } 638 639 // Ensure that the seal finalizer is called, even if using verify-only 640 defer func() { 641 err = seal.Finalize(context.Background()) 642 if err != nil { 643 c.UI.Error(fmt.Sprintf("Error finalizing seals: %v", err)) 644 } 645 }() 646 647 } 648 } 649 650 if barrierSeal == nil { 651 c.UI.Error(fmt.Sprintf("Could not create barrier seal! 
Most likely proper Seal configuration information was not set, but no error was generated.")) 652 return 1 653 } 654 655 coreConfig := &vault.CoreConfig{ 656 Physical: backend, 657 RedirectAddr: config.Storage.RedirectAddr, 658 HAPhysical: nil, 659 Seal: barrierSeal, 660 AuditBackends: c.AuditBackends, 661 CredentialBackends: c.CredentialBackends, 662 LogicalBackends: c.LogicalBackends, 663 Logger: c.logger, 664 DisableCache: config.DisableCache, 665 DisableMlock: config.DisableMlock, 666 MaxLeaseTTL: config.MaxLeaseTTL, 667 DefaultLeaseTTL: config.DefaultLeaseTTL, 668 ClusterName: config.ClusterName, 669 CacheSize: config.CacheSize, 670 PluginDirectory: config.PluginDirectory, 671 EnableUI: config.EnableUI, 672 EnableRaw: config.EnableRawEndpoint, 673 DisableSealWrap: config.DisableSealWrap, 674 DisablePerformanceStandby: config.DisablePerformanceStandby, 675 DisableIndexing: config.DisableIndexing, 676 AllLoggers: allLoggers, 677 BuiltinRegistry: builtinplugins.Registry, 678 DisableKeyEncodingChecks: config.DisablePrintableCheck, 679 MetricsHelper: metricsHelper, 680 } 681 if c.flagDev { 682 coreConfig.DevToken = c.flagDevRootTokenID 683 if c.flagDevLeasedKV { 684 coreConfig.LogicalBackends["kv"] = vault.LeasedPassthroughBackendFactory 685 } 686 if c.flagDevPluginDir != "" { 687 coreConfig.PluginDirectory = c.flagDevPluginDir 688 } 689 if c.flagDevLatency > 0 { 690 injectLatency := time.Duration(c.flagDevLatency) * time.Millisecond 691 if _, txnOK := backend.(physical.Transactional); txnOK { 692 coreConfig.Physical = physical.NewTransactionalLatencyInjector(backend, injectLatency, c.flagDevLatencyJitter, c.logger) 693 } else { 694 coreConfig.Physical = physical.NewLatencyInjector(backend, injectLatency, c.flagDevLatencyJitter, c.logger) 695 } 696 } 697 } 698 699 if c.flagDevThreeNode { 700 return c.enableThreeNodeDevCluster(coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR")) 701 } 702 703 if c.flagDevFourCluster { 704 return 
c.enableFourClusterDev(coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR")) 705 } 706 707 var disableClustering bool 708 709 // Initialize the separate HA storage backend, if it exists 710 var ok bool 711 if config.HAStorage != nil { 712 factory, exists := c.PhysicalBackends[config.HAStorage.Type] 713 if !exists { 714 c.UI.Error(fmt.Sprintf("Unknown HA storage type %s", config.HAStorage.Type)) 715 return 1 716 717 } 718 habackend, err := factory(config.HAStorage.Config, c.logger) 719 if err != nil { 720 c.UI.Error(fmt.Sprintf( 721 "Error initializing HA storage of type %s: %s", config.HAStorage.Type, err)) 722 return 1 723 724 } 725 726 if coreConfig.HAPhysical, ok = habackend.(physical.HABackend); !ok { 727 c.UI.Error("Specified HA storage does not support HA") 728 return 1 729 } 730 731 if !coreConfig.HAPhysical.HAEnabled() { 732 c.UI.Error("Specified HA storage has HA support disabled; please consult documentation") 733 return 1 734 } 735 736 coreConfig.RedirectAddr = config.HAStorage.RedirectAddr 737 disableClustering = config.HAStorage.DisableClustering 738 if !disableClustering { 739 coreConfig.ClusterAddr = config.HAStorage.ClusterAddr 740 } 741 } else { 742 if coreConfig.HAPhysical, ok = backend.(physical.HABackend); ok { 743 coreConfig.RedirectAddr = config.Storage.RedirectAddr 744 disableClustering = config.Storage.DisableClustering 745 if !disableClustering { 746 coreConfig.ClusterAddr = config.Storage.ClusterAddr 747 } 748 } 749 } 750 751 if envRA := os.Getenv("VAULT_API_ADDR"); envRA != "" { 752 coreConfig.RedirectAddr = envRA 753 } else if envRA := os.Getenv("VAULT_REDIRECT_ADDR"); envRA != "" { 754 coreConfig.RedirectAddr = envRA 755 } else if envAA := os.Getenv("VAULT_ADVERTISE_ADDR"); envAA != "" { 756 coreConfig.RedirectAddr = envAA 757 } 758 759 // Attempt to detect the redirect address, if possible 760 if coreConfig.RedirectAddr == "" { 761 c.logger.Warn("no `api_addr` value specified in config or in VAULT_API_ADDR; 
falling back to detection if possible, but this value should be manually set") 762 } 763 var detect physical.RedirectDetect 764 if coreConfig.HAPhysical != nil && coreConfig.HAPhysical.HAEnabled() { 765 detect, ok = coreConfig.HAPhysical.(physical.RedirectDetect) 766 } else { 767 detect, ok = coreConfig.Physical.(physical.RedirectDetect) 768 } 769 if ok && coreConfig.RedirectAddr == "" { 770 redirect, err := c.detectRedirect(detect, config) 771 if err != nil { 772 c.UI.Error(fmt.Sprintf("Error detecting api address: %s", err)) 773 } else if redirect == "" { 774 c.UI.Error("Failed to detect api address") 775 } else { 776 coreConfig.RedirectAddr = redirect 777 } 778 } 779 if coreConfig.RedirectAddr == "" && c.flagDev { 780 coreConfig.RedirectAddr = fmt.Sprintf("http://%s", config.Listeners[0].Config["address"]) 781 } 782 783 // After the redirect bits are sorted out, if no cluster address was 784 // explicitly given, derive one from the redirect addr 785 if disableClustering { 786 coreConfig.ClusterAddr = "" 787 } else if envCA := os.Getenv("VAULT_CLUSTER_ADDR"); envCA != "" { 788 coreConfig.ClusterAddr = envCA 789 } else { 790 var addrToUse string 791 switch { 792 case coreConfig.ClusterAddr == "" && coreConfig.RedirectAddr != "": 793 addrToUse = coreConfig.RedirectAddr 794 case c.flagDev: 795 addrToUse = fmt.Sprintf("http://%s", config.Listeners[0].Config["address"]) 796 default: 797 goto CLUSTER_SYNTHESIS_COMPLETE 798 } 799 u, err := url.ParseRequestURI(addrToUse) 800 if err != nil { 801 c.UI.Error(fmt.Sprintf( 802 "Error parsing synthesized cluster address %s: %v", addrToUse, err)) 803 return 1 804 } 805 host, port, err := net.SplitHostPort(u.Host) 806 if err != nil { 807 // This sucks, as it's a const in the function but not exported in the package 808 if strings.Contains(err.Error(), "missing port in address") { 809 host = u.Host 810 port = "443" 811 } else { 812 c.UI.Error(fmt.Sprintf("Error parsing api address: %v", err)) 813 return 1 814 } 815 } 816 nPort, 
err := strconv.Atoi(port) 817 if err != nil { 818 c.UI.Error(fmt.Sprintf( 819 "Error parsing synthesized address; failed to convert %q to a numeric: %v", port, err)) 820 return 1 821 } 822 u.Host = net.JoinHostPort(host, strconv.Itoa(nPort+1)) 823 // Will always be TLS-secured 824 u.Scheme = "https" 825 coreConfig.ClusterAddr = u.String() 826 } 827 828CLUSTER_SYNTHESIS_COMPLETE: 829 830 if coreConfig.RedirectAddr == coreConfig.ClusterAddr && len(coreConfig.RedirectAddr) != 0 { 831 c.UI.Error(fmt.Sprintf( 832 "Address %q used for both API and cluster addresses", coreConfig.RedirectAddr)) 833 return 1 834 } 835 836 if coreConfig.ClusterAddr != "" { 837 // Force https as we'll always be TLS-secured 838 u, err := url.ParseRequestURI(coreConfig.ClusterAddr) 839 if err != nil { 840 c.UI.Error(fmt.Sprintf("Error parsing cluster address %s: %v", coreConfig.ClusterAddr, err)) 841 return 11 842 } 843 u.Scheme = "https" 844 coreConfig.ClusterAddr = u.String() 845 } 846 847 // Override the UI enabling config by the environment variable 848 if enableUI := os.Getenv("VAULT_UI"); enableUI != "" { 849 var err error 850 coreConfig.EnableUI, err = strconv.ParseBool(enableUI) 851 if err != nil { 852 c.UI.Output("Error parsing the environment variable VAULT_UI") 853 return 1 854 } 855 } 856 857 // Initialize the core 858 core, newCoreError := vault.NewCore(coreConfig) 859 if newCoreError != nil { 860 if vault.IsFatalError(newCoreError) { 861 c.UI.Error(fmt.Sprintf("Error initializing core: %s", newCoreError)) 862 return 1 863 } 864 } 865 866 // Copy the reload funcs pointers back 867 c.reloadFuncs = coreConfig.ReloadFuncs 868 c.reloadFuncsLock = coreConfig.ReloadFuncsLock 869 870 // Compile server information for output later 871 info["storage"] = config.Storage.Type 872 info["mlock"] = fmt.Sprintf( 873 "supported: %v, enabled: %v", 874 mlock.Supported(), !config.DisableMlock && mlock.Supported()) 875 infoKeys = append(infoKeys, "mlock", "storage") 876 877 if coreConfig.ClusterAddr != 
"" { 878 info["cluster address"] = coreConfig.ClusterAddr 879 infoKeys = append(infoKeys, "cluster address") 880 } 881 if coreConfig.RedirectAddr != "" { 882 info["api address"] = coreConfig.RedirectAddr 883 infoKeys = append(infoKeys, "api address") 884 } 885 886 if config.HAStorage != nil { 887 info["HA storage"] = config.HAStorage.Type 888 infoKeys = append(infoKeys, "HA storage") 889 } else { 890 // If the storage supports HA, then note it 891 if coreConfig.HAPhysical != nil { 892 if coreConfig.HAPhysical.HAEnabled() { 893 info["storage"] += " (HA available)" 894 } else { 895 info["storage"] += " (HA disabled)" 896 } 897 } 898 } 899 900 clusterAddrs := []*net.TCPAddr{} 901 902 // Initialize the listeners 903 lns := make([]ServerListener, 0, len(config.Listeners)) 904 c.reloadFuncsLock.Lock() 905 for i, lnConfig := range config.Listeners { 906 ln, props, reloadFunc, err := server.NewListener(lnConfig.Type, lnConfig.Config, c.logWriter, c.UI) 907 if err != nil { 908 c.UI.Error(fmt.Sprintf("Error initializing listener of type %s: %s", lnConfig.Type, err)) 909 return 1 910 } 911 912 if reloadFunc != nil { 913 relSlice := (*c.reloadFuncs)["listener|"+lnConfig.Type] 914 relSlice = append(relSlice, reloadFunc) 915 (*c.reloadFuncs)["listener|"+lnConfig.Type] = relSlice 916 } 917 918 if !disableClustering && lnConfig.Type == "tcp" { 919 var addrRaw interface{} 920 var addr string 921 var ok bool 922 if addrRaw, ok = lnConfig.Config["cluster_address"]; ok { 923 addr = addrRaw.(string) 924 tcpAddr, err := net.ResolveTCPAddr("tcp", addr) 925 if err != nil { 926 c.UI.Error(fmt.Sprintf("Error resolving cluster_address: %s", err)) 927 return 1 928 } 929 clusterAddrs = append(clusterAddrs, tcpAddr) 930 } else { 931 tcpAddr, ok := ln.Addr().(*net.TCPAddr) 932 if !ok { 933 c.UI.Error("Failed to parse tcp listener") 934 return 1 935 } 936 clusterAddr := &net.TCPAddr{ 937 IP: tcpAddr.IP, 938 Port: tcpAddr.Port + 1, 939 } 940 clusterAddrs = append(clusterAddrs, clusterAddr) 941 
addr = clusterAddr.String() 942 } 943 props["cluster address"] = addr 944 } 945 946 var maxRequestSize int64 = vaulthttp.DefaultMaxRequestSize 947 if valRaw, ok := lnConfig.Config["max_request_size"]; ok { 948 val, err := parseutil.ParseInt(valRaw) 949 if err != nil { 950 c.UI.Error(fmt.Sprintf("Could not parse max_request_size value %v", valRaw)) 951 return 1 952 } 953 954 if val >= 0 { 955 maxRequestSize = val 956 } 957 } 958 props["max_request_size"] = fmt.Sprintf("%d", maxRequestSize) 959 960 var maxRequestDuration time.Duration = vault.DefaultMaxRequestDuration 961 if valRaw, ok := lnConfig.Config["max_request_duration"]; ok { 962 val, err := parseutil.ParseDurationSecond(valRaw) 963 if err != nil { 964 c.UI.Error(fmt.Sprintf("Could not parse max_request_duration value %v", valRaw)) 965 return 1 966 } 967 968 if val >= 0 { 969 maxRequestDuration = val 970 } 971 } 972 props["max_request_duration"] = fmt.Sprintf("%s", maxRequestDuration.String()) 973 974 lns = append(lns, ServerListener{ 975 Listener: ln, 976 config: lnConfig.Config, 977 maxRequestSize: maxRequestSize, 978 maxRequestDuration: maxRequestDuration, 979 }) 980 981 // Store the listener props for output later 982 key := fmt.Sprintf("listener %d", i+1) 983 propsList := make([]string, 0, len(props)) 984 for k, v := range props { 985 propsList = append(propsList, fmt.Sprintf( 986 "%s: %q", k, v)) 987 } 988 sort.Strings(propsList) 989 infoKeys = append(infoKeys, key) 990 info[key] = fmt.Sprintf( 991 "%s (%s)", lnConfig.Type, strings.Join(propsList, ", ")) 992 993 } 994 c.reloadFuncsLock.Unlock() 995 if !disableClustering { 996 if c.logger.IsDebug() { 997 c.logger.Debug("cluster listener addresses synthesized", "cluster_addresses", clusterAddrs) 998 } 999 } 1000 1001 // Make sure we close all listeners from this point on 1002 listenerCloseFunc := func() { 1003 for _, ln := range lns { 1004 ln.Listener.Close() 1005 } 1006 } 1007 1008 defer c.cleanupGuard.Do(listenerCloseFunc) 1009 1010 infoKeys = 
append(infoKeys, "version") 1011 verInfo := version.GetVersion() 1012 info["version"] = verInfo.FullVersionNumber(false) 1013 if verInfo.Revision != "" { 1014 info["version sha"] = strings.Trim(verInfo.Revision, "'") 1015 infoKeys = append(infoKeys, "version sha") 1016 } 1017 infoKeys = append(infoKeys, "cgo") 1018 info["cgo"] = "disabled" 1019 if version.CgoEnabled { 1020 info["cgo"] = "enabled" 1021 } 1022 1023 // Server configuration output 1024 padding := 24 1025 sort.Strings(infoKeys) 1026 c.UI.Output("==> Vault server configuration:\n") 1027 for _, k := range infoKeys { 1028 c.UI.Output(fmt.Sprintf( 1029 "%s%s: %s", 1030 strings.Repeat(" ", padding-len(k)), 1031 strings.Title(k), 1032 info[k])) 1033 } 1034 c.UI.Output("") 1035 1036 // Tests might not want to start a vault server and just want to verify 1037 // the configuration. 1038 if c.flagTestVerifyOnly { 1039 return 0 1040 } 1041 1042 // This needs to happen before we first unseal, so before we trigger dev 1043 // mode if it's set 1044 core.SetClusterListenerAddrs(clusterAddrs) 1045 core.SetClusterHandler(vaulthttp.Handler(&vault.HandlerProperties{ 1046 Core: core, 1047 })) 1048 1049 // Before unsealing with stored keys, setup seal migration if needed 1050 if err := adjustCoreForSealMigration(c.logger, core, barrierSeal, unwrapSeal); err != nil { 1051 c.UI.Error(err.Error()) 1052 return 1 1053 } 1054 1055 // Attempt unsealing in a background goroutine. This is needed for when a 1056 // Vault cluster with multiple servers is configured with auto-unseal but is 1057 // uninitialized. Once one server initializes the storage backend, this 1058 // goroutine will pick up the unseal keys and unseal this instance. 
1059 if !core.IsInSealMigration() { 1060 go func() { 1061 for { 1062 err := core.UnsealWithStoredKeys(context.Background()) 1063 if err == nil { 1064 return 1065 } 1066 1067 if vault.IsFatalError(err) { 1068 c.logger.Error("error unsealing core", "error", err) 1069 return 1070 } else { 1071 c.logger.Warn("failed to unseal core", "error", err) 1072 } 1073 1074 select { 1075 case <-c.ShutdownCh: 1076 return 1077 case <-time.After(5 * time.Second): 1078 } 1079 } 1080 }() 1081 } 1082 1083 // Perform service discovery registrations and initialization of 1084 // HTTP server after the verifyOnly check. 1085 1086 // Instantiate the wait group 1087 c.WaitGroup = &sync.WaitGroup{} 1088 1089 // If the backend supports service discovery, run service discovery 1090 if coreConfig.HAPhysical != nil && coreConfig.HAPhysical.HAEnabled() { 1091 sd, ok := coreConfig.HAPhysical.(physical.ServiceDiscovery) 1092 if ok { 1093 activeFunc := func() bool { 1094 if isLeader, _, _, err := core.Leader(); err == nil { 1095 return isLeader 1096 } 1097 return false 1098 } 1099 1100 if err := sd.RunServiceDiscovery(c.WaitGroup, c.ShutdownCh, coreConfig.RedirectAddr, activeFunc, core.Sealed, core.PerfStandby); err != nil { 1101 c.UI.Error(fmt.Sprintf("Error initializing service discovery: %v", err)) 1102 return 1 1103 } 1104 } 1105 } 1106 1107 // If we're in Dev mode, then initialize the core 1108 if c.flagDev && !c.flagDevSkipInit { 1109 init, err := c.enableDev(core, coreConfig) 1110 if err != nil { 1111 c.UI.Error(fmt.Sprintf("Error initializing Dev mode: %s", err)) 1112 return 1 1113 } 1114 1115 var plugins, pluginsNotLoaded []string 1116 if c.flagDevPluginDir != "" && c.flagDevPluginInit { 1117 1118 f, err := os.Open(c.flagDevPluginDir) 1119 if err != nil { 1120 c.UI.Error(fmt.Sprintf("Error reading plugin dir: %s", err)) 1121 return 1 1122 } 1123 1124 list, err := f.Readdirnames(0) 1125 f.Close() 1126 if err != nil { 1127 c.UI.Error(fmt.Sprintf("Error listing plugins: %s", err)) 1128 return 1 
1129 } 1130 1131 for _, name := range list { 1132 path := filepath.Join(f.Name(), name) 1133 if err := c.addPlugin(path, init.RootToken, core); err != nil { 1134 if !errwrap.Contains(err, vault.ErrPluginBadType.Error()) { 1135 c.UI.Error(fmt.Sprintf("Error enabling plugin %s: %s", name, err)) 1136 return 1 1137 } 1138 pluginsNotLoaded = append(pluginsNotLoaded, name) 1139 continue 1140 } 1141 plugins = append(plugins, name) 1142 } 1143 1144 sort.Strings(plugins) 1145 } 1146 1147 // Print the big dev mode warning! 1148 c.UI.Warn(wrapAtLength( 1149 "WARNING! dev mode is enabled! In this mode, Vault runs entirely " + 1150 "in-memory and starts unsealed with a single unseal key. The root " + 1151 "token is already authenticated to the CLI, so you can immediately " + 1152 "begin using Vault.")) 1153 c.UI.Warn("") 1154 c.UI.Warn("You may need to set the following environment variable:") 1155 c.UI.Warn("") 1156 1157 endpointURL := "http://" + config.Listeners[0].Config["address"].(string) 1158 if runtime.GOOS == "windows" { 1159 c.UI.Warn("PowerShell:") 1160 c.UI.Warn(fmt.Sprintf(" $env:VAULT_ADDR=\"%s\"", endpointURL)) 1161 c.UI.Warn("cmd.exe:") 1162 c.UI.Warn(fmt.Sprintf(" set VAULT_ADDR=%s", endpointURL)) 1163 } else { 1164 c.UI.Warn(fmt.Sprintf(" $ export VAULT_ADDR='%s'", endpointURL)) 1165 } 1166 1167 // Unseal key is not returned if stored shares is supported 1168 if len(init.SecretShares) > 0 { 1169 c.UI.Warn("") 1170 c.UI.Warn(wrapAtLength( 1171 "The unseal key and root token are displayed below in case you want " + 1172 "to seal/unseal the Vault or re-authenticate.")) 1173 c.UI.Warn("") 1174 c.UI.Warn(fmt.Sprintf("Unseal Key: %s", base64.StdEncoding.EncodeToString(init.SecretShares[0]))) 1175 } 1176 1177 if len(init.RecoveryShares) > 0 { 1178 c.UI.Warn("") 1179 c.UI.Warn(wrapAtLength( 1180 "The recovery key and root token are displayed below in case you want " + 1181 "to seal/unseal the Vault or re-authenticate.")) 1182 c.UI.Warn("") 1183 
c.UI.Warn(fmt.Sprintf("Recovery Key: %s", base64.StdEncoding.EncodeToString(init.RecoveryShares[0]))) 1184 } 1185 1186 c.UI.Warn(fmt.Sprintf("Root Token: %s", init.RootToken)) 1187 1188 if len(plugins) > 0 { 1189 c.UI.Warn("") 1190 c.UI.Warn(wrapAtLength( 1191 "The following dev plugins are registered in the catalog:")) 1192 for _, p := range plugins { 1193 c.UI.Warn(fmt.Sprintf(" - %s", p)) 1194 } 1195 } 1196 1197 if len(pluginsNotLoaded) > 0 { 1198 c.UI.Warn("") 1199 c.UI.Warn(wrapAtLength( 1200 "The following dev plugins FAILED to be registered in the catalog due to unknown type:")) 1201 for _, p := range pluginsNotLoaded { 1202 c.UI.Warn(fmt.Sprintf(" - %s", p)) 1203 } 1204 } 1205 1206 c.UI.Warn("") 1207 c.UI.Warn(wrapAtLength( 1208 "Development mode should NOT be used in production installations!")) 1209 c.UI.Warn("") 1210 } 1211 1212 // Initialize the HTTP servers 1213 for _, ln := range lns { 1214 handler := vaulthttp.Handler(&vault.HandlerProperties{ 1215 Core: core, 1216 MaxRequestSize: ln.maxRequestSize, 1217 MaxRequestDuration: ln.maxRequestDuration, 1218 DisablePrintableCheck: config.DisablePrintableCheck, 1219 }) 1220 1221 // We perform validation on the config earlier, we can just cast here 1222 if _, ok := ln.config["x_forwarded_for_authorized_addrs"]; ok { 1223 hopSkips := ln.config["x_forwarded_for_hop_skips"].(int) 1224 authzdAddrs := ln.config["x_forwarded_for_authorized_addrs"].([]*sockaddr.SockAddrMarshaler) 1225 rejectNotPresent := ln.config["x_forwarded_for_reject_not_present"].(bool) 1226 rejectNonAuthz := ln.config["x_forwarded_for_reject_not_authorized"].(bool) 1227 if len(authzdAddrs) > 0 { 1228 handler = vaulthttp.WrapForwardedForHandler(handler, authzdAddrs, rejectNotPresent, rejectNonAuthz, hopSkips) 1229 } 1230 } 1231 1232 // server defaults 1233 server := &http.Server{ 1234 Handler: handler, 1235 ReadHeaderTimeout: 10 * time.Second, 1236 ReadTimeout: 30 * time.Second, 1237 IdleTimeout: 5 * time.Minute, 1238 ErrorLog: 
c.logger.StandardLogger(nil), 1239 } 1240 1241 // override server defaults with config values for read/write/idle timeouts if configured 1242 if readHeaderTimeoutInterface, ok := ln.config["http_read_header_timeout"]; ok { 1243 readHeaderTimeout, err := parseutil.ParseDurationSecond(readHeaderTimeoutInterface) 1244 if err != nil { 1245 c.UI.Error(fmt.Sprintf("Could not parse a time value for http_read_header_timeout %v", readHeaderTimeout)) 1246 return 1 1247 } 1248 server.ReadHeaderTimeout = readHeaderTimeout 1249 } 1250 1251 if readTimeoutInterface, ok := ln.config["http_read_timeout"]; ok { 1252 readTimeout, err := parseutil.ParseDurationSecond(readTimeoutInterface) 1253 if err != nil { 1254 c.UI.Error(fmt.Sprintf("Could not parse a time value for http_read_timeout %v", readTimeout)) 1255 return 1 1256 } 1257 server.ReadTimeout = readTimeout 1258 } 1259 1260 if writeTimeoutInterface, ok := ln.config["http_write_timeout"]; ok { 1261 writeTimeout, err := parseutil.ParseDurationSecond(writeTimeoutInterface) 1262 if err != nil { 1263 c.UI.Error(fmt.Sprintf("Could not parse a time value for http_write_timeout %v", writeTimeout)) 1264 return 1 1265 } 1266 server.WriteTimeout = writeTimeout 1267 } 1268 1269 if idleTimeoutInterface, ok := ln.config["http_idle_timeout"]; ok { 1270 idleTimeout, err := parseutil.ParseDurationSecond(idleTimeoutInterface) 1271 if err != nil { 1272 c.UI.Error(fmt.Sprintf("Could not parse a time value for http_idle_timeout %v", idleTimeout)) 1273 return 1 1274 } 1275 server.IdleTimeout = idleTimeout 1276 } 1277 1278 // server config tests can exit now 1279 if c.flagTestServerConfig { 1280 continue 1281 } 1282 1283 go server.Serve(ln.Listener) 1284 } 1285 1286 if c.flagTestServerConfig { 1287 return 0 1288 } 1289 1290 if sealConfigError != nil { 1291 init, err := core.Initialized(context.Background()) 1292 if err != nil { 1293 c.UI.Error(fmt.Sprintf("Error checking if core is initialized: %v", err)) 1294 return 1 1295 } 1296 if init { 1297 
c.UI.Error("Vault is initialized but no Seal key could be loaded") 1298 return 1 1299 } 1300 } 1301 1302 if newCoreError != nil { 1303 c.UI.Warn(wrapAtLength( 1304 "WARNING! A non-fatal error occurred during initialization. Please " + 1305 "check the logs for more information.")) 1306 c.UI.Warn("") 1307 } 1308 1309 // Output the header that the server has started 1310 if !c.flagCombineLogs { 1311 c.UI.Output("==> Vault server started! Log data will stream in below:\n") 1312 } 1313 1314 // Inform any tests that the server is ready 1315 select { 1316 case c.startedCh <- struct{}{}: 1317 default: 1318 } 1319 1320 // Release the log gate. 1321 c.logGate.Flush() 1322 1323 // Write out the PID to the file now that server has successfully started 1324 if err := c.storePidFile(config.PidFile); err != nil { 1325 c.UI.Error(fmt.Sprintf("Error storing PID: %s", err)) 1326 return 1 1327 } 1328 1329 defer func() { 1330 if err := c.removePidFile(config.PidFile); err != nil { 1331 c.UI.Error(fmt.Sprintf("Error deleting the PID file: %s", err)) 1332 } 1333 }() 1334 1335 // Wait for shutdown 1336 shutdownTriggered := false 1337 1338 for !shutdownTriggered { 1339 select { 1340 case <-c.ShutdownCh: 1341 c.UI.Output("==> Vault shutdown triggered") 1342 1343 // Stop the listeners so that we don't process further client requests. 1344 c.cleanupGuard.Do(listenerCloseFunc) 1345 1346 // Shutdown will wait until after Vault is sealed, which means the 1347 // request forwarding listeners will also be closed (and also 1348 // waited for). 
1349 if err := core.Shutdown(); err != nil { 1350 c.UI.Error(fmt.Sprintf("Error with core shutdown: %s", err)) 1351 } 1352 1353 shutdownTriggered = true 1354 1355 case <-c.SighupCh: 1356 c.UI.Output("==> Vault reload triggered") 1357 1358 // Check for new log level 1359 var config *server.Config 1360 var level log.Level 1361 for _, path := range c.flagConfigs { 1362 current, err := server.LoadConfig(path) 1363 if err != nil { 1364 c.logger.Error("could not reload config", "path", path, "error", err) 1365 goto RUNRELOADFUNCS 1366 } 1367 1368 if config == nil { 1369 config = current 1370 } else { 1371 config = config.Merge(current) 1372 } 1373 } 1374 1375 // Ensure at least one config was found. 1376 if config == nil { 1377 c.logger.Error("no config found at reload time") 1378 goto RUNRELOADFUNCS 1379 } 1380 1381 if config.LogLevel != "" { 1382 configLogLevel := strings.ToLower(strings.TrimSpace(config.LogLevel)) 1383 switch configLogLevel { 1384 case "trace": 1385 level = log.Trace 1386 case "debug": 1387 level = log.Debug 1388 case "notice", "info", "": 1389 level = log.Info 1390 case "warn", "warning": 1391 level = log.Warn 1392 case "err", "error": 1393 level = log.Error 1394 default: 1395 c.logger.Error("unknown log level found on reload", "level", config.LogLevel) 1396 goto RUNRELOADFUNCS 1397 } 1398 core.SetLogLevel(level) 1399 } 1400 1401 RUNRELOADFUNCS: 1402 if err := c.Reload(c.reloadFuncsLock, c.reloadFuncs, c.flagConfigs); err != nil { 1403 c.UI.Error(fmt.Sprintf("Error(s) were encountered during reload: %s", err)) 1404 } 1405 1406 case <-c.SigUSR2Ch: 1407 buf := make([]byte, 32*1024*1024) 1408 n := runtime.Stack(buf[:], true) 1409 c.logger.Info("goroutine trace", "stack", string(buf[:n])) 1410 } 1411 } 1412 1413 // Wait for dependent goroutines to complete 1414 c.WaitGroup.Wait() 1415 return 0 1416} 1417 1418func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig) (*vault.InitResult, error) { 1419 ctx := 
namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace) 1420 1421 var recoveryConfig *vault.SealConfig 1422 barrierConfig := &vault.SealConfig{ 1423 SecretShares: 1, 1424 SecretThreshold: 1, 1425 } 1426 1427 if core.SealAccess().RecoveryKeySupported() { 1428 recoveryConfig = &vault.SealConfig{ 1429 SecretShares: 1, 1430 SecretThreshold: 1, 1431 } 1432 } 1433 1434 if core.SealAccess().StoredKeysSupported() { 1435 barrierConfig.StoredShares = 1 1436 } 1437 1438 // Initialize it with a basic single key 1439 init, err := core.Initialize(ctx, &vault.InitParams{ 1440 BarrierConfig: barrierConfig, 1441 RecoveryConfig: recoveryConfig, 1442 }) 1443 if err != nil { 1444 return nil, err 1445 } 1446 1447 // Handle unseal with stored keys 1448 if core.SealAccess().StoredKeysSupported() { 1449 err := core.UnsealWithStoredKeys(ctx) 1450 if err != nil { 1451 return nil, err 1452 } 1453 } else { 1454 // Copy the key so that it can be zeroed 1455 key := make([]byte, len(init.SecretShares[0])) 1456 copy(key, init.SecretShares[0]) 1457 1458 // Unseal the core 1459 unsealed, err := core.Unseal(key) 1460 if err != nil { 1461 return nil, err 1462 } 1463 if !unsealed { 1464 return nil, fmt.Errorf("failed to unseal Vault for dev mode") 1465 } 1466 } 1467 1468 isLeader, _, _, err := core.Leader() 1469 if err != nil && err != vault.ErrHANotEnabled { 1470 return nil, errwrap.Wrapf("failed to check active status: {{err}}", err) 1471 } 1472 if err == nil { 1473 leaderCount := 5 1474 for !isLeader { 1475 if leaderCount == 0 { 1476 buf := make([]byte, 1<<16) 1477 runtime.Stack(buf, true) 1478 return nil, fmt.Errorf("failed to get active status after five seconds; call stack is\n%s\n", buf) 1479 } 1480 time.Sleep(1 * time.Second) 1481 isLeader, _, _, err = core.Leader() 1482 if err != nil { 1483 return nil, errwrap.Wrapf("failed to check active status: {{err}}", err) 1484 } 1485 leaderCount-- 1486 } 1487 } 1488 1489 // Generate a dev root token if one is provided in the flag 
1490 if coreConfig.DevToken != "" { 1491 req := &logical.Request{ 1492 ID: "dev-gen-root", 1493 Operation: logical.UpdateOperation, 1494 ClientToken: init.RootToken, 1495 Path: "auth/token/create", 1496 Data: map[string]interface{}{ 1497 "id": coreConfig.DevToken, 1498 "policies": []string{"root"}, 1499 "no_parent": true, 1500 "no_default_policy": true, 1501 }, 1502 } 1503 resp, err := core.HandleRequest(ctx, req) 1504 if err != nil { 1505 return nil, errwrap.Wrapf(fmt.Sprintf("failed to create root token with ID %q: {{err}}", coreConfig.DevToken), err) 1506 } 1507 if resp == nil { 1508 return nil, fmt.Errorf("nil response when creating root token with ID %q", coreConfig.DevToken) 1509 } 1510 if resp.Auth == nil { 1511 return nil, fmt.Errorf("nil auth when creating root token with ID %q", coreConfig.DevToken) 1512 } 1513 1514 init.RootToken = resp.Auth.ClientToken 1515 1516 req.ID = "dev-revoke-init-root" 1517 req.Path = "auth/token/revoke-self" 1518 req.Data = nil 1519 resp, err = core.HandleRequest(ctx, req) 1520 if err != nil { 1521 return nil, errwrap.Wrapf("failed to revoke initial root token: {{err}}", err) 1522 } 1523 } 1524 1525 // Set the token 1526 if !c.flagDevNoStoreToken { 1527 tokenHelper, err := c.TokenHelper() 1528 if err != nil { 1529 return nil, err 1530 } 1531 if err := tokenHelper.Store(init.RootToken); err != nil { 1532 return nil, err 1533 } 1534 } 1535 1536 kvVer := "2" 1537 if c.flagDevKVV1 || c.flagDevLeasedKV { 1538 kvVer = "1" 1539 } 1540 req := &logical.Request{ 1541 Operation: logical.UpdateOperation, 1542 ClientToken: init.RootToken, 1543 Path: "sys/mounts/secret", 1544 Data: map[string]interface{}{ 1545 "type": "kv", 1546 "path": "secret/", 1547 "description": "key/value secret storage", 1548 "options": map[string]string{ 1549 "version": kvVer, 1550 }, 1551 }, 1552 } 1553 resp, err := core.HandleRequest(ctx, req) 1554 if err != nil { 1555 return nil, errwrap.Wrapf("error creating default K/V store: {{err}}", err) 1556 } 1557 if 
resp.IsError() { 1558 return nil, errwrap.Wrapf("failed to create default K/V store: {{err}}", resp.Error()) 1559 } 1560 1561 return init, nil 1562} 1563 1564func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info map[string]string, infoKeys []string, devListenAddress, tempDir string) int { 1565 testCluster := vault.NewTestCluster(&testing.RuntimeT{}, base, &vault.TestClusterOptions{ 1566 HandlerFunc: vaulthttp.Handler, 1567 BaseListenAddress: c.flagDevListenAddr, 1568 Logger: c.logger, 1569 TempDir: tempDir, 1570 }) 1571 defer c.cleanupGuard.Do(testCluster.Cleanup) 1572 1573 info["cluster parameters path"] = testCluster.TempDir 1574 infoKeys = append(infoKeys, "cluster parameters path") 1575 1576 for i, core := range testCluster.Cores { 1577 info[fmt.Sprintf("node %d api address", i)] = fmt.Sprintf("https://%s", core.Listeners[0].Address.String()) 1578 infoKeys = append(infoKeys, fmt.Sprintf("node %d api address", i)) 1579 } 1580 1581 infoKeys = append(infoKeys, "version") 1582 verInfo := version.GetVersion() 1583 info["version"] = verInfo.FullVersionNumber(false) 1584 if verInfo.Revision != "" { 1585 info["version sha"] = strings.Trim(verInfo.Revision, "'") 1586 infoKeys = append(infoKeys, "version sha") 1587 } 1588 infoKeys = append(infoKeys, "cgo") 1589 info["cgo"] = "disabled" 1590 if version.CgoEnabled { 1591 info["cgo"] = "enabled" 1592 } 1593 1594 // Server configuration output 1595 padding := 24 1596 sort.Strings(infoKeys) 1597 c.UI.Output("==> Vault server configuration:\n") 1598 for _, k := range infoKeys { 1599 c.UI.Output(fmt.Sprintf( 1600 "%s%s: %s", 1601 strings.Repeat(" ", padding-len(k)), 1602 strings.Title(k), 1603 info[k])) 1604 } 1605 c.UI.Output("") 1606 1607 for _, core := range testCluster.Cores { 1608 core.Server.Handler = vaulthttp.Handler(&vault.HandlerProperties{ 1609 Core: core.Core, 1610 }) 1611 core.SetClusterHandler(core.Server.Handler) 1612 } 1613 1614 testCluster.Start() 1615 1616 ctx := 
// enableThreeNodeDevCluster starts an in-memory three-node test cluster for
// dev mode, prints the cluster's addresses and credentials, stores the root
// token in the token helper and a tempfile, and then blocks servicing
// SIGHUP (reload) and shutdown signals. Returns a process exit code.
func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info map[string]string, infoKeys []string, devListenAddress, tempDir string) int {
	testCluster := vault.NewTestCluster(&testing.RuntimeT{}, base, &vault.TestClusterOptions{
		HandlerFunc:       vaulthttp.Handler,
		BaseListenAddress: c.flagDevListenAddr,
		Logger:            c.logger,
		TempDir:           tempDir,
	})
	// cleanupGuard ensures cluster teardown runs at most once, whether via
	// this defer or the shutdown path below.
	defer c.cleanupGuard.Do(testCluster.Cleanup)

	info["cluster parameters path"] = testCluster.TempDir
	infoKeys = append(infoKeys, "cluster parameters path")

	// Advertise each node's API address in the startup banner.
	for i, core := range testCluster.Cores {
		info[fmt.Sprintf("node %d api address", i)] = fmt.Sprintf("https://%s", core.Listeners[0].Address.String())
		infoKeys = append(infoKeys, fmt.Sprintf("node %d api address", i))
	}

	infoKeys = append(infoKeys, "version")
	verInfo := version.GetVersion()
	info["version"] = verInfo.FullVersionNumber(false)
	if verInfo.Revision != "" {
		info["version sha"] = strings.Trim(verInfo.Revision, "'")
		infoKeys = append(infoKeys, "version sha")
	}
	infoKeys = append(infoKeys, "cgo")
	info["cgo"] = "disabled"
	if version.CgoEnabled {
		info["cgo"] = "enabled"
	}

	// Server configuration output, right-aligned to a fixed column.
	padding := 24
	sort.Strings(infoKeys)
	c.UI.Output("==> Vault server configuration:\n")
	for _, k := range infoKeys {
		c.UI.Output(fmt.Sprintf(
			"%s%s: %s",
			strings.Repeat(" ", padding-len(k)),
			strings.Title(k),
			info[k]))
	}
	c.UI.Output("")

	// Install the HTTP handler on every node before starting the cluster.
	for _, core := range testCluster.Cores {
		core.Server.Handler = vaulthttp.Handler(&vault.HandlerProperties{
			Core: core.Core,
		})
		core.SetClusterHandler(core.Server.Handler)
	}

	testCluster.Start()

	ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace)

	// If a specific root token ID was requested, mint it against node 0 and
	// revoke the auto-generated one.
	if base.DevToken != "" {
		req := &logical.Request{
			ID:          "dev-gen-root",
			Operation:   logical.UpdateOperation,
			ClientToken: testCluster.RootToken,
			Path:        "auth/token/create",
			Data: map[string]interface{}{
				"id":                base.DevToken,
				"policies":          []string{"root"},
				"no_parent":         true,
				"no_default_policy": true,
			},
		}
		resp, err := testCluster.Cores[0].HandleRequest(ctx, req)
		if err != nil {
			c.UI.Error(fmt.Sprintf("failed to create root token with ID %s: %s", base.DevToken, err))
			return 1
		}
		if resp == nil {
			c.UI.Error(fmt.Sprintf("nil response when creating root token with ID %s", base.DevToken))
			return 1
		}
		if resp.Auth == nil {
			c.UI.Error(fmt.Sprintf("nil auth when creating root token with ID %s", base.DevToken))
			return 1
		}

		testCluster.RootToken = resp.Auth.ClientToken

		// Reuse the request to revoke the initial root token.
		req.ID = "dev-revoke-init-root"
		req.Path = "auth/token/revoke-self"
		req.Data = nil
		resp, err = testCluster.Cores[0].HandleRequest(ctx, req)
		if err != nil {
			c.UI.Output(fmt.Sprintf("failed to revoke initial root token: %s", err))
			return 1
		}
	}

	// Set the token
	tokenHelper, err := c.TokenHelper()
	if err != nil {
		c.UI.Error(fmt.Sprintf("Error getting token helper: %s", err))
		return 1
	}
	if err := tokenHelper.Store(testCluster.RootToken); err != nil {
		c.UI.Error(fmt.Sprintf("Error storing in token helper: %s", err))
		return 1
	}

	// Also drop the root token in the cluster temp dir for test tooling.
	if err := ioutil.WriteFile(filepath.Join(testCluster.TempDir, "root_token"), []byte(testCluster.RootToken), 0755); err != nil {
		c.UI.Error(fmt.Sprintf("Error writing token to tempfile: %s", err))
		return 1
	}

	c.UI.Output(fmt.Sprintf(
		"==> Three node dev mode is enabled\n\n" +
			"The unseal key and root token are reproduced below in case you\n" +
			"want to seal/unseal the Vault or play with authentication.\n",
	))

	for i, key := range testCluster.BarrierKeys {
		c.UI.Output(fmt.Sprintf(
			"Unseal Key %d: %s",
			i+1, base64.StdEncoding.EncodeToString(key),
		))
	}

	c.UI.Output(fmt.Sprintf(
		"\nRoot Token: %s\n", testCluster.RootToken,
	))

	c.UI.Output(fmt.Sprintf(
		"\nUseful env vars:\n"+
			"VAULT_TOKEN=%s\n"+
			"VAULT_ADDR=%s\n"+
			"VAULT_CACERT=%s/ca_cert.pem\n",
		testCluster.RootToken,
		testCluster.Cores[0].Client.Address(),
		testCluster.TempDir,
	))

	// Output the header that the server has started
	c.UI.Output("==> Vault server started! Log data will stream in below:\n")

	// Inform any tests that the server is ready
	select {
	case c.startedCh <- struct{}{}:
	default:
	}

	// Release the log gate.
	c.logGate.Flush()

	// Wait for shutdown
	shutdownTriggered := false

	for !shutdownTriggered {
		select {
		case <-c.ShutdownCh:
			c.UI.Output("==> Vault shutdown triggered")

			// Stop the listeners so that we don't process further client requests.
			c.cleanupGuard.Do(testCluster.Cleanup)

			// Shutdown will wait until after Vault is sealed, which means the
			// request forwarding listeners will also be closed (and also
			// waited for).
			for _, core := range testCluster.Cores {
				if err := core.Shutdown(); err != nil {
					c.UI.Error(fmt.Sprintf("Error with core shutdown: %s", err))
				}
			}

			shutdownTriggered = true

		case <-c.SighupCh:
			// SIGHUP: re-run each node's registered reload functions.
			c.UI.Output("==> Vault reload triggered")
			for _, core := range testCluster.Cores {
				if err := c.Reload(core.ReloadFuncsLock, core.ReloadFuncs, nil); err != nil {
					c.UI.Error(fmt.Sprintf("Error(s) were encountered during reload: %s", err))
				}
			}
		}
	}

	return 0
}
1765 sha256sum, err := pluginSum(path) 1766 if err != nil { 1767 return err 1768 } 1769 1770 // Default the name to the basename of the binary 1771 name := filepath.Base(path) 1772 1773 // File a request against core to enable the plugin 1774 req := &logical.Request{ 1775 Operation: logical.UpdateOperation, 1776 ClientToken: token, 1777 Path: fmt.Sprintf("sys/plugins/catalog/%s", name), 1778 Data: map[string]interface{}{ 1779 "sha256": sha256sum, 1780 "command": name, 1781 }, 1782 } 1783 ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace) 1784 if _, err := core.HandleRequest(ctx, req); err != nil { 1785 return err 1786 } 1787 1788 return nil 1789} 1790 1791// detectRedirect is used to attempt redirect address detection 1792func (c *ServerCommand) detectRedirect(detect physical.RedirectDetect, 1793 config *server.Config) (string, error) { 1794 // Get the hostname 1795 host, err := detect.DetectHostAddr() 1796 if err != nil { 1797 return "", err 1798 } 1799 1800 // set [] for ipv6 addresses 1801 if strings.Contains(host, ":") && !strings.Contains(host, "]") { 1802 host = "[" + host + "]" 1803 } 1804 1805 // Default the port and scheme 1806 scheme := "https" 1807 port := 8200 1808 1809 // Attempt to detect overrides 1810 for _, list := range config.Listeners { 1811 // Only attempt TCP 1812 if list.Type != "tcp" { 1813 continue 1814 } 1815 1816 // Check if TLS is disabled 1817 if val, ok := list.Config["tls_disable"]; ok { 1818 disable, err := parseutil.ParseBool(val) 1819 if err != nil { 1820 return "", errwrap.Wrapf("tls_disable: {{err}}", err) 1821 } 1822 1823 if disable { 1824 scheme = "http" 1825 } 1826 } 1827 1828 // Check for address override 1829 var addr string 1830 addrRaw, ok := list.Config["address"] 1831 if !ok { 1832 addr = "127.0.0.1:8200" 1833 } else { 1834 addr = addrRaw.(string) 1835 } 1836 1837 // Check for localhost 1838 hostStr, portStr, err := net.SplitHostPort(addr) 1839 if err != nil { 1840 continue 1841 } 1842 
if hostStr == "127.0.0.1" { 1843 host = hostStr 1844 } 1845 1846 // Check for custom port 1847 listPort, err := strconv.Atoi(portStr) 1848 if err != nil { 1849 continue 1850 } 1851 port = listPort 1852 } 1853 1854 // Build a URL 1855 url := &url.URL{ 1856 Scheme: scheme, 1857 Host: fmt.Sprintf("%s:%d", host, port), 1858 } 1859 1860 // Return the URL string 1861 return url.String(), nil 1862} 1863 1864// setupTelemetry is used to setup the telemetry sub-systems and returns the in-memory sink to be used in http configuration 1865func (c *ServerCommand) setupTelemetry(config *server.Config) (*metricsutil.MetricsHelper, error) { 1866 /* Setup telemetry 1867 Aggregate on 10 second intervals for 1 minute. Expose the 1868 metrics over stderr when there is a SIGUSR1 received. 1869 */ 1870 inm := metrics.NewInmemSink(10*time.Second, time.Minute) 1871 metrics.DefaultInmemSignal(inm) 1872 1873 var telConfig *server.Telemetry 1874 if config.Telemetry != nil { 1875 telConfig = config.Telemetry 1876 } else { 1877 telConfig = &server.Telemetry{} 1878 } 1879 1880 metricsConf := metrics.DefaultConfig("vault") 1881 metricsConf.EnableHostname = !telConfig.DisableHostname 1882 1883 // Configure the statsite sink 1884 var fanout metrics.FanoutSink 1885 var prometheusEnabled bool 1886 1887 // Configure the Prometheus sink 1888 if telConfig.PrometheusRetentionTime != 0 { 1889 prometheusEnabled = true 1890 prometheusOpts := prometheus.PrometheusOpts{ 1891 Expiration: telConfig.PrometheusRetentionTime, 1892 } 1893 1894 sink, err := prometheus.NewPrometheusSinkFrom(prometheusOpts) 1895 if err != nil { 1896 return nil, err 1897 } 1898 fanout = append(fanout, sink) 1899 } 1900 1901 metricHelper := metricsutil.NewMetricsHelper(inm, prometheusEnabled) 1902 1903 if telConfig.StatsiteAddr != "" { 1904 sink, err := metrics.NewStatsiteSink(telConfig.StatsiteAddr) 1905 if err != nil { 1906 return nil, err 1907 } 1908 fanout = append(fanout, sink) 1909 } 1910 1911 // Configure the statsd sink 1912 if 
// setupTelemetry is used to setup the telemetry sub-systems and returns the
// in-memory sink to be used in http configuration. Every sink enabled in the
// config (Prometheus, statsite, statsd, Circonus, DogStatsD) is fanned out
// to alongside the in-memory sink, and the result is installed as the
// process-global metrics sink.
func (c *ServerCommand) setupTelemetry(config *server.Config) (*metricsutil.MetricsHelper, error) {
	/* Setup telemetry
	   Aggregate on 10 second intervals for 1 minute. Expose the
	   metrics over stderr when there is a SIGUSR1 received.
	*/
	inm := metrics.NewInmemSink(10*time.Second, time.Minute)
	metrics.DefaultInmemSignal(inm)

	// Fall back to an empty telemetry config so the field reads below are safe.
	var telConfig *server.Telemetry
	if config.Telemetry != nil {
		telConfig = config.Telemetry
	} else {
		telConfig = &server.Telemetry{}
	}

	metricsConf := metrics.DefaultConfig("vault")
	metricsConf.EnableHostname = !telConfig.DisableHostname

	// Configure the statsite sink
	var fanout metrics.FanoutSink
	var prometheusEnabled bool

	// Configure the Prometheus sink
	if telConfig.PrometheusRetentionTime != 0 {
		prometheusEnabled = true
		prometheusOpts := prometheus.PrometheusOpts{
			Expiration: telConfig.PrometheusRetentionTime,
		}

		sink, err := prometheus.NewPrometheusSinkFrom(prometheusOpts)
		if err != nil {
			return nil, err
		}
		fanout = append(fanout, sink)
	}

	metricHelper := metricsutil.NewMetricsHelper(inm, prometheusEnabled)

	if telConfig.StatsiteAddr != "" {
		sink, err := metrics.NewStatsiteSink(telConfig.StatsiteAddr)
		if err != nil {
			return nil, err
		}
		fanout = append(fanout, sink)
	}

	// Configure the statsd sink
	if telConfig.StatsdAddr != "" {
		sink, err := metrics.NewStatsdSink(telConfig.StatsdAddr)
		if err != nil {
			return nil, err
		}
		fanout = append(fanout, sink)
	}

	// Configure the Circonus sink
	if telConfig.CirconusAPIToken != "" || telConfig.CirconusCheckSubmissionURL != "" {
		cfg := &circonus.Config{}
		cfg.Interval = telConfig.CirconusSubmissionInterval
		cfg.CheckManager.API.TokenKey = telConfig.CirconusAPIToken
		cfg.CheckManager.API.TokenApp = telConfig.CirconusAPIApp
		cfg.CheckManager.API.URL = telConfig.CirconusAPIURL
		cfg.CheckManager.Check.SubmissionURL = telConfig.CirconusCheckSubmissionURL
		cfg.CheckManager.Check.ID = telConfig.CirconusCheckID
		cfg.CheckManager.Check.ForceMetricActivation = telConfig.CirconusCheckForceMetricActivation
		cfg.CheckManager.Check.InstanceID = telConfig.CirconusCheckInstanceID
		cfg.CheckManager.Check.SearchTag = telConfig.CirconusCheckSearchTag
		cfg.CheckManager.Check.DisplayName = telConfig.CirconusCheckDisplayName
		cfg.CheckManager.Check.Tags = telConfig.CirconusCheckTags
		cfg.CheckManager.Broker.ID = telConfig.CirconusBrokerID
		cfg.CheckManager.Broker.SelectTag = telConfig.CirconusBrokerSelectTag

		// Fill in Vault-specific defaults for fields the operator left unset.
		if cfg.CheckManager.API.TokenApp == "" {
			cfg.CheckManager.API.TokenApp = "vault"
		}

		if cfg.CheckManager.Check.DisplayName == "" {
			cfg.CheckManager.Check.DisplayName = "Vault"
		}

		if cfg.CheckManager.Check.SearchTag == "" {
			cfg.CheckManager.Check.SearchTag = "service:vault"
		}

		sink, err := circonus.NewCirconusSink(cfg)
		if err != nil {
			return nil, err
		}
		sink.Start()
		fanout = append(fanout, sink)
	}

	if telConfig.DogStatsDAddr != "" {
		var tags []string

		if telConfig.DogStatsDTags != nil {
			tags = telConfig.DogStatsDTags
		}

		sink, err := datadog.NewDogStatsdSink(telConfig.DogStatsDAddr, metricsConf.HostName)
		if err != nil {
			return nil, errwrap.Wrapf("failed to start DogStatsD sink: {{err}}", err)
		}
		sink.SetTags(tags)
		fanout = append(fanout, sink)
	}

	// Initialize the global sink
	if len(fanout) > 1 {
		// Hostname enabled will create poor quality metrics name for prometheus
		if !telConfig.DisableHostname {
			c.UI.Warn("telemetry.disable_hostname has been set to false. Recommended setting is true for Prometheus to avoid poorly named metrics.")
		}
	} else {
		// Only the in-memory sink will be used; drop the hostname prefix.
		metricsConf.EnableHostname = false
	}
	fanout = append(fanout, inm)
	_, err := metrics.NewGlobal(metricsConf, fanout)

	if err != nil {
		return nil, err
	}

	return metricHelper, nil
}
2021 select { 2022 case c.reloadedCh <- struct{}{}: 2023 default: 2024 } 2025 2026 return reloadErrors.ErrorOrNil() 2027} 2028 2029// storePidFile is used to write out our PID to a file if necessary 2030func (c *ServerCommand) storePidFile(pidPath string) error { 2031 // Quit fast if no pidfile 2032 if pidPath == "" { 2033 return nil 2034 } 2035 2036 // Open the PID file 2037 pidFile, err := os.OpenFile(pidPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) 2038 if err != nil { 2039 return errwrap.Wrapf("could not open pid file: {{err}}", err) 2040 } 2041 defer pidFile.Close() 2042 2043 // Write out the PID 2044 pid := os.Getpid() 2045 _, err = pidFile.WriteString(fmt.Sprintf("%d", pid)) 2046 if err != nil { 2047 return errwrap.Wrapf("could not write to pid file: {{err}}", err) 2048 } 2049 return nil 2050} 2051 2052// removePidFile is used to cleanup the PID file if necessary 2053func (c *ServerCommand) removePidFile(pidPath string) error { 2054 if pidPath == "" { 2055 return nil 2056 } 2057 return os.Remove(pidPath) 2058} 2059 2060// storageMigrationActive checks and warns against in-progress storage migrations. 2061// This function will block until storage is available. 2062func (c *ServerCommand) storageMigrationActive(backend physical.Backend) bool { 2063 first := true 2064 2065 for { 2066 migrationStatus, err := CheckStorageMigration(backend) 2067 if err == nil { 2068 if migrationStatus != nil { 2069 startTime := migrationStatus.Start.Format(time.RFC3339) 2070 c.UI.Error(wrapAtLength(fmt.Sprintf("ERROR! Storage migration in progress (started: %s). "+ 2071 "Server startup is prevented until the migration completes. Use 'vault operator migrate -reset' "+ 2072 "to force clear the migration lock.", startTime))) 2073 return true 2074 } 2075 return false 2076 } 2077 if first { 2078 first = false 2079 c.UI.Warn("\nWARNING! 
// storageMigrationActive checks and warns against in-progress storage migrations.
// This function will block until storage is available: on read errors it
// retries every 2 seconds (warning once on the first failure) until the
// status can be read or shutdown is requested. Returns true when startup
// must be prevented (migration in progress, or shutdown during the wait).
func (c *ServerCommand) storageMigrationActive(backend physical.Backend) bool {
	first := true

	for {
		migrationStatus, err := CheckStorageMigration(backend)
		if err == nil {
			if migrationStatus != nil {
				startTime := migrationStatus.Start.Format(time.RFC3339)
				c.UI.Error(wrapAtLength(fmt.Sprintf("ERROR! Storage migration in progress (started: %s). "+
					"Server startup is prevented until the migration completes. Use 'vault operator migrate -reset' "+
					"to force clear the migration lock.", startTime)))
				return true
			}
			return false
		}
		if first {
			first = false
			c.UI.Warn("\nWARNING! Unable to read storage migration status.")

			// unexpected state, so stop buffering log messages
			c.logGate.Flush()
		}
		c.logger.Warn("storage migration check error", "error", err.Error())

		select {
		case <-time.After(2 * time.Second):
		case <-c.ShutdownCh:
			return true
		}
	}
}

// StorageMigrationStatus is the JSON payload stored under the storage
// migration lock key while a migration is running.
type StorageMigrationStatus struct {
	// Start records when the migration began.
	Start time.Time `json:"start"`
}

// CheckStorageMigration reads the migration lock entry from the backend.
// It returns the decoded status if the entry exists, nil if it does not,
// and an error if the read or JSON decode fails.
func CheckStorageMigration(b physical.Backend) (*StorageMigrationStatus, error) {
	entry, err := b.Get(context.Background(), storageMigrationLock)

	if err != nil {
		return nil, err
	}

	if entry == nil {
		return nil, nil
	}

	var status StorageMigrationStatus
	if err := jsonutil.DecodeJSON(entry.Value, &status); err != nil {
		return nil, err
	}

	return &status, nil
}

// SetStorageMigration writes the migration lock entry with the current time
// when active is true, or deletes it when active is false.
func SetStorageMigration(b physical.Backend, active bool) error {
	if !active {
		return b.Delete(context.Background(), storageMigrationLock)
	}

	status := StorageMigrationStatus{
		Start: time.Now(),
	}

	enc, err := jsonutil.EncodeJSON(status)
	if err != nil {
		return err
	}

	entry := &physical.Entry{
		Key:   storageMigrationLock,
		Value: enc,
	}

	return b.Put(context.Background(), entry)
}

// grpclogFaker adapts the server's hclog logger to the interface gRPC's
// grpclog package expects, so gRPC-internal logging flows through Vault's
// logger.
type grpclogFaker struct {
	logger log.Logger
	// log gates whether non-fatal gRPC output is emitted at all.
	log bool
}

// Fatal logs the arguments at error level and terminates the process.
func (g *grpclogFaker) Fatal(args ...interface{}) {
	g.logger.Error(fmt.Sprint(args...))
	os.Exit(1)
}

// Fatalf logs the formatted message at error level and terminates the process.
func (g *grpclogFaker) Fatalf(format string, args ...interface{}) {
	g.logger.Error(fmt.Sprintf(format, args...))
	os.Exit(1)
}

// Fatalln logs the arguments at error level and terminates the process.
func (g *grpclogFaker) Fatalln(args ...interface{}) {
	g.logger.Error(fmt.Sprintln(args...))
	os.Exit(1)
}

// Print emits gRPC output at debug level, but only when gRPC logging is
// enabled and the logger is at debug level.
func (g *grpclogFaker) Print(args ...interface{}) {
	if g.log && g.logger.IsDebug() {
		g.logger.Debug(fmt.Sprint(args...))
	}
}
(g *grpclogFaker) Printf(format string, args ...interface{}) { 2166 if g.log && g.logger.IsDebug() { 2167 g.logger.Debug(fmt.Sprintf(format, args...)) 2168 } 2169} 2170 2171func (g *grpclogFaker) Println(args ...interface{}) { 2172 if g.log && g.logger.IsDebug() { 2173 g.logger.Debug(fmt.Sprintln(args...)) 2174 } 2175} 2176