1package command
2
3import (
4	"context"
5	"crypto/sha256"
6	"encoding/base64"
7	"encoding/hex"
8	"fmt"
9	"io"
10	"io/ioutil"
11	"net"
12	"net/http"
13	"net/url"
14	"os"
15	"path/filepath"
16	"runtime"
17	"sort"
18	"strconv"
19	"strings"
20	"sync"
21	"time"
22
23	monitoring "cloud.google.com/go/monitoring/apiv3"
24	"github.com/armon/go-metrics"
25	"github.com/armon/go-metrics/circonus"
26	"github.com/armon/go-metrics/datadog"
27	"github.com/armon/go-metrics/prometheus"
28	stackdriver "github.com/google/go-metrics-stackdriver"
29	"github.com/hashicorp/errwrap"
30	"github.com/hashicorp/go-hclog"
31	log "github.com/hashicorp/go-hclog"
32	wrapping "github.com/hashicorp/go-kms-wrapping"
33	aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead"
34	"github.com/hashicorp/go-multierror"
35	"github.com/hashicorp/go-sockaddr"
36	"github.com/hashicorp/vault/audit"
37	"github.com/hashicorp/vault/command/server"
38	serverseal "github.com/hashicorp/vault/command/server/seal"
39	"github.com/hashicorp/vault/helper/builtinplugins"
40	"github.com/hashicorp/vault/helper/metricsutil"
41	"github.com/hashicorp/vault/helper/namespace"
42	vaulthttp "github.com/hashicorp/vault/http"
43	"github.com/hashicorp/vault/internalshared/gatedwriter"
44	"github.com/hashicorp/vault/internalshared/reloadutil"
45	"github.com/hashicorp/vault/sdk/helper/jsonutil"
46	"github.com/hashicorp/vault/sdk/helper/logging"
47	"github.com/hashicorp/vault/sdk/helper/mlock"
48	"github.com/hashicorp/vault/sdk/helper/parseutil"
49	"github.com/hashicorp/vault/sdk/helper/useragent"
50	"github.com/hashicorp/vault/sdk/logical"
51	"github.com/hashicorp/vault/sdk/physical"
52	"github.com/hashicorp/vault/sdk/version"
53	sr "github.com/hashicorp/vault/serviceregistration"
54	"github.com/hashicorp/vault/vault"
55	vaultseal "github.com/hashicorp/vault/vault/seal"
56	"github.com/mitchellh/cli"
57	"github.com/mitchellh/go-testing-interface"
58	"github.com/posener/complete"
59	"go.uber.org/atomic"
60	"golang.org/x/net/http/httpproxy"
61	"google.golang.org/api/option"
62	"google.golang.org/grpc/grpclog"
63)
64
// Compile-time checks that ServerCommand implements the cli interfaces.
var _ cli.Command = (*ServerCommand)(nil)
var _ cli.CommandAutocomplete = (*ServerCommand)(nil)

// memProfilerEnabled gates c.startMemProfiler() in Run; disabled by default.
var memProfilerEnabled = false

// enableFourClusterDev backs the -dev-four-cluster flag. In this build it
// only reports the feature as enterprise-only and returns exit code 1;
// presumably the enterprise build swaps in a real implementation — confirm.
var enableFourClusterDev = func(c *ServerCommand, base *vault.CoreConfig, info map[string]string, infoKeys []string, devListenAddress, tempDir string) int {
	c.logger.Error("-dev-four-cluster only supported in enterprise Vault")
	return 1
}

// storageMigrationLock is the storage path used to detect an in-flight
// storage migration (presumably read by storageMigrationActive — confirm).
const storageMigrationLock = "core/migration"
76
// ServerCommand implements cli.Command and starts a Vault server. It
// carries the factories for every pluggable backend type plus the
// channels used to coordinate shutdown, reloads, and tests.
type ServerCommand struct {
	*BaseCommand

	// Factories for pluggable backends, keyed by backend type name.
	AuditBackends      map[string]audit.Factory
	CredentialBackends map[string]logical.Factory
	LogicalBackends    map[string]logical.Factory
	PhysicalBackends   map[string]physical.Factory

	// ServiceRegistrations maps service_registration types to factories.
	ServiceRegistrations map[string]sr.Factory

	// ShutdownCh signals server shutdown; SigUSR2Ch triggers a goroutine
	// stack dump (see the select loops in Run/runRecoveryMode). SighupCh
	// is presumably used for config reload — handled outside this chunk.
	ShutdownCh chan struct{}
	SighupCh   chan struct{}
	SigUSR2Ch  chan struct{}

	WaitGroup *sync.WaitGroup

	// logOutput is the raw log destination (stderr, or stdout with
	// -combine-logs); gatedWriter buffers log lines until the startup
	// banner has been printed, then is flushed into logOutput.
	logOutput   io.Writer
	gatedWriter *gatedwriter.Writer
	logger      log.Logger

	// cleanupGuard ensures listener cleanup runs at most once.
	cleanupGuard sync.Once

	reloadFuncsLock *sync.RWMutex
	reloadFuncs     *map[string][]reloadutil.ReloadFunc
	startedCh       chan (struct{}) // for tests
	reloadedCh      chan (struct{}) // for tests

	// Values bound to the command-line flags registered in Flags().
	flagConfigs            []string
	flagLogLevel           string
	flagLogFormat          string
	flagRecovery           bool
	flagDev                bool
	flagDevRootTokenID     string
	flagDevListenAddr      string
	flagDevNoStoreToken    bool
	flagDevPluginDir       string
	flagDevPluginInit      bool
	flagDevHA              bool
	flagDevLatency         int
	flagDevLatencyJitter   int
	flagDevLeasedKV        bool
	flagDevKVV1            bool
	flagDevSkipInit        bool
	flagDevThreeNode       bool
	flagDevFourCluster     bool
	flagDevTransactional   bool
	flagDevAutoSeal        bool
	flagTestVerifyOnly     bool
	flagCombineLogs        bool
	flagTestServerConfig   bool
	flagDevConsul          bool
	flagExitOnCoreShutdown bool
}
131
132type ServerListener struct {
133	net.Listener
134	config                       map[string]interface{}
135	maxRequestSize               int64
136	maxRequestDuration           time.Duration
137	unauthenticatedMetricsAccess bool
138}
139
140func (c *ServerCommand) Synopsis() string {
141	return "Start a Vault server"
142}
143
144func (c *ServerCommand) Help() string {
145	helpText := `
146Usage: vault server [options]
147
148  This command starts a Vault server that responds to API requests. By default,
149  Vault will start in a "sealed" state. The Vault cluster must be initialized
150  before use, usually by the "vault operator init" command. Each Vault server must
151  also be unsealed using the "vault operator unseal" command or the API before the
152  server can respond to requests.
153
154  Start a server with a configuration file:
155
156      $ vault server -config=/etc/vault/config.hcl
157
158  Run in "dev" mode:
159
160      $ vault server -dev -dev-root-token-id="root"
161
162  For a full list of examples, please see the documentation.
163
164` + c.Flags().Help()
165	return strings.TrimSpace(helpText)
166}
167
// Flags returns the flag sets understood by the server command: the
// shared HTTP flags from BaseCommand, the public "Command Options" and
// "Dev Options" sets, and a tail of hidden internal/test-only flags.
// Registration order here determines the order in the help output.
func (c *ServerCommand) Flags() *FlagSets {
	set := c.flagSet(FlagSetHTTP)

	f := set.NewFlagSet("Command Options")

	f.StringSliceVar(&StringSliceVar{
		Name:   "config",
		Target: &c.flagConfigs,
		Completion: complete.PredictOr(
			complete.PredictFiles("*.hcl"),
			complete.PredictFiles("*.json"),
			complete.PredictDirs("*"),
		),
		Usage: "Path to a configuration file or directory of configuration " +
			"files. This flag can be specified multiple times to load multiple " +
			"configurations. If the path is a directory, all files which end in " +
			".hcl or .json are loaded.",
	})

	f.StringVar(&StringVar{
		Name:       "log-level",
		Target:     &c.flagLogLevel,
		Default:    notSetValue,
		EnvVar:     "VAULT_LOG_LEVEL",
		Completion: complete.PredictSet("trace", "debug", "info", "warn", "err"),
		Usage: "Log verbosity level. Supported values (in order of detail) are " +
			"\"trace\", \"debug\", \"info\", \"warn\", and \"err\".",
	})

	f.StringVar(&StringVar{
		Name:    "log-format",
		Target:  &c.flagLogFormat,
		Default: notSetValue,
		// EnvVar can't be just "VAULT_LOG_FORMAT", because more than one env var name is supported
		// for backwards compatibility reasons.
		// See github.com/hashicorp/vault/sdk/helper/logging.ParseEnvLogFormat()
		Completion: complete.PredictSet("standard", "json"),
		Usage:      `Log format. Supported values are "standard" and "json".`,
	})

	f.BoolVar(&BoolVar{
		Name:    "exit-on-core-shutdown",
		Target:  &c.flagExitOnCoreShutdown,
		Default: false,
		Usage:   "Exit the vault server if the vault core is shutdown.",
	})

	f.BoolVar(&BoolVar{
		Name:   "recovery",
		Target: &c.flagRecovery,
		Usage: "Enable recovery mode. In this mode, Vault is used to perform recovery actions." +
			"Using a recovery operation token, \"sys/raw\" API can be used to manipulate the storage.",
	})

	// Dev-mode flags; -dev itself plus its public companions.
	f = set.NewFlagSet("Dev Options")

	f.BoolVar(&BoolVar{
		Name:   "dev",
		Target: &c.flagDev,
		Usage: "Enable development mode. In this mode, Vault runs in-memory and " +
			"starts unsealed. As the name implies, do not run \"dev\" mode in " +
			"production.",
	})

	f.StringVar(&StringVar{
		Name:    "dev-root-token-id",
		Target:  &c.flagDevRootTokenID,
		Default: "",
		EnvVar:  "VAULT_DEV_ROOT_TOKEN_ID",
		Usage: "Initial root token. This only applies when running in \"dev\" " +
			"mode.",
	})

	f.StringVar(&StringVar{
		Name:    "dev-listen-address",
		Target:  &c.flagDevListenAddr,
		Default: "127.0.0.1:8200",
		EnvVar:  "VAULT_DEV_LISTEN_ADDRESS",
		Usage:   "Address to bind to in \"dev\" mode.",
	})
	f.BoolVar(&BoolVar{
		Name:    "dev-no-store-token",
		Target:  &c.flagDevNoStoreToken,
		Default: false,
		Usage: "Do not persist the dev root token to the token helper " +
			"(usually the local filesystem) for use in future requests. " +
			"The token will only be displayed in the command output.",
	})

	// Internal-only flags to follow.
	//
	// Why hello there little source code reader! Welcome to the Vault source
	// code. The remaining options are intentionally undocumented and come with
	// no warranty or backwards-compatibility promise. Do not use these flags
	// in production. Do not build automation using these flags. Unless you are
	// developing against Vault, you should not need any of these flags.

	f.StringVar(&StringVar{
		Name:       "dev-plugin-dir",
		Target:     &c.flagDevPluginDir,
		Default:    "",
		Completion: complete.PredictDirs("*"),
		Hidden:     true,
	})

	f.BoolVar(&BoolVar{
		Name:    "dev-plugin-init",
		Target:  &c.flagDevPluginInit,
		Default: true,
		Hidden:  true,
	})

	f.BoolVar(&BoolVar{
		Name:    "dev-ha",
		Target:  &c.flagDevHA,
		Default: false,
		Hidden:  true,
	})

	f.BoolVar(&BoolVar{
		Name:    "dev-transactional",
		Target:  &c.flagDevTransactional,
		Default: false,
		Hidden:  true,
	})

	f.IntVar(&IntVar{
		Name:   "dev-latency",
		Target: &c.flagDevLatency,
		Hidden: true,
	})

	f.IntVar(&IntVar{
		Name:   "dev-latency-jitter",
		Target: &c.flagDevLatencyJitter,
		Hidden: true,
	})

	f.BoolVar(&BoolVar{
		Name:    "dev-leased-kv",
		Target:  &c.flagDevLeasedKV,
		Default: false,
		Hidden:  true,
	})

	f.BoolVar(&BoolVar{
		Name:    "dev-kv-v1",
		Target:  &c.flagDevKVV1,
		Default: false,
		Hidden:  true,
	})

	f.BoolVar(&BoolVar{
		Name:    "dev-auto-seal",
		Target:  &c.flagDevAutoSeal,
		Default: false,
		Hidden:  true,
	})

	f.BoolVar(&BoolVar{
		Name:    "dev-skip-init",
		Target:  &c.flagDevSkipInit,
		Default: false,
		Hidden:  true,
	})

	f.BoolVar(&BoolVar{
		Name:    "dev-three-node",
		Target:  &c.flagDevThreeNode,
		Default: false,
		Hidden:  true,
	})

	f.BoolVar(&BoolVar{
		Name:    "dev-four-cluster",
		Target:  &c.flagDevFourCluster,
		Default: false,
		Hidden:  true,
	})

	f.BoolVar(&BoolVar{
		Name:    "dev-consul",
		Target:  &c.flagDevConsul,
		Default: false,
		Hidden:  true,
	})

	// TODO: should the below flags be public?
	f.BoolVar(&BoolVar{
		Name:    "combine-logs",
		Target:  &c.flagCombineLogs,
		Default: false,
		Hidden:  true,
	})

	f.BoolVar(&BoolVar{
		Name:    "test-verify-only",
		Target:  &c.flagTestVerifyOnly,
		Default: false,
		Hidden:  true,
	})

	f.BoolVar(&BoolVar{
		Name:    "test-server-config",
		Target:  &c.flagTestServerConfig,
		Default: false,
		Hidden:  true,
	})

	// End internal-only flags.

	return set
}
381
382func (c *ServerCommand) AutocompleteArgs() complete.Predictor {
383	return complete.PredictNothing
384}
385
386func (c *ServerCommand) AutocompleteFlags() complete.Flags {
387	return c.Flags().Completions()
388}
389
390func (c *ServerCommand) parseConfig() (*server.Config, error) {
391	// Load the configuration
392	var config *server.Config
393	for _, path := range c.flagConfigs {
394		current, err := server.LoadConfig(path)
395		if err != nil {
396			return nil, errwrap.Wrapf(fmt.Sprintf("error loading configuration from %s: {{err}}", path), err)
397		}
398
399		if config == nil {
400			config = current
401		} else {
402			config = config.Merge(current)
403		}
404	}
405	return config, nil
406}
407
// runRecoveryMode starts Vault in recovery mode: a stripped-down server
// with exactly one seal and no service registration, whose listeners
// serve requests with a recovery token (see vaulthttp.Handler below).
// It blocks until shutdown and returns the process exit code.
func (c *ServerCommand) runRecoveryMode() int {
	config, err := c.parseConfig()
	if err != nil {
		c.UI.Error(err.Error())
		return 1
	}

	// Ensure at least one config was found.
	if config == nil {
		c.UI.Output(wrapAtLength(
			"No configuration files found. Please provide configurations with the " +
				"-config flag. If you are supplying the path to a directory, please " +
				"ensure the directory contains files with the .hcl or .json " +
				"extension."))
		return 1
	}

	// Resolve log level/format from flags, environment, and config file.
	level, logLevelString, logLevelWasNotSet, logFormat, err := c.processLogLevelAndFormat(config)
	if err != nil {
		c.UI.Error(err.Error())
		return 1
	}

	// Logs go through the gated writer until the banner is printed; see
	// the ResetOutputWithFlush call near the end of this function.
	c.logger = log.New(&log.LoggerOptions{
		Output: c.gatedWriter,
		Level:  level,
		// Note that if logFormat is either unspecified or standard, then
		// the resulting logger's format will be standard.
		JSONFormat: logFormat == logging.JSONFormat,
	})

	// A level from the config file applies only if none was set via
	// flag/env; it also replaces the reported level string.
	logLevelStr, err := c.adjustLogLevel(config, logLevelWasNotSet)
	if err != nil {
		c.UI.Error(err.Error())
		return 1
	}
	if logLevelStr != "" {
		logLevelString = logLevelStr
	}

	// create GRPC logger; verbose gRPC logging only with VAULT_GRPC_LOGGING set
	namedGRPCLogFaker := c.logger.Named("grpclogfaker")
	grpclog.SetLogger(&grpclogFaker{
		logger: namedGRPCLogFaker,
		log:    os.Getenv("VAULT_GRPC_LOGGING") != "",
	})

	if config.Storage == nil {
		c.UI.Output("A storage backend must be specified")
		return 1
	}

	if config.DefaultMaxRequestDuration != 0 {
		vault.DefaultMaxRequestDuration = config.DefaultMaxRequestDuration
	}

	// Log effective proxy settings for diagnostics.
	proxyCfg := httpproxy.FromEnvironment()
	c.logger.Info("proxy environment", "http_proxy", proxyCfg.HTTPProxy,
		"https_proxy", proxyCfg.HTTPSProxy, "no_proxy", proxyCfg.NoProxy)

	// Initialize the storage backend
	factory, exists := c.PhysicalBackends[config.Storage.Type]
	if !exists {
		c.UI.Error(fmt.Sprintf("Unknown storage type %s", config.Storage.Type))
		return 1
	}
	// Raft requires a cluster address; VAULT_CLUSTER_ADDR overrides config.
	if config.Storage.Type == "raft" {
		if envCA := os.Getenv("VAULT_CLUSTER_ADDR"); envCA != "" {
			config.ClusterAddr = envCA
		}

		if len(config.ClusterAddr) == 0 {
			c.UI.Error("Cluster address must be set when using raft storage")
			return 1
		}
	}

	namedStorageLogger := c.logger.Named("storage." + config.Storage.Type)
	backend, err := factory(config.Storage.Config, namedStorageLogger)
	if err != nil {
		c.UI.Error(fmt.Sprintf("Error initializing storage of type %s: %s", config.Storage.Type, err))
		return 1
	}

	// info/infoKeys accumulate the key/value pairs printed in the
	// startup banner below.
	infoKeys := make([]string, 0, 10)
	info := make(map[string]string)
	info["log level"] = logLevelString
	infoKeys = append(infoKeys, "log level")

	var barrierSeal vault.Seal
	var sealConfigError error

	// Default to a Shamir seal when the config declares none.
	if len(config.Seals) == 0 {
		config.Seals = append(config.Seals, &server.Seal{Type: wrapping.Shamir})
	}

	if len(config.Seals) > 1 {
		c.UI.Error("Only one seal block is accepted in recovery mode")
		return 1
	}

	// VAULT_SEAL_TYPE overrides the configured type for an enabled seal.
	configSeal := config.Seals[0]
	sealType := wrapping.Shamir
	if !configSeal.Disabled && os.Getenv("VAULT_SEAL_TYPE") != "" {
		sealType = os.Getenv("VAULT_SEAL_TYPE")
		configSeal.Type = sealType
	} else {
		sealType = configSeal.Type
	}

	var seal vault.Seal
	sealLogger := c.logger.Named(sealType)
	seal, sealConfigError = serverseal.ConfigureSeal(configSeal, &infoKeys, &info, sealLogger, vault.NewDefaultSeal(&vaultseal.Access{
		Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
			Logger: c.logger.Named("shamir"),
		}),
	}))
	if sealConfigError != nil {
		// A missing key is tolerated here; it is re-checked against the
		// initialization state further below.
		if !errwrap.ContainsType(sealConfigError, new(logical.KeyNotFoundError)) {
			c.UI.Error(fmt.Sprintf(
				"Error parsing Seal configuration: %s", sealConfigError))
			return 1
		}
	}
	if seal == nil {
		c.UI.Error(fmt.Sprintf(
			"After configuring seal nil returned, seal type was %s", sealType))
		return 1
	}

	barrierSeal = seal

	// Ensure that the seal finalizer is called, even if using verify-only
	defer func() {
		err = seal.Finalize(context.Background())
		if err != nil {
			c.UI.Error(fmt.Sprintf("Error finalizing seals: %v", err))
		}
	}()

	coreConfig := &vault.CoreConfig{
		Physical:     backend,
		StorageType:  config.Storage.Type,
		Seal:         barrierSeal,
		Logger:       c.logger,
		DisableMlock: config.DisableMlock,
		RecoveryMode: c.flagRecovery,
		ClusterAddr:  config.ClusterAddr,
	}

	// Non-fatal core errors are surfaced as a warning after startup.
	core, newCoreError := vault.NewCore(coreConfig)
	if newCoreError != nil {
		if vault.IsFatalError(newCoreError) {
			c.UI.Error(fmt.Sprintf("Error initializing core: %s", newCoreError))
			return 1
		}
	}

	if err := core.InitializeRecovery(context.Background()); err != nil {
		c.UI.Error(fmt.Sprintf("Error initializing core in recovery mode: %s", err))
		return 1
	}

	// Compile server information for output later
	infoKeys = append(infoKeys, "storage")
	info["storage"] = config.Storage.Type

	if coreConfig.ClusterAddr != "" {
		info["cluster address"] = coreConfig.ClusterAddr
		infoKeys = append(infoKeys, "cluster address")
	}

	// Initialize the listeners
	lns := make([]ServerListener, 0, len(config.Listeners))
	for _, lnConfig := range config.Listeners {
		ln, _, _, err := server.NewListener(lnConfig.Type, lnConfig.Config, c.gatedWriter, c.UI)
		if err != nil {
			c.UI.Error(fmt.Sprintf("Error initializing listener of type %s: %s", lnConfig.Type, err))
			return 1
		}

		lns = append(lns, ServerListener{
			Listener: ln,
			config:   lnConfig.Config,
		})
	}

	listenerCloseFunc := func() {
		for _, ln := range lns {
			ln.Listener.Close()
		}
	}

	// cleanupGuard makes this safe against the explicit close on the
	// shutdown path below.
	defer c.cleanupGuard.Do(listenerCloseFunc)

	infoKeys = append(infoKeys, "version")
	verInfo := version.GetVersion()
	info["version"] = verInfo.FullVersionNumber(false)
	if verInfo.Revision != "" {
		info["version sha"] = strings.Trim(verInfo.Revision, "'")
		infoKeys = append(infoKeys, "version sha")
	}

	infoKeys = append(infoKeys, "recovery mode")
	info["recovery mode"] = "true"

	// Server configuration output
	padding := 24
	sort.Strings(infoKeys)
	c.UI.Output("==> Vault server configuration:\n")
	for _, k := range infoKeys {
		c.UI.Output(fmt.Sprintf(
			"%s%s: %s",
			strings.Repeat(" ", padding-len(k)),
			strings.Title(k),
			info[k]))
	}
	c.UI.Output("")

	// Start an HTTP server per listener.
	for _, ln := range lns {
		handler := vaulthttp.Handler(&vault.HandlerProperties{
			Core:                  core,
			MaxRequestSize:        ln.maxRequestSize,
			MaxRequestDuration:    ln.maxRequestDuration,
			DisablePrintableCheck: config.DisablePrintableCheck,
			RecoveryMode:          c.flagRecovery,
			RecoveryToken:         atomic.NewString(""),
		})

		// Shadows the server package import within this loop body.
		server := &http.Server{
			Handler:           handler,
			ReadHeaderTimeout: 10 * time.Second,
			ReadTimeout:       30 * time.Second,
			IdleTimeout:       5 * time.Minute,
			ErrorLog:          c.logger.StandardLogger(nil),
		}

		go server.Serve(ln.Listener)
	}

	// If seal keys could not be loaded earlier, that is only fatal when
	// the core is already initialized.
	if sealConfigError != nil {
		init, err := core.Initialized(context.Background())
		if err != nil {
			c.UI.Error(fmt.Sprintf("Error checking if core is initialized: %v", err))
			return 1
		}
		if init {
			c.UI.Error("Vault is initialized but no Seal key could be loaded")
			return 1
		}
	}

	if newCoreError != nil {
		c.UI.Warn(wrapAtLength(
			"WARNING! A non-fatal error occurred during initialization. Please " +
				"check the logs for more information."))
		c.UI.Warn("")
	}

	if !c.flagCombineLogs {
		c.UI.Output("==> Vault server started! Log data will stream in below:\n")
	}

	// Flush the gated log buffer and switch logging to the real output.
	c.logger.(hclog.OutputResettable).ResetOutputWithFlush(&hclog.LoggerOptions{
		Output: c.logOutput,
	}, c.gatedWriter)

	// Block until shutdown; SIGUSR2 dumps goroutine stacks to the log.
	for {
		select {
		case <-c.ShutdownCh:
			c.UI.Output("==> Vault shutdown triggered")

			c.cleanupGuard.Do(listenerCloseFunc)

			if err := core.Shutdown(); err != nil {
				c.UI.Error(fmt.Sprintf("Error with core shutdown: %s", err))
			}

			return 0

		case <-c.SigUSR2Ch:
			buf := make([]byte, 32*1024*1024)
			n := runtime.Stack(buf[:], true)
			c.logger.Info("goroutine trace", "stack", string(buf[:n]))
		}
	}

	// NOTE(review): unreachable — the select loop above only exits via return.
	return 0
}
697
698func (c *ServerCommand) adjustLogLevel(config *server.Config, logLevelWasNotSet bool) (string, error) {
699	var logLevelString string
700	if config.LogLevel != "" && logLevelWasNotSet {
701		configLogLevel := strings.ToLower(strings.TrimSpace(config.LogLevel))
702		logLevelString = configLogLevel
703		switch configLogLevel {
704		case "trace":
705			c.logger.SetLevel(log.Trace)
706		case "debug":
707			c.logger.SetLevel(log.Debug)
708		case "notice", "info", "":
709			c.logger.SetLevel(log.Info)
710		case "warn", "warning":
711			c.logger.SetLevel(log.Warn)
712		case "err", "error":
713			c.logger.SetLevel(log.Error)
714		default:
715			return "", fmt.Errorf("unknown log level: %s", config.LogLevel)
716		}
717	}
718	return logLevelString, nil
719}
720
721func (c *ServerCommand) processLogLevelAndFormat(config *server.Config) (log.Level, string, bool, logging.LogFormat, error) {
722	// Create a logger. We wrap it in a gated writer so that it doesn't
723	// start logging too early.
724	c.logOutput = os.Stderr
725	if c.flagCombineLogs {
726		c.logOutput = os.Stdout
727	}
728	c.gatedWriter = gatedwriter.NewWriter(c.logOutput)
729	var level log.Level
730	var logLevelWasNotSet bool
731	logFormat := logging.UnspecifiedFormat
732	logLevelString := c.flagLogLevel
733	c.flagLogLevel = strings.ToLower(strings.TrimSpace(c.flagLogLevel))
734	switch c.flagLogLevel {
735	case notSetValue, "":
736		logLevelWasNotSet = true
737		logLevelString = "info"
738		level = log.Info
739	case "trace":
740		level = log.Trace
741	case "debug":
742		level = log.Debug
743	case "notice", "info":
744		level = log.Info
745	case "warn", "warning":
746		level = log.Warn
747	case "err", "error":
748		level = log.Error
749	default:
750		return level, logLevelString, logLevelWasNotSet, logFormat, fmt.Errorf("unknown log level: %s", c.flagLogLevel)
751	}
752
753	if c.flagLogFormat != notSetValue {
754		var err error
755		logFormat, err = logging.ParseLogFormat(c.flagLogFormat)
756		if err != nil {
757			return level, logLevelString, logLevelWasNotSet, logFormat, err
758		}
759	}
760	if logFormat == logging.UnspecifiedFormat {
761		logFormat = logging.ParseEnvLogFormat()
762	}
763	if logFormat == logging.UnspecifiedFormat {
764		var err error
765		logFormat, err = logging.ParseLogFormat(config.LogFormat)
766		if err != nil {
767			return level, logLevelString, logLevelWasNotSet, logFormat, err
768		}
769	}
770
771	return level, logLevelString, logLevelWasNotSet, logFormat, nil
772}
773
774func (c *ServerCommand) Run(args []string) int {
775	f := c.Flags()
776
777	if err := f.Parse(args); err != nil {
778		c.UI.Error(err.Error())
779		return 1
780	}
781
782	if c.flagRecovery {
783		return c.runRecoveryMode()
784	}
785
786	// Automatically enable dev mode if other dev flags are provided.
787	if c.flagDevConsul || c.flagDevHA || c.flagDevTransactional || c.flagDevLeasedKV || c.flagDevThreeNode || c.flagDevFourCluster || c.flagDevAutoSeal || c.flagDevKVV1 {
788		c.flagDev = true
789	}
790
791	// Validation
792	if !c.flagDev {
793		switch {
794		case len(c.flagConfigs) == 0:
795			c.UI.Error("Must specify at least one config path using -config")
796			return 1
797		case c.flagDevRootTokenID != "":
798			c.UI.Warn(wrapAtLength(
799				"You cannot specify a custom root token ID outside of \"dev\" mode. " +
800					"Your request has been ignored."))
801			c.flagDevRootTokenID = ""
802		}
803	}
804
805	// Load the configuration
806	var config *server.Config
807	if c.flagDev {
808		var devStorageType string
809		switch {
810		case c.flagDevConsul:
811			devStorageType = "consul"
812		case c.flagDevHA && c.flagDevTransactional:
813			devStorageType = "inmem_transactional_ha"
814		case !c.flagDevHA && c.flagDevTransactional:
815			devStorageType = "inmem_transactional"
816		case c.flagDevHA && !c.flagDevTransactional:
817			devStorageType = "inmem_ha"
818		default:
819			devStorageType = "inmem"
820		}
821		config = server.DevConfig(devStorageType)
822		if c.flagDevListenAddr != "" {
823			config.Listeners[0].Config["address"] = c.flagDevListenAddr
824		}
825	}
826
827	parsedConfig, err := c.parseConfig()
828	if err != nil {
829		c.UI.Error(err.Error())
830		return 1
831	}
832	if config == nil {
833		config = parsedConfig
834	} else {
835		config = config.Merge(parsedConfig)
836	}
837
838	// Ensure at least one config was found.
839	if config == nil {
840		c.UI.Output(wrapAtLength(
841			"No configuration files found. Please provide configurations with the " +
842				"-config flag. If you are supplying the path to a directory, please " +
843				"ensure the directory contains files with the .hcl or .json " +
844				"extension."))
845		return 1
846	}
847
848	level, logLevelString, logLevelWasNotSet, logFormat, err := c.processLogLevelAndFormat(config)
849	if err != nil {
850		c.UI.Error(err.Error())
851		return 1
852	}
853
854	if c.flagDevThreeNode || c.flagDevFourCluster {
855		c.logger = log.New(&log.LoggerOptions{
856			Mutex:  &sync.Mutex{},
857			Output: c.gatedWriter,
858			Level:  log.Trace,
859		})
860	} else {
861		c.logger = log.New(&log.LoggerOptions{
862			Output: c.gatedWriter,
863			Level:  level,
864			// Note that if logFormat is either unspecified or standard, then
865			// the resulting logger's format will be standard.
866			JSONFormat: logFormat == logging.JSONFormat,
867		})
868	}
869
870	allLoggers := []log.Logger{c.logger}
871
872	logLevelStr, err := c.adjustLogLevel(config, logLevelWasNotSet)
873	if err != nil {
874		c.UI.Error(err.Error())
875		return 1
876	}
877	if logLevelStr != "" {
878		logLevelString = logLevelStr
879	}
880
881	// create GRPC logger
882	namedGRPCLogFaker := c.logger.Named("grpclogfaker")
883	allLoggers = append(allLoggers, namedGRPCLogFaker)
884	grpclog.SetLogger(&grpclogFaker{
885		logger: namedGRPCLogFaker,
886		log:    os.Getenv("VAULT_GRPC_LOGGING") != "",
887	})
888
889	if memProfilerEnabled {
890		c.startMemProfiler()
891	}
892
893	// Ensure that a backend is provided
894	if config.Storage == nil {
895		c.UI.Output("A storage backend must be specified")
896		return 1
897	}
898
899	if config.DefaultMaxRequestDuration != 0 {
900		vault.DefaultMaxRequestDuration = config.DefaultMaxRequestDuration
901	}
902
903	// log proxy settings
904	proxyCfg := httpproxy.FromEnvironment()
905	c.logger.Info("proxy environment", "http_proxy", proxyCfg.HTTPProxy,
906		"https_proxy", proxyCfg.HTTPSProxy, "no_proxy", proxyCfg.NoProxy)
907
908	// If mlockall(2) isn't supported, show a warning. We disable this in dev
909	// because it is quite scary to see when first using Vault. We also disable
910	// this if the user has explicitly disabled mlock in configuration.
911	if !c.flagDev && !config.DisableMlock && !mlock.Supported() {
912		c.UI.Warn(wrapAtLength(
913			"WARNING! mlock is not supported on this system! An mlockall(2)-like " +
914				"syscall to prevent memory from being swapped to disk is not " +
915				"supported on this system. For better security, only run Vault on " +
916				"systems where this call is supported. If you are running Vault " +
917				"in a Docker container, provide the IPC_LOCK cap to the container."))
918	}
919
920	metricsHelper, err := c.setupTelemetry(config)
921	if err != nil {
922		c.UI.Error(fmt.Sprintf("Error initializing telemetry: %s", err))
923		return 1
924	}
925
926	// Initialize the backend
927	factory, exists := c.PhysicalBackends[config.Storage.Type]
928	if !exists {
929		c.UI.Error(fmt.Sprintf("Unknown storage type %s", config.Storage.Type))
930		return 1
931	}
932
933	// Do any custom configuration needed per backend
934	switch config.Storage.Type {
935	case "consul":
936		if config.ServiceRegistration == nil {
937			// If Consul is configured for storage and service registration is unconfigured,
938			// use Consul for service registration without requiring additional configuration.
939			// This maintains backward-compatibility.
940			config.ServiceRegistration = &server.ServiceRegistration{
941				Type:   "consul",
942				Config: config.Storage.Config,
943			}
944		}
945	case "raft":
946		if envCA := os.Getenv("VAULT_CLUSTER_ADDR"); envCA != "" {
947			config.ClusterAddr = envCA
948		}
949		if len(config.ClusterAddr) == 0 {
950			c.UI.Error("Cluster address must be set when using raft storage")
951			return 1
952		}
953	}
954
955	namedStorageLogger := c.logger.Named("storage." + config.Storage.Type)
956	allLoggers = append(allLoggers, namedStorageLogger)
957	backend, err := factory(config.Storage.Config, namedStorageLogger)
958	if err != nil {
959		c.UI.Error(fmt.Sprintf("Error initializing storage of type %s: %s", config.Storage.Type, err))
960		return 1
961	}
962
963	// Prevent server startup if migration is active
964	if c.storageMigrationActive(backend) {
965		return 1
966	}
967
968	// Instantiate the wait group
969	c.WaitGroup = &sync.WaitGroup{}
970
971	// Initialize the Service Discovery, if there is one
972	var configSR sr.ServiceRegistration
973	if config.ServiceRegistration != nil {
974		sdFactory, ok := c.ServiceRegistrations[config.ServiceRegistration.Type]
975		if !ok {
976			c.UI.Error(fmt.Sprintf("Unknown service_registration type %s", config.ServiceRegistration.Type))
977			return 1
978		}
979
980		namedSDLogger := c.logger.Named("service_registration." + config.ServiceRegistration.Type)
981		allLoggers = append(allLoggers, namedSDLogger)
982
983		// Since we haven't even begun starting Vault's core yet,
984		// we know that Vault is in its pre-running state.
985		state := sr.State{
986			VaultVersion:         version.GetVersion().VersionNumber(),
987			IsInitialized:        false,
988			IsSealed:             true,
989			IsActive:             false,
990			IsPerformanceStandby: false,
991		}
992		configSR, err = sdFactory(config.ServiceRegistration.Config, namedSDLogger, state, config.Storage.RedirectAddr)
993		if err != nil {
994			c.UI.Error(fmt.Sprintf("Error initializing service_registration of type %s: %s", config.ServiceRegistration.Type, err))
995			return 1
996		}
997		if err := configSR.Run(c.ShutdownCh, c.WaitGroup); err != nil {
998			c.UI.Error(fmt.Sprintf("Error running service_registration of type %s: %s", config.ServiceRegistration.Type, err))
999			return 1
1000		}
1001	}
1002
1003	infoKeys := make([]string, 0, 10)
1004	info := make(map[string]string)
1005	info["log level"] = logLevelString
1006	infoKeys = append(infoKeys, "log level")
1007
1008	var barrierSeal vault.Seal
1009	var unwrapSeal vault.Seal
1010
1011	var sealConfigError error
1012	if c.flagDevAutoSeal {
1013		barrierSeal = vault.NewAutoSeal(vaultseal.NewTestSeal(nil))
1014	} else {
1015		// Handle the case where no seal is provided
1016		switch len(config.Seals) {
1017		case 0:
1018			config.Seals = append(config.Seals, &server.Seal{Type: wrapping.Shamir})
1019		case 1:
1020			// If there's only one seal and it's disabled assume they want to
1021			// migrate to a shamir seal and simply didn't provide it
1022			if config.Seals[0].Disabled {
1023				config.Seals = append(config.Seals, &server.Seal{Type: wrapping.Shamir})
1024			}
1025		}
1026		for _, configSeal := range config.Seals {
1027			sealType := wrapping.Shamir
1028			if !configSeal.Disabled && os.Getenv("VAULT_SEAL_TYPE") != "" {
1029				sealType = os.Getenv("VAULT_SEAL_TYPE")
1030				configSeal.Type = sealType
1031			} else {
1032				sealType = configSeal.Type
1033			}
1034
1035			var seal vault.Seal
1036			sealLogger := c.logger.Named(sealType)
1037			allLoggers = append(allLoggers, sealLogger)
1038			seal, sealConfigError = serverseal.ConfigureSeal(configSeal, &infoKeys, &info, sealLogger, vault.NewDefaultSeal(&vaultseal.Access{
1039				Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
1040					Logger: c.logger.Named("shamir"),
1041				}),
1042			}))
1043			if sealConfigError != nil {
1044				if !errwrap.ContainsType(sealConfigError, new(logical.KeyNotFoundError)) {
1045					c.UI.Error(fmt.Sprintf(
1046						"Error parsing Seal configuration: %s", sealConfigError))
1047					return 1
1048				}
1049			}
1050			if seal == nil {
1051				c.UI.Error(fmt.Sprintf(
1052					"After configuring seal nil returned, seal type was %s", sealType))
1053				return 1
1054			}
1055
1056			if configSeal.Disabled {
1057				unwrapSeal = seal
1058			} else {
1059				barrierSeal = seal
1060			}
1061
1062			// Ensure that the seal finalizer is called, even if using verify-only
1063			defer func() {
1064				err = seal.Finalize(context.Background())
1065				if err != nil {
1066					c.UI.Error(fmt.Sprintf("Error finalizing seals: %v", err))
1067				}
1068			}()
1069
1070		}
1071	}
1072
1073	if barrierSeal == nil {
1074		c.UI.Error(fmt.Sprintf("Could not create barrier seal! Most likely proper Seal configuration information was not set, but no error was generated."))
1075		return 1
1076	}
1077
1078	// prepare a secure random reader for core
1079	secureRandomReader, err := createSecureRandomReaderFunc(config, &barrierSeal)
1080	if err != nil {
1081		c.UI.Error(err.Error())
1082		return 1
1083	}
1084
1085	coreConfig := &vault.CoreConfig{
1086		RawConfig:                 config,
1087		Physical:                  backend,
1088		RedirectAddr:              config.Storage.RedirectAddr,
1089		StorageType:               config.Storage.Type,
1090		HAPhysical:                nil,
1091		ServiceRegistration:       configSR,
1092		Seal:                      barrierSeal,
1093		AuditBackends:             c.AuditBackends,
1094		CredentialBackends:        c.CredentialBackends,
1095		LogicalBackends:           c.LogicalBackends,
1096		Logger:                    c.logger,
1097		DisableCache:              config.DisableCache,
1098		DisableMlock:              config.DisableMlock,
1099		MaxLeaseTTL:               config.MaxLeaseTTL,
1100		DefaultLeaseTTL:           config.DefaultLeaseTTL,
1101		ClusterName:               config.ClusterName,
1102		CacheSize:                 config.CacheSize,
1103		PluginDirectory:           config.PluginDirectory,
1104		EnableUI:                  config.EnableUI,
1105		EnableRaw:                 config.EnableRawEndpoint,
1106		DisableSealWrap:           config.DisableSealWrap,
1107		DisablePerformanceStandby: config.DisablePerformanceStandby,
1108		DisableIndexing:           config.DisableIndexing,
1109		AllLoggers:                allLoggers,
1110		BuiltinRegistry:           builtinplugins.Registry,
1111		DisableKeyEncodingChecks:  config.DisablePrintableCheck,
1112		MetricsHelper:             metricsHelper,
1113		SecureRandomReader:        secureRandomReader,
1114	}
1115	if c.flagDev {
1116		coreConfig.DevToken = c.flagDevRootTokenID
1117		if c.flagDevLeasedKV {
1118			coreConfig.LogicalBackends["kv"] = vault.LeasedPassthroughBackendFactory
1119		}
1120		if c.flagDevPluginDir != "" {
1121			coreConfig.PluginDirectory = c.flagDevPluginDir
1122		}
1123		if c.flagDevLatency > 0 {
1124			injectLatency := time.Duration(c.flagDevLatency) * time.Millisecond
1125			if _, txnOK := backend.(physical.Transactional); txnOK {
1126				coreConfig.Physical = physical.NewTransactionalLatencyInjector(backend, injectLatency, c.flagDevLatencyJitter, c.logger)
1127			} else {
1128				coreConfig.Physical = physical.NewLatencyInjector(backend, injectLatency, c.flagDevLatencyJitter, c.logger)
1129			}
1130		}
1131	}
1132
1133	if c.flagDevThreeNode {
1134		return c.enableThreeNodeDevCluster(coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR"))
1135	}
1136
1137	if c.flagDevFourCluster {
1138		return enableFourClusterDev(c, coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR"))
1139	}
1140
1141	var disableClustering bool
1142
1143	// Initialize the separate HA storage backend, if it exists
1144	var ok bool
1145	if config.HAStorage != nil {
		// TODO: Remove when Raft can serve as the ha_storage backend.
1147		// See https://github.com/hashicorp/vault/issues/8206
1148		if config.HAStorage.Type == "raft" {
1149			c.UI.Error("Raft cannot be used as seperate HA storage at this time")
1150			return 1
1151		}
1152		factory, exists := c.PhysicalBackends[config.HAStorage.Type]
1153		if !exists {
1154			c.UI.Error(fmt.Sprintf("Unknown HA storage type %s", config.HAStorage.Type))
1155			return 1
1156
1157		}
1158		habackend, err := factory(config.HAStorage.Config, c.logger)
1159		if err != nil {
1160			c.UI.Error(fmt.Sprintf(
1161				"Error initializing HA storage of type %s: %s", config.HAStorage.Type, err))
1162			return 1
1163
1164		}
1165
1166		if coreConfig.HAPhysical, ok = habackend.(physical.HABackend); !ok {
1167			c.UI.Error("Specified HA storage does not support HA")
1168			return 1
1169		}
1170
1171		if !coreConfig.HAPhysical.HAEnabled() {
1172			c.UI.Error("Specified HA storage has HA support disabled; please consult documentation")
1173			return 1
1174		}
1175
1176		coreConfig.RedirectAddr = config.HAStorage.RedirectAddr
1177		disableClustering = config.HAStorage.DisableClustering
1178		if !disableClustering {
1179			coreConfig.ClusterAddr = config.HAStorage.ClusterAddr
1180		}
1181	} else {
1182		if coreConfig.HAPhysical, ok = backend.(physical.HABackend); ok {
1183			coreConfig.RedirectAddr = config.Storage.RedirectAddr
1184			disableClustering = config.Storage.DisableClustering
1185			if !disableClustering {
1186				coreConfig.ClusterAddr = config.Storage.ClusterAddr
1187			}
1188		}
1189	}
1190
1191	if envRA := os.Getenv("VAULT_API_ADDR"); envRA != "" {
1192		coreConfig.RedirectAddr = envRA
1193	} else if envRA := os.Getenv("VAULT_REDIRECT_ADDR"); envRA != "" {
1194		coreConfig.RedirectAddr = envRA
1195	} else if envAA := os.Getenv("VAULT_ADVERTISE_ADDR"); envAA != "" {
1196		coreConfig.RedirectAddr = envAA
1197	}
1198
1199	// Attempt to detect the redirect address, if possible
1200	if coreConfig.RedirectAddr == "" {
1201		c.logger.Warn("no `api_addr` value specified in config or in VAULT_API_ADDR; falling back to detection if possible, but this value should be manually set")
1202	}
1203	var detect physical.RedirectDetect
1204	if coreConfig.HAPhysical != nil && coreConfig.HAPhysical.HAEnabled() {
1205		detect, ok = coreConfig.HAPhysical.(physical.RedirectDetect)
1206	} else {
1207		detect, ok = coreConfig.Physical.(physical.RedirectDetect)
1208	}
1209	if ok && coreConfig.RedirectAddr == "" {
1210		redirect, err := c.detectRedirect(detect, config)
1211		if err != nil {
1212			c.UI.Error(fmt.Sprintf("Error detecting api address: %s", err))
1213		} else if redirect == "" {
1214			c.UI.Error("Failed to detect api address")
1215		} else {
1216			coreConfig.RedirectAddr = redirect
1217		}
1218	}
1219	if coreConfig.RedirectAddr == "" && c.flagDev {
1220		coreConfig.RedirectAddr = fmt.Sprintf("http://%s", config.Listeners[0].Config["address"])
1221	}
1222
1223	// After the redirect bits are sorted out, if no cluster address was
1224	// explicitly given, derive one from the redirect addr
1225	if disableClustering {
1226		coreConfig.ClusterAddr = ""
1227	} else if envCA := os.Getenv("VAULT_CLUSTER_ADDR"); envCA != "" {
1228		coreConfig.ClusterAddr = envCA
1229	} else {
1230		var addrToUse string
1231		switch {
1232		case coreConfig.ClusterAddr == "" && coreConfig.RedirectAddr != "":
1233			addrToUse = coreConfig.RedirectAddr
1234		case c.flagDev:
1235			addrToUse = fmt.Sprintf("http://%s", config.Listeners[0].Config["address"])
1236		default:
1237			goto CLUSTER_SYNTHESIS_COMPLETE
1238		}
1239		u, err := url.ParseRequestURI(addrToUse)
1240		if err != nil {
1241			c.UI.Error(fmt.Sprintf(
1242				"Error parsing synthesized cluster address %s: %v", addrToUse, err))
1243			return 1
1244		}
1245		host, port, err := net.SplitHostPort(u.Host)
1246		if err != nil {
1247			// This sucks, as it's a const in the function but not exported in the package
1248			if strings.Contains(err.Error(), "missing port in address") {
1249				host = u.Host
1250				port = "443"
1251			} else {
1252				c.UI.Error(fmt.Sprintf("Error parsing api address: %v", err))
1253				return 1
1254			}
1255		}
1256		nPort, err := strconv.Atoi(port)
1257		if err != nil {
1258			c.UI.Error(fmt.Sprintf(
1259				"Error parsing synthesized address; failed to convert %q to a numeric: %v", port, err))
1260			return 1
1261		}
1262		u.Host = net.JoinHostPort(host, strconv.Itoa(nPort+1))
1263		// Will always be TLS-secured
1264		u.Scheme = "https"
1265		coreConfig.ClusterAddr = u.String()
1266	}
1267
1268CLUSTER_SYNTHESIS_COMPLETE:
1269
1270	if coreConfig.RedirectAddr == coreConfig.ClusterAddr && len(coreConfig.RedirectAddr) != 0 {
1271		c.UI.Error(fmt.Sprintf(
1272			"Address %q used for both API and cluster addresses", coreConfig.RedirectAddr))
1273		return 1
1274	}
1275
1276	if coreConfig.ClusterAddr != "" {
1277		// Force https as we'll always be TLS-secured
1278		u, err := url.ParseRequestURI(coreConfig.ClusterAddr)
1279		if err != nil {
1280			c.UI.Error(fmt.Sprintf("Error parsing cluster address %s: %v", coreConfig.ClusterAddr, err))
1281			return 11
1282		}
1283		u.Scheme = "https"
1284		coreConfig.ClusterAddr = u.String()
1285	}
1286
1287	// Override the UI enabling config by the environment variable
1288	if enableUI := os.Getenv("VAULT_UI"); enableUI != "" {
1289		var err error
1290		coreConfig.EnableUI, err = strconv.ParseBool(enableUI)
1291		if err != nil {
1292			c.UI.Output("Error parsing the environment variable VAULT_UI")
1293			return 1
1294		}
1295	}
1296
1297	// If ServiceRegistration is configured, then the backend must support HA
1298	isBackendHA := coreConfig.HAPhysical != nil && coreConfig.HAPhysical.HAEnabled()
1299	if !c.flagDev && (coreConfig.ServiceRegistration != nil) && !isBackendHA {
1300		c.UI.Output("service_registration is configured, but storage does not support HA")
1301		return 1
1302	}
1303
1304	// Apply any enterprise configuration onto the coreConfig.
1305	adjustCoreConfigForEnt(config, coreConfig)
1306
1307	// Initialize the core
1308	core, newCoreError := vault.NewCore(coreConfig)
1309	if newCoreError != nil {
1310		if vault.IsFatalError(newCoreError) {
1311			c.UI.Error(fmt.Sprintf("Error initializing core: %s", newCoreError))
1312			return 1
1313		}
1314	}
1315
1316	// Copy the reload funcs pointers back
1317	c.reloadFuncs = coreConfig.ReloadFuncs
1318	c.reloadFuncsLock = coreConfig.ReloadFuncsLock
1319
1320	// Compile server information for output later
1321	info["storage"] = config.Storage.Type
1322	info["mlock"] = fmt.Sprintf(
1323		"supported: %v, enabled: %v",
1324		mlock.Supported(), !config.DisableMlock && mlock.Supported())
1325	infoKeys = append(infoKeys, "mlock", "storage")
1326
1327	if coreConfig.ClusterAddr != "" {
1328		info["cluster address"] = coreConfig.ClusterAddr
1329		infoKeys = append(infoKeys, "cluster address")
1330	}
1331	if coreConfig.RedirectAddr != "" {
1332		info["api address"] = coreConfig.RedirectAddr
1333		infoKeys = append(infoKeys, "api address")
1334	}
1335
1336	if config.HAStorage != nil {
1337		info["HA storage"] = config.HAStorage.Type
1338		infoKeys = append(infoKeys, "HA storage")
1339	} else {
1340		// If the storage supports HA, then note it
1341		if coreConfig.HAPhysical != nil {
1342			if coreConfig.HAPhysical.HAEnabled() {
1343				info["storage"] += " (HA available)"
1344			} else {
1345				info["storage"] += " (HA disabled)"
1346			}
1347		}
1348	}
1349
1350	clusterAddrs := []*net.TCPAddr{}
1351
1352	// Initialize the listeners
1353	lns := make([]ServerListener, 0, len(config.Listeners))
1354	c.reloadFuncsLock.Lock()
1355	for i, lnConfig := range config.Listeners {
1356		ln, props, reloadFunc, err := server.NewListener(lnConfig.Type, lnConfig.Config, c.gatedWriter, c.UI)
1357		if err != nil {
1358			c.UI.Error(fmt.Sprintf("Error initializing listener of type %s: %s", lnConfig.Type, err))
1359			return 1
1360		}
1361
1362		if reloadFunc != nil {
1363			relSlice := (*c.reloadFuncs)["listener|"+lnConfig.Type]
1364			relSlice = append(relSlice, reloadFunc)
1365			(*c.reloadFuncs)["listener|"+lnConfig.Type] = relSlice
1366		}
1367
1368		if !disableClustering && lnConfig.Type == "tcp" {
1369			var addrRaw interface{}
1370			var addr string
1371			var ok bool
1372			if addrRaw, ok = lnConfig.Config["cluster_address"]; ok {
1373				addr = addrRaw.(string)
1374				tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
1375				if err != nil {
1376					c.UI.Error(fmt.Sprintf("Error resolving cluster_address: %s", err))
1377					return 1
1378				}
1379				clusterAddrs = append(clusterAddrs, tcpAddr)
1380			} else {
1381				tcpAddr, ok := ln.Addr().(*net.TCPAddr)
1382				if !ok {
1383					c.UI.Error("Failed to parse tcp listener")
1384					return 1
1385				}
1386				clusterAddr := &net.TCPAddr{
1387					IP:   tcpAddr.IP,
1388					Port: tcpAddr.Port + 1,
1389				}
1390				clusterAddrs = append(clusterAddrs, clusterAddr)
1391				addr = clusterAddr.String()
1392			}
1393			props["cluster address"] = addr
1394		}
1395
1396		var maxRequestSize int64 = vaulthttp.DefaultMaxRequestSize
1397		if valRaw, ok := lnConfig.Config["max_request_size"]; ok {
1398			val, err := parseutil.ParseInt(valRaw)
1399			if err != nil {
1400				c.UI.Error(fmt.Sprintf("Could not parse max_request_size value %v", valRaw))
1401				return 1
1402			}
1403
1404			if val >= 0 {
1405				maxRequestSize = val
1406			}
1407		}
1408		props["max_request_size"] = fmt.Sprintf("%d", maxRequestSize)
1409
1410		maxRequestDuration := vault.DefaultMaxRequestDuration
1411		if valRaw, ok := lnConfig.Config["max_request_duration"]; ok {
1412			val, err := parseutil.ParseDurationSecond(valRaw)
1413			if err != nil {
1414				c.UI.Error(fmt.Sprintf("Could not parse max_request_duration value %v", valRaw))
1415				return 1
1416			}
1417
1418			if val >= 0 {
1419				maxRequestDuration = val
1420			}
1421		}
1422		props["max_request_duration"] = fmt.Sprintf("%s", maxRequestDuration.String())
1423
1424		var unauthenticatedMetricsAccess bool
1425		if telemetryRaw, ok := lnConfig.Config["telemetry"]; ok {
1426			telemetry, ok := telemetryRaw.([]map[string]interface{})
1427			if !ok {
1428				c.UI.Error(fmt.Sprintf("Could not parse telemetry sink value %v", telemetryRaw))
1429				return 1
1430			}
1431
1432			for _, item := range telemetry {
1433				if valRaw, ok := item["unauthenticated_metrics_access"]; ok {
1434					unauthenticatedMetricsAccess, err = parseutil.ParseBool(valRaw)
1435					if err != nil {
1436						c.UI.Error(fmt.Sprintf("Could not parse unauthenticated_metrics_access value %v", valRaw))
1437						return 1
1438					}
1439				}
1440			}
1441		}
1442
1443		lns = append(lns, ServerListener{
1444			Listener:                     ln,
1445			config:                       lnConfig.Config,
1446			maxRequestSize:               maxRequestSize,
1447			maxRequestDuration:           maxRequestDuration,
1448			unauthenticatedMetricsAccess: unauthenticatedMetricsAccess,
1449		})
1450
1451		// Store the listener props for output later
1452		key := fmt.Sprintf("listener %d", i+1)
1453		propsList := make([]string, 0, len(props))
1454		for k, v := range props {
1455			propsList = append(propsList, fmt.Sprintf(
1456				"%s: %q", k, v))
1457		}
1458		sort.Strings(propsList)
1459		infoKeys = append(infoKeys, key)
1460		info[key] = fmt.Sprintf(
1461			"%s (%s)", lnConfig.Type, strings.Join(propsList, ", "))
1462
1463	}
1464	c.reloadFuncsLock.Unlock()
1465	if !disableClustering {
1466		if c.logger.IsDebug() {
1467			c.logger.Debug("cluster listener addresses synthesized", "cluster_addresses", clusterAddrs)
1468		}
1469	}
1470
1471	// Make sure we close all listeners from this point on
1472	listenerCloseFunc := func() {
1473		for _, ln := range lns {
1474			ln.Listener.Close()
1475		}
1476	}
1477
1478	defer c.cleanupGuard.Do(listenerCloseFunc)
1479
1480	infoKeys = append(infoKeys, "version")
1481	verInfo := version.GetVersion()
1482	info["version"] = verInfo.FullVersionNumber(false)
1483	if verInfo.Revision != "" {
1484		info["version sha"] = strings.Trim(verInfo.Revision, "'")
1485		infoKeys = append(infoKeys, "version sha")
1486	}
1487	infoKeys = append(infoKeys, "cgo")
1488	info["cgo"] = "disabled"
1489	if version.CgoEnabled {
1490		info["cgo"] = "enabled"
1491	}
1492
1493	infoKeys = append(infoKeys, "recovery mode")
1494	info["recovery mode"] = "false"
1495
1496	// Server configuration output
1497	padding := 24
1498	sort.Strings(infoKeys)
1499	c.UI.Output("==> Vault server configuration:\n")
1500	for _, k := range infoKeys {
1501		c.UI.Output(fmt.Sprintf(
1502			"%s%s: %s",
1503			strings.Repeat(" ", padding-len(k)),
1504			strings.Title(k),
1505			info[k]))
1506	}
1507	c.UI.Output("")
1508
1509	// Tests might not want to start a vault server and just want to verify
1510	// the configuration.
1511	if c.flagTestVerifyOnly {
1512		return 0
1513	}
1514
1515	// This needs to happen before we first unseal, so before we trigger dev
1516	// mode if it's set
1517	core.SetClusterListenerAddrs(clusterAddrs)
1518	core.SetClusterHandler(vaulthttp.Handler(&vault.HandlerProperties{
1519		Core: core,
1520	}))
1521
1522	// Before unsealing with stored keys, setup seal migration if needed
1523	if err := adjustCoreForSealMigration(c.logger, core, barrierSeal, unwrapSeal); err != nil {
1524		c.UI.Error(err.Error())
1525		return 1
1526	}
1527
1528	// Attempt unsealing in a background goroutine. This is needed for when a
1529	// Vault cluster with multiple servers is configured with auto-unseal but is
1530	// uninitialized. Once one server initializes the storage backend, this
1531	// goroutine will pick up the unseal keys and unseal this instance.
1532	if !core.IsInSealMigration() {
1533		go func() {
1534			for {
1535				err := core.UnsealWithStoredKeys(context.Background())
1536				if err == nil {
1537					return
1538				}
1539
1540				if vault.IsFatalError(err) {
1541					c.logger.Error("error unsealing core", "error", err)
1542					return
1543				} else {
1544					c.logger.Warn("failed to unseal core", "error", err)
1545				}
1546
1547				select {
1548				case <-c.ShutdownCh:
1549					return
1550				case <-time.After(5 * time.Second):
1551				}
1552			}
1553		}()
1554	}
1555
1556	// When the underlying storage is raft, kick off retry join if it was specified
1557	// in the configuration
1558	if config.Storage.Type == "raft" {
1559		if err := core.InitiateRetryJoin(context.Background()); err != nil {
1560			c.UI.Error(fmt.Sprintf("Failed to initiate raft retry join, %q", err.Error()))
1561			return 1
1562		}
1563	}
1564
1565	// Perform initialization of HTTP server after the verifyOnly check.
1566	// If we're in Dev mode, then initialize the core
1567	if c.flagDev && !c.flagDevSkipInit {
1568		init, err := c.enableDev(core, coreConfig)
1569		if err != nil {
1570			c.UI.Error(fmt.Sprintf("Error initializing Dev mode: %s", err))
1571			return 1
1572		}
1573
1574		var plugins, pluginsNotLoaded []string
1575		if c.flagDevPluginDir != "" && c.flagDevPluginInit {
1576
1577			f, err := os.Open(c.flagDevPluginDir)
1578			if err != nil {
1579				c.UI.Error(fmt.Sprintf("Error reading plugin dir: %s", err))
1580				return 1
1581			}
1582
1583			list, err := f.Readdirnames(0)
1584			f.Close()
1585			if err != nil {
1586				c.UI.Error(fmt.Sprintf("Error listing plugins: %s", err))
1587				return 1
1588			}
1589
1590			for _, name := range list {
1591				path := filepath.Join(f.Name(), name)
1592				if err := c.addPlugin(path, init.RootToken, core); err != nil {
1593					if !errwrap.Contains(err, vault.ErrPluginBadType.Error()) {
1594						c.UI.Error(fmt.Sprintf("Error enabling plugin %s: %s", name, err))
1595						return 1
1596					}
1597					pluginsNotLoaded = append(pluginsNotLoaded, name)
1598					continue
1599				}
1600				plugins = append(plugins, name)
1601			}
1602
1603			sort.Strings(plugins)
1604		}
1605
1606		// Print the big dev mode warning!
1607		c.UI.Warn(wrapAtLength(
1608			"WARNING! dev mode is enabled! In this mode, Vault runs entirely " +
1609				"in-memory and starts unsealed with a single unseal key. The root " +
1610				"token is already authenticated to the CLI, so you can immediately " +
1611				"begin using Vault."))
1612		c.UI.Warn("")
1613		c.UI.Warn("You may need to set the following environment variable:")
1614		c.UI.Warn("")
1615
1616		endpointURL := "http://" + config.Listeners[0].Config["address"].(string)
1617		if runtime.GOOS == "windows" {
1618			c.UI.Warn("PowerShell:")
1619			c.UI.Warn(fmt.Sprintf("    $env:VAULT_ADDR=\"%s\"", endpointURL))
1620			c.UI.Warn("cmd.exe:")
1621			c.UI.Warn(fmt.Sprintf("    set VAULT_ADDR=%s", endpointURL))
1622		} else {
1623			c.UI.Warn(fmt.Sprintf("    $ export VAULT_ADDR='%s'", endpointURL))
1624		}
1625
1626		// Unseal key is not returned if stored shares is supported
1627		if len(init.SecretShares) > 0 {
1628			c.UI.Warn("")
1629			c.UI.Warn(wrapAtLength(
1630				"The unseal key and root token are displayed below in case you want " +
1631					"to seal/unseal the Vault or re-authenticate."))
1632			c.UI.Warn("")
1633			c.UI.Warn(fmt.Sprintf("Unseal Key: %s", base64.StdEncoding.EncodeToString(init.SecretShares[0])))
1634		}
1635
1636		if len(init.RecoveryShares) > 0 {
1637			c.UI.Warn("")
1638			c.UI.Warn(wrapAtLength(
1639				"The recovery key and root token are displayed below in case you want " +
1640					"to seal/unseal the Vault or re-authenticate."))
1641			c.UI.Warn("")
1642			c.UI.Warn(fmt.Sprintf("Recovery Key: %s", base64.StdEncoding.EncodeToString(init.RecoveryShares[0])))
1643		}
1644
1645		c.UI.Warn(fmt.Sprintf("Root Token: %s", init.RootToken))
1646
1647		if len(plugins) > 0 {
1648			c.UI.Warn("")
1649			c.UI.Warn(wrapAtLength(
1650				"The following dev plugins are registered in the catalog:"))
1651			for _, p := range plugins {
1652				c.UI.Warn(fmt.Sprintf("    - %s", p))
1653			}
1654		}
1655
1656		if len(pluginsNotLoaded) > 0 {
1657			c.UI.Warn("")
1658			c.UI.Warn(wrapAtLength(
1659				"The following dev plugins FAILED to be registered in the catalog due to unknown type:"))
1660			for _, p := range pluginsNotLoaded {
1661				c.UI.Warn(fmt.Sprintf("    - %s", p))
1662			}
1663		}
1664
1665		c.UI.Warn("")
1666		c.UI.Warn(wrapAtLength(
1667			"Development mode should NOT be used in production installations!"))
1668		c.UI.Warn("")
1669	}
1670
1671	// Initialize the HTTP servers
1672	for _, ln := range lns {
1673		handler := vaulthttp.Handler(&vault.HandlerProperties{
1674			Core:                         core,
1675			MaxRequestSize:               ln.maxRequestSize,
1676			MaxRequestDuration:           ln.maxRequestDuration,
1677			DisablePrintableCheck:        config.DisablePrintableCheck,
1678			UnauthenticatedMetricsAccess: ln.unauthenticatedMetricsAccess,
1679			RecoveryMode:                 c.flagRecovery,
1680		})
1681
1682		// We perform validation on the config earlier, we can just cast here
1683		if _, ok := ln.config["x_forwarded_for_authorized_addrs"]; ok {
1684			hopSkips := ln.config["x_forwarded_for_hop_skips"].(int)
1685			authzdAddrs := ln.config["x_forwarded_for_authorized_addrs"].([]*sockaddr.SockAddrMarshaler)
1686			rejectNotPresent := ln.config["x_forwarded_for_reject_not_present"].(bool)
1687			rejectNonAuthz := ln.config["x_forwarded_for_reject_not_authorized"].(bool)
1688			if len(authzdAddrs) > 0 {
1689				handler = vaulthttp.WrapForwardedForHandler(handler, authzdAddrs, rejectNotPresent, rejectNonAuthz, hopSkips)
1690			}
1691		}
1692
1693		// server defaults
1694		server := &http.Server{
1695			Handler:           handler,
1696			ReadHeaderTimeout: 10 * time.Second,
1697			ReadTimeout:       30 * time.Second,
1698			IdleTimeout:       5 * time.Minute,
1699			ErrorLog:          c.logger.StandardLogger(nil),
1700		}
1701
1702		// override server defaults with config values for read/write/idle timeouts if configured
1703		if readHeaderTimeoutInterface, ok := ln.config["http_read_header_timeout"]; ok {
1704			readHeaderTimeout, err := parseutil.ParseDurationSecond(readHeaderTimeoutInterface)
1705			if err != nil {
1706				c.UI.Error(fmt.Sprintf("Could not parse a time value for http_read_header_timeout %v", readHeaderTimeout))
1707				return 1
1708			}
1709			server.ReadHeaderTimeout = readHeaderTimeout
1710		}
1711
1712		if readTimeoutInterface, ok := ln.config["http_read_timeout"]; ok {
1713			readTimeout, err := parseutil.ParseDurationSecond(readTimeoutInterface)
1714			if err != nil {
1715				c.UI.Error(fmt.Sprintf("Could not parse a time value for http_read_timeout %v", readTimeout))
1716				return 1
1717			}
1718			server.ReadTimeout = readTimeout
1719		}
1720
1721		if writeTimeoutInterface, ok := ln.config["http_write_timeout"]; ok {
1722			writeTimeout, err := parseutil.ParseDurationSecond(writeTimeoutInterface)
1723			if err != nil {
1724				c.UI.Error(fmt.Sprintf("Could not parse a time value for http_write_timeout %v", writeTimeout))
1725				return 1
1726			}
1727			server.WriteTimeout = writeTimeout
1728		}
1729
1730		if idleTimeoutInterface, ok := ln.config["http_idle_timeout"]; ok {
1731			idleTimeout, err := parseutil.ParseDurationSecond(idleTimeoutInterface)
1732			if err != nil {
1733				c.UI.Error(fmt.Sprintf("Could not parse a time value for http_idle_timeout %v", idleTimeout))
1734				return 1
1735			}
1736			server.IdleTimeout = idleTimeout
1737		}
1738
1739		// server config tests can exit now
1740		if c.flagTestServerConfig {
1741			continue
1742		}
1743
1744		go server.Serve(ln.Listener)
1745	}
1746
1747	if c.flagTestServerConfig {
1748		return 0
1749	}
1750
1751	if sealConfigError != nil {
1752		init, err := core.Initialized(context.Background())
1753		if err != nil {
1754			c.UI.Error(fmt.Sprintf("Error checking if core is initialized: %v", err))
1755			return 1
1756		}
1757		if init {
1758			c.UI.Error("Vault is initialized but no Seal key could be loaded")
1759			return 1
1760		}
1761	}
1762
1763	if newCoreError != nil {
1764		c.UI.Warn(wrapAtLength(
1765			"WARNING! A non-fatal error occurred during initialization. Please " +
1766				"check the logs for more information."))
1767		c.UI.Warn("")
1768	}
1769
1770	// Output the header that the server has started
1771	if !c.flagCombineLogs {
1772		c.UI.Output("==> Vault server started! Log data will stream in below:\n")
1773	}
1774
1775	// Inform any tests that the server is ready
1776	select {
1777	case c.startedCh <- struct{}{}:
1778	default:
1779	}
1780
1781	// Release the log gate.
1782	c.logger.(hclog.OutputResettable).ResetOutputWithFlush(&hclog.LoggerOptions{
1783		Output: c.logOutput,
1784	}, c.gatedWriter)
1785
1786	// Write out the PID to the file now that server has successfully started
1787	if err := c.storePidFile(config.PidFile); err != nil {
1788		c.UI.Error(fmt.Sprintf("Error storing PID: %s", err))
1789		return 1
1790	}
1791
1792	defer func() {
1793		if err := c.removePidFile(config.PidFile); err != nil {
1794			c.UI.Error(fmt.Sprintf("Error deleting the PID file: %s", err))
1795		}
1796	}()
1797
1798	var coreShutdownDoneCh <-chan struct{}
1799	if c.flagExitOnCoreShutdown {
1800		coreShutdownDoneCh = core.ShutdownDone()
1801	}
1802
1803	// Wait for shutdown
1804	shutdownTriggered := false
1805	retCode := 0
1806
1807	for !shutdownTriggered {
1808		select {
1809		case <-coreShutdownDoneCh:
1810			c.UI.Output("==> Vault core was shut down")
1811			retCode = 1
1812			shutdownTriggered = true
1813		case <-c.ShutdownCh:
1814			c.UI.Output("==> Vault shutdown triggered")
1815			shutdownTriggered = true
1816		case <-c.SighupCh:
1817			c.UI.Output("==> Vault reload triggered")
1818
1819			// Check for new log level
1820			var config *server.Config
1821			var level log.Level
1822			for _, path := range c.flagConfigs {
1823				current, err := server.LoadConfig(path)
1824				if err != nil {
1825					c.logger.Error("could not reload config", "path", path, "error", err)
1826					goto RUNRELOADFUNCS
1827				}
1828
1829				if config == nil {
1830					config = current
1831				} else {
1832					config = config.Merge(current)
1833				}
1834			}
1835
1836			// Ensure at least one config was found.
1837			if config == nil {
1838				c.logger.Error("no config found at reload time")
1839				goto RUNRELOADFUNCS
1840			}
1841
1842			core.SetConfig(config)
1843
1844			if config.LogLevel != "" {
1845				configLogLevel := strings.ToLower(strings.TrimSpace(config.LogLevel))
1846				switch configLogLevel {
1847				case "trace":
1848					level = log.Trace
1849				case "debug":
1850					level = log.Debug
1851				case "notice", "info", "":
1852					level = log.Info
1853				case "warn", "warning":
1854					level = log.Warn
1855				case "err", "error":
1856					level = log.Error
1857				default:
1858					c.logger.Error("unknown log level found on reload", "level", config.LogLevel)
1859					goto RUNRELOADFUNCS
1860				}
1861				core.SetLogLevel(level)
1862			}
1863
1864		RUNRELOADFUNCS:
1865			if err := c.Reload(c.reloadFuncsLock, c.reloadFuncs, c.flagConfigs); err != nil {
1866				c.UI.Error(fmt.Sprintf("Error(s) were encountered during reload: %s", err))
1867			}
1868
1869		case <-c.SigUSR2Ch:
1870			buf := make([]byte, 32*1024*1024)
1871			n := runtime.Stack(buf[:], true)
1872			c.logger.Info("goroutine trace", "stack", string(buf[:n]))
1873		}
1874	}
1875
1876	// Stop the listeners so that we don't process further client requests.
1877	c.cleanupGuard.Do(listenerCloseFunc)
1878
1879	// Shutdown will wait until after Vault is sealed, which means the
1880	// request forwarding listeners will also be closed (and also
1881	// waited for).
1882	if err := core.Shutdown(); err != nil {
1883		c.UI.Error(fmt.Sprintf("Error with core shutdown: %s", err))
1884	}
1885
1886	// Wait for dependent goroutines to complete
1887	c.WaitGroup.Wait()
1888	return retCode
1889}
1890
// enableDev initializes and unseals the given core for "dev" mode: a
// single-share barrier (and recovery) key, an optional caller-chosen root
// token, optional persistence of that token via the token helper, and a
// default KV secrets engine mounted at secret/. It returns the InitResult
// carrying the unseal key(s) and final root token.
func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig) (*vault.InitResult, error) {
	ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace)

	// Dev mode always uses a single unseal key share.
	var recoveryConfig *vault.SealConfig
	barrierConfig := &vault.SealConfig{
		SecretShares:    1,
		SecretThreshold: 1,
	}

	// Seals with recovery keys (e.g. auto-unseal) also get a single
	// recovery key share.
	if core.SealAccess().RecoveryKeySupported() {
		recoveryConfig = &vault.SealConfig{
			SecretShares:    1,
			SecretThreshold: 1,
		}
	}

	if core.SealAccess().StoredKeysSupported() != vaultseal.StoredKeysNotSupported {
		barrierConfig.StoredShares = 1
	}

	// Initialize it with a basic single key
	init, err := core.Initialize(ctx, &vault.InitParams{
		BarrierConfig:  barrierConfig,
		RecoveryConfig: recoveryConfig,
	})
	if err != nil {
		return nil, err
	}

	// Handle unseal with stored keys
	if core.SealAccess().StoredKeysSupported() == vaultseal.StoredKeysSupportedGeneric {
		err := core.UnsealWithStoredKeys(ctx)
		if err != nil {
			return nil, err
		}
	} else {
		// Copy the key so that it can be zeroed
		key := make([]byte, len(init.SecretShares[0]))
		copy(key, init.SecretShares[0])

		// Unseal the core
		unsealed, err := core.Unseal(key)
		if err != nil {
			return nil, err
		}
		if !unsealed {
			return nil, fmt.Errorf("failed to unseal Vault for dev mode")
		}
	}

	// Wait (up to ~5s) for this node to become active before issuing
	// requests against it. ErrHANotEnabled means there is no HA backend,
	// in which case there is nothing to wait for and err != nil skips the
	// loop below.
	isLeader, _, _, err := core.Leader()
	if err != nil && err != vault.ErrHANotEnabled {
		return nil, errwrap.Wrapf("failed to check active status: {{err}}", err)
	}
	if err == nil {
		leaderCount := 5
		for !isLeader {
			if leaderCount == 0 {
				// Dump all goroutine stacks to aid debugging the hang.
				buf := make([]byte, 1<<16)
				runtime.Stack(buf, true)
				return nil, fmt.Errorf("failed to get active status after five seconds; call stack is\n%s\n", buf)
			}
			time.Sleep(1 * time.Second)
			isLeader, _, _, err = core.Leader()
			if err != nil {
				return nil, errwrap.Wrapf("failed to check active status: {{err}}", err)
			}
			leaderCount--
		}
	}

	// Generate a dev root token if one is provided in the flag; the
	// auto-generated initial root token is then revoked below.
	if coreConfig.DevToken != "" {
		req := &logical.Request{
			ID:          "dev-gen-root",
			Operation:   logical.UpdateOperation,
			ClientToken: init.RootToken,
			Path:        "auth/token/create",
			Data: map[string]interface{}{
				"id":                coreConfig.DevToken,
				"policies":          []string{"root"},
				"no_parent":         true,
				"no_default_policy": true,
			},
		}
		resp, err := core.HandleRequest(ctx, req)
		if err != nil {
			return nil, errwrap.Wrapf(fmt.Sprintf("failed to create root token with ID %q: {{err}}", coreConfig.DevToken), err)
		}
		if resp == nil {
			return nil, fmt.Errorf("nil response when creating root token with ID %q", coreConfig.DevToken)
		}
		if resp.Auth == nil {
			return nil, fmt.Errorf("nil auth when creating root token with ID %q", coreConfig.DevToken)
		}

		init.RootToken = resp.Auth.ClientToken

		// Revoke the initial root token; the request struct is reused
		// with the new ID/path and cleared data.
		req.ID = "dev-revoke-init-root"
		req.Path = "auth/token/revoke-self"
		req.Data = nil
		resp, err = core.HandleRequest(ctx, req)
		if err != nil {
			return nil, errwrap.Wrapf("failed to revoke initial root token: {{err}}", err)
		}
	}

	// Set the token in the token helper unless disabled by flag.
	if !c.flagDevNoStoreToken {
		tokenHelper, err := c.TokenHelper()
		if err != nil {
			return nil, err
		}
		if err := tokenHelper.Store(init.RootToken); err != nil {
			return nil, err
		}
	}

	// Mount a default KV engine at secret/: v2 by default, v1 when the
	// KV-v1 or leased-KV dev flags are set.
	kvVer := "2"
	if c.flagDevKVV1 || c.flagDevLeasedKV {
		kvVer = "1"
	}
	req := &logical.Request{
		Operation:   logical.UpdateOperation,
		ClientToken: init.RootToken,
		Path:        "sys/mounts/secret",
		Data: map[string]interface{}{
			"type":        "kv",
			"path":        "secret/",
			"description": "key/value secret storage",
			"options": map[string]string{
				"version": kvVer,
			},
		},
	}
	resp, err := core.HandleRequest(ctx, req)
	if err != nil {
		return nil, errwrap.Wrapf("error creating default K/V store: {{err}}", err)
	}
	if resp.IsError() {
		return nil, errwrap.Wrapf("failed to create default K/V store: {{err}}", resp.Error())
	}

	return init, nil
}
2036
// enableThreeNodeDevCluster starts an in-process three-node test cluster
// for dev mode, prints connection info (unseal keys, root token, env
// vars), then blocks servicing SIGHUP reloads until shutdown is
// triggered. Returns a process exit code.
func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info map[string]string, infoKeys []string, devListenAddress, tempDir string) int {
	testCluster := vault.NewTestCluster(&testing.RuntimeT{}, base, &vault.TestClusterOptions{
		HandlerFunc:       vaulthttp.Handler,
		BaseListenAddress: c.flagDevListenAddr,
		Logger:            c.logger,
		TempDir:           tempDir,
	})
	// cleanupGuard ensures Cleanup runs at most once (also triggered on
	// shutdown below).
	defer c.cleanupGuard.Do(testCluster.Cleanup)

	info["cluster parameters path"] = testCluster.TempDir
	infoKeys = append(infoKeys, "cluster parameters path")

	for i, core := range testCluster.Cores {
		info[fmt.Sprintf("node %d api address", i)] = fmt.Sprintf("https://%s", core.Listeners[0].Address.String())
		infoKeys = append(infoKeys, fmt.Sprintf("node %d api address", i))
	}

	infoKeys = append(infoKeys, "version")
	verInfo := version.GetVersion()
	info["version"] = verInfo.FullVersionNumber(false)
	if verInfo.Revision != "" {
		info["version sha"] = strings.Trim(verInfo.Revision, "'")
		infoKeys = append(infoKeys, "version sha")
	}
	infoKeys = append(infoKeys, "cgo")
	info["cgo"] = "disabled"
	if version.CgoEnabled {
		info["cgo"] = "enabled"
	}

	// Server configuration output, right-aligned to a fixed column.
	padding := 24
	sort.Strings(infoKeys)
	c.UI.Output("==> Vault server configuration:\n")
	for _, k := range infoKeys {
		c.UI.Output(fmt.Sprintf(
			"%s%s: %s",
			strings.Repeat(" ", padding-len(k)),
			strings.Title(k),
			info[k]))
	}
	c.UI.Output("")

	// Wire each core's HTTP handler before starting the cluster.
	for _, core := range testCluster.Cores {
		core.Server.Handler = vaulthttp.Handler(&vault.HandlerProperties{
			Core: core.Core,
		})
		core.SetClusterHandler(core.Server.Handler)
	}

	testCluster.Start()

	ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace)

	// Same dance as enableDev: mint the caller-requested root token on
	// node 0, then revoke the auto-generated initial root token.
	if base.DevToken != "" {
		req := &logical.Request{
			ID:          "dev-gen-root",
			Operation:   logical.UpdateOperation,
			ClientToken: testCluster.RootToken,
			Path:        "auth/token/create",
			Data: map[string]interface{}{
				"id":                base.DevToken,
				"policies":          []string{"root"},
				"no_parent":         true,
				"no_default_policy": true,
			},
		}
		resp, err := testCluster.Cores[0].HandleRequest(ctx, req)
		if err != nil {
			c.UI.Error(fmt.Sprintf("failed to create root token with ID %s: %s", base.DevToken, err))
			return 1
		}
		if resp == nil {
			c.UI.Error(fmt.Sprintf("nil response when creating root token with ID %s", base.DevToken))
			return 1
		}
		if resp.Auth == nil {
			c.UI.Error(fmt.Sprintf("nil auth when creating root token with ID %s", base.DevToken))
			return 1
		}

		testCluster.RootToken = resp.Auth.ClientToken

		req.ID = "dev-revoke-init-root"
		req.Path = "auth/token/revoke-self"
		req.Data = nil
		resp, err = testCluster.Cores[0].HandleRequest(ctx, req)
		if err != nil {
			c.UI.Output(fmt.Sprintf("failed to revoke initial root token: %s", err))
			return 1
		}
	}

	// Set the token in the token helper for CLI convenience.
	tokenHelper, err := c.TokenHelper()
	if err != nil {
		c.UI.Error(fmt.Sprintf("Error getting token helper: %s", err))
		return 1
	}
	if err := tokenHelper.Store(testCluster.RootToken); err != nil {
		c.UI.Error(fmt.Sprintf("Error storing in token helper: %s", err))
		return 1
	}

	// NOTE(review): 0755 leaves the root token world-readable; dev-only
	// temp dir, but consider 0600 — confirm against upstream intent.
	if err := ioutil.WriteFile(filepath.Join(testCluster.TempDir, "root_token"), []byte(testCluster.RootToken), 0755); err != nil {
		c.UI.Error(fmt.Sprintf("Error writing token to tempfile: %s", err))
		return 1
	}

	c.UI.Output(fmt.Sprintf(
		"==> Three node dev mode is enabled\n\n" +
			"The unseal key and root token are reproduced below in case you\n" +
			"want to seal/unseal the Vault or play with authentication.\n",
	))

	for i, key := range testCluster.BarrierKeys {
		c.UI.Output(fmt.Sprintf(
			"Unseal Key %d: %s",
			i+1, base64.StdEncoding.EncodeToString(key),
		))
	}

	c.UI.Output(fmt.Sprintf(
		"\nRoot Token: %s\n", testCluster.RootToken,
	))

	c.UI.Output(fmt.Sprintf(
		"\nUseful env vars:\n"+
			"VAULT_TOKEN=%s\n"+
			"VAULT_ADDR=%s\n"+
			"VAULT_CACERT=%s/ca_cert.pem\n",
		testCluster.RootToken,
		testCluster.Cores[0].Client.Address(),
		testCluster.TempDir,
	))

	// Output the header that the server has started
	c.UI.Output("==> Vault server started! Log data will stream in below:\n")

	// Inform any tests that the server is ready (non-blocking send).
	select {
	case c.startedCh <- struct{}{}:
	default:
	}

	// Release the log gate: flush buffered startup logs and switch the
	// logger to writing directly to the configured output.
	c.logger.(hclog.OutputResettable).ResetOutputWithFlush(&hclog.LoggerOptions{
		Output: c.logOutput,
	}, c.gatedWriter)

	// Wait for shutdown, servicing reload signals in the meantime.
	shutdownTriggered := false

	for !shutdownTriggered {
		select {
		case <-c.ShutdownCh:
			c.UI.Output("==> Vault shutdown triggered")

			// Stop the listeners so that we don't process further client requests.
			c.cleanupGuard.Do(testCluster.Cleanup)

			// Shutdown will wait until after Vault is sealed, which means the
			// request forwarding listeners will also be closed (and also
			// waited for).
			for _, core := range testCluster.Cores {
				if err := core.Shutdown(); err != nil {
					c.UI.Error(fmt.Sprintf("Error with core shutdown: %s", err))
				}
			}

			shutdownTriggered = true

		case <-c.SighupCh:
			c.UI.Output("==> Vault reload triggered")
			for _, core := range testCluster.Cores {
				if err := c.Reload(core.ReloadFuncsLock, core.ReloadFuncs, nil); err != nil {
					c.UI.Error(fmt.Sprintf("Error(s) were encountered during reload: %s", err))
				}
			}
		}
	}

	return 0
}
2221
2222// addPlugin adds any plugins to the catalog
2223func (c *ServerCommand) addPlugin(path, token string, core *vault.Core) error {
2224	// Get the sha256 of the file at the given path.
2225	pluginSum := func(p string) (string, error) {
2226		hasher := sha256.New()
2227		f, err := os.Open(p)
2228		if err != nil {
2229			return "", err
2230		}
2231		defer f.Close()
2232		if _, err := io.Copy(hasher, f); err != nil {
2233			return "", err
2234		}
2235		return hex.EncodeToString(hasher.Sum(nil)), nil
2236	}
2237
2238	// Mount any test plugins. We do this explicitly before we inform tests of
2239	// a completely booted server intentionally.
2240	sha256sum, err := pluginSum(path)
2241	if err != nil {
2242		return err
2243	}
2244
2245	// Default the name to the basename of the binary
2246	name := filepath.Base(path)
2247
2248	// File a request against core to enable the plugin
2249	req := &logical.Request{
2250		Operation:   logical.UpdateOperation,
2251		ClientToken: token,
2252		Path:        fmt.Sprintf("sys/plugins/catalog/%s", name),
2253		Data: map[string]interface{}{
2254			"sha256":  sha256sum,
2255			"command": name,
2256		},
2257	}
2258	ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace)
2259	if _, err := core.HandleRequest(ctx, req); err != nil {
2260		return err
2261	}
2262
2263	return nil
2264}
2265
2266// detectRedirect is used to attempt redirect address detection
2267func (c *ServerCommand) detectRedirect(detect physical.RedirectDetect,
2268	config *server.Config) (string, error) {
2269	// Get the hostname
2270	host, err := detect.DetectHostAddr()
2271	if err != nil {
2272		return "", err
2273	}
2274
2275	// set [] for ipv6 addresses
2276	if strings.Contains(host, ":") && !strings.Contains(host, "]") {
2277		host = "[" + host + "]"
2278	}
2279
2280	// Default the port and scheme
2281	scheme := "https"
2282	port := 8200
2283
2284	// Attempt to detect overrides
2285	for _, list := range config.Listeners {
2286		// Only attempt TCP
2287		if list.Type != "tcp" {
2288			continue
2289		}
2290
2291		// Check if TLS is disabled
2292		if val, ok := list.Config["tls_disable"]; ok {
2293			disable, err := parseutil.ParseBool(val)
2294			if err != nil {
2295				return "", errwrap.Wrapf("tls_disable: {{err}}", err)
2296			}
2297
2298			if disable {
2299				scheme = "http"
2300			}
2301		}
2302
2303		// Check for address override
2304		var addr string
2305		addrRaw, ok := list.Config["address"]
2306		if !ok {
2307			addr = "127.0.0.1:8200"
2308		} else {
2309			addr = addrRaw.(string)
2310		}
2311
2312		// Check for localhost
2313		hostStr, portStr, err := net.SplitHostPort(addr)
2314		if err != nil {
2315			continue
2316		}
2317		if hostStr == "127.0.0.1" {
2318			host = hostStr
2319		}
2320
2321		// Check for custom port
2322		listPort, err := strconv.Atoi(portStr)
2323		if err != nil {
2324			continue
2325		}
2326		port = listPort
2327	}
2328
2329	// Build a URL
2330	url := &url.URL{
2331		Scheme: scheme,
2332		Host:   fmt.Sprintf("%s:%d", host, port),
2333	}
2334
2335	// Return the URL string
2336	return url.String(), nil
2337}
2338
// setupTelemetry is used to setup the telemetry sub-systems and returns the in-memory sink to be used in http configuration
func (c *ServerCommand) setupTelemetry(config *server.Config) (*metricsutil.MetricsHelper, error) {
	/* Setup telemetry
	Aggregate on 10 second intervals for 1 minute. Expose the
	metrics over stderr when there is a SIGUSR1 received.
	*/
	inm := metrics.NewInmemSink(10*time.Second, time.Minute)
	metrics.DefaultInmemSignal(inm)

	// Fall back to an empty telemetry config so the field reads below
	// are always safe.
	var telConfig *server.Telemetry
	if config.Telemetry != nil {
		telConfig = config.Telemetry
	} else {
		telConfig = &server.Telemetry{}
	}

	// Metrics are prefixed with "vault" unless overridden.
	serviceName := "vault"
	if telConfig.MetricsPrefix != "" {
		serviceName = telConfig.MetricsPrefix
	}

	metricsConf := metrics.DefaultConfig(serviceName)
	metricsConf.EnableHostname = !telConfig.DisableHostname
	metricsConf.EnableHostnameLabel = telConfig.EnableHostnameLabel

	// fanout collects every configured sink; the in-memory sink is
	// appended at the end before installing the global sink.
	var fanout metrics.FanoutSink
	var prometheusEnabled bool

	// Configure the Prometheus sink
	if telConfig.PrometheusRetentionTime != 0 {
		prometheusEnabled = true
		prometheusOpts := prometheus.PrometheusOpts{
			Expiration: telConfig.PrometheusRetentionTime,
		}

		sink, err := prometheus.NewPrometheusSinkFrom(prometheusOpts)
		if err != nil {
			return nil, err
		}
		fanout = append(fanout, sink)
	}

	metricHelper := metricsutil.NewMetricsHelper(inm, prometheusEnabled)

	// Configure the statsite sink
	if telConfig.StatsiteAddr != "" {
		sink, err := metrics.NewStatsiteSink(telConfig.StatsiteAddr)
		if err != nil {
			return nil, err
		}
		fanout = append(fanout, sink)
	}

	// Configure the statsd sink
	if telConfig.StatsdAddr != "" {
		sink, err := metrics.NewStatsdSink(telConfig.StatsdAddr)
		if err != nil {
			return nil, err
		}
		fanout = append(fanout, sink)
	}

	// Configure the Circonus sink
	if telConfig.CirconusAPIToken != "" || telConfig.CirconusCheckSubmissionURL != "" {
		cfg := &circonus.Config{}
		cfg.Interval = telConfig.CirconusSubmissionInterval
		cfg.CheckManager.API.TokenKey = telConfig.CirconusAPIToken
		cfg.CheckManager.API.TokenApp = telConfig.CirconusAPIApp
		cfg.CheckManager.API.URL = telConfig.CirconusAPIURL
		cfg.CheckManager.Check.SubmissionURL = telConfig.CirconusCheckSubmissionURL
		cfg.CheckManager.Check.ID = telConfig.CirconusCheckID
		cfg.CheckManager.Check.ForceMetricActivation = telConfig.CirconusCheckForceMetricActivation
		cfg.CheckManager.Check.InstanceID = telConfig.CirconusCheckInstanceID
		cfg.CheckManager.Check.SearchTag = telConfig.CirconusCheckSearchTag
		cfg.CheckManager.Check.DisplayName = telConfig.CirconusCheckDisplayName
		cfg.CheckManager.Check.Tags = telConfig.CirconusCheckTags
		cfg.CheckManager.Broker.ID = telConfig.CirconusBrokerID
		cfg.CheckManager.Broker.SelectTag = telConfig.CirconusBrokerSelectTag

		// Vault-flavored defaults for fields left unset in config.
		if cfg.CheckManager.API.TokenApp == "" {
			cfg.CheckManager.API.TokenApp = "vault"
		}

		if cfg.CheckManager.Check.DisplayName == "" {
			cfg.CheckManager.Check.DisplayName = "Vault"
		}

		if cfg.CheckManager.Check.SearchTag == "" {
			cfg.CheckManager.Check.SearchTag = "service:vault"
		}

		sink, err := circonus.NewCirconusSink(cfg)
		if err != nil {
			return nil, err
		}
		sink.Start()
		fanout = append(fanout, sink)
	}

	// Configure the DogStatsD sink
	if telConfig.DogStatsDAddr != "" {
		var tags []string

		if telConfig.DogStatsDTags != nil {
			tags = telConfig.DogStatsDTags
		}

		sink, err := datadog.NewDogStatsdSink(telConfig.DogStatsDAddr, metricsConf.HostName)
		if err != nil {
			return nil, errwrap.Wrapf("failed to start DogStatsD sink: {{err}}", err)
		}
		sink.SetTags(tags)
		fanout = append(fanout, sink)
	}

	// Configure the stackdriver sink
	if telConfig.StackdriverProjectID != "" {
		client, err := monitoring.NewMetricClient(context.Background(), option.WithUserAgent(useragent.String()))
		if err != nil {
			return nil, fmt.Errorf("Failed to create stackdriver client: %v", err)
		}
		sink := stackdriver.NewSink(client, &stackdriver.Config{
			ProjectID: telConfig.StackdriverProjectID,
			Location:  telConfig.StackdriverLocation,
			Namespace: telConfig.StackdriverNamespace,
		})
		fanout = append(fanout, sink)
	}

	// Initialize the global sink
	// NOTE(review): with exactly one external sink (e.g. only Prometheus)
	// len(fanout) == 1, so the hostname warning below is skipped and
	// hostname is disabled instead — confirm `> 1` vs `> 0` is intended.
	if len(fanout) > 1 {
		// Hostname enabled will create poor quality metrics name for prometheus
		if !telConfig.DisableHostname {
			c.UI.Warn("telemetry.disable_hostname has been set to false. Recommended setting is true for Prometheus to avoid poorly named metrics.")
		}
	} else {
		metricsConf.EnableHostname = false
	}
	fanout = append(fanout, inm)
	_, err := metrics.NewGlobal(metricsConf, fanout)

	if err != nil {
		return nil, err
	}

	return metricHelper, nil
}
2485
2486func (c *ServerCommand) Reload(lock *sync.RWMutex, reloadFuncs *map[string][]reloadutil.ReloadFunc, configPath []string) error {
2487	lock.RLock()
2488	defer lock.RUnlock()
2489
2490	var reloadErrors *multierror.Error
2491
2492	for k, relFuncs := range *reloadFuncs {
2493		switch {
2494		case strings.HasPrefix(k, "listener|"):
2495			for _, relFunc := range relFuncs {
2496				if relFunc != nil {
2497					if err := relFunc(nil); err != nil {
2498						reloadErrors = multierror.Append(reloadErrors, errwrap.Wrapf("error encountered reloading listener: {{err}}", err))
2499					}
2500				}
2501			}
2502
2503		case strings.HasPrefix(k, "audit_file|"):
2504			for _, relFunc := range relFuncs {
2505				if relFunc != nil {
2506					if err := relFunc(nil); err != nil {
2507						reloadErrors = multierror.Append(reloadErrors, errwrap.Wrapf(fmt.Sprintf("error encountered reloading file audit device at path %q: {{err}}", strings.TrimPrefix(k, "audit_file|")), err))
2508					}
2509				}
2510			}
2511		}
2512	}
2513
2514	// Send a message that we reloaded. This prevents "guessing" sleep times
2515	// in tests.
2516	select {
2517	case c.reloadedCh <- struct{}{}:
2518	default:
2519	}
2520
2521	return reloadErrors.ErrorOrNil()
2522}
2523
2524// storePidFile is used to write out our PID to a file if necessary
2525func (c *ServerCommand) storePidFile(pidPath string) error {
2526	// Quit fast if no pidfile
2527	if pidPath == "" {
2528		return nil
2529	}
2530
2531	// Open the PID file
2532	pidFile, err := os.OpenFile(pidPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
2533	if err != nil {
2534		return errwrap.Wrapf("could not open pid file: {{err}}", err)
2535	}
2536	defer pidFile.Close()
2537
2538	// Write out the PID
2539	pid := os.Getpid()
2540	_, err = pidFile.WriteString(fmt.Sprintf("%d", pid))
2541	if err != nil {
2542		return errwrap.Wrapf("could not write to pid file: {{err}}", err)
2543	}
2544	return nil
2545}
2546
2547// removePidFile is used to cleanup the PID file if necessary
2548func (c *ServerCommand) removePidFile(pidPath string) error {
2549	if pidPath == "" {
2550		return nil
2551	}
2552	return os.Remove(pidPath)
2553}
2554
// storageMigrationActive checks and warns against in-progress storage migrations.
// This function will block until storage is available.
func (c *ServerCommand) storageMigrationActive(backend physical.Backend) bool {
	// Tracks whether this is the first failed status read, so the
	// warning and log-gate release happen only once.
	first := true

	for {
		migrationStatus, err := CheckStorageMigration(backend)
		if err == nil {
			// Storage is reachable: either a migration is in progress
			// (block startup) or it is not (proceed).
			if migrationStatus != nil {
				startTime := migrationStatus.Start.Format(time.RFC3339)
				c.UI.Error(wrapAtLength(fmt.Sprintf("ERROR! Storage migration in progress (started: %s). "+
					"Server startup is prevented until the migration completes. Use 'vault operator migrate -reset' "+
					"to force clear the migration lock.", startTime)))
				return true
			}
			return false
		}
		if first {
			first = false
			c.UI.Warn("\nWARNING! Unable to read storage migration status.")

			// unexpected state, so stop buffering log messages
			c.logger.(hclog.OutputResettable).ResetOutputWithFlush(&hclog.LoggerOptions{
				Output: c.logOutput,
			}, c.gatedWriter)
		}
		c.logger.Warn("storage migration check error", "error", err.Error())

		// Retry every 2 seconds until storage responds; a shutdown signal
		// aborts startup (treated the same as an active migration).
		select {
		case <-time.After(2 * time.Second):
		case <-c.ShutdownCh:
			return true
		}
	}
}
2590
// StorageMigrationStatus is the JSON payload stored under the migration
// lock key while a storage migration is in progress.
type StorageMigrationStatus struct {
	// Start records when the migration began.
	Start time.Time `json:"start"`
}
2594
2595func CheckStorageMigration(b physical.Backend) (*StorageMigrationStatus, error) {
2596	entry, err := b.Get(context.Background(), storageMigrationLock)
2597
2598	if err != nil {
2599		return nil, err
2600	}
2601
2602	if entry == nil {
2603		return nil, nil
2604	}
2605
2606	var status StorageMigrationStatus
2607	if err := jsonutil.DecodeJSON(entry.Value, &status); err != nil {
2608		return nil, err
2609	}
2610
2611	return &status, nil
2612}
2613
2614func SetStorageMigration(b physical.Backend, active bool) error {
2615	if !active {
2616		return b.Delete(context.Background(), storageMigrationLock)
2617	}
2618
2619	status := StorageMigrationStatus{
2620		Start: time.Now(),
2621	}
2622
2623	enc, err := jsonutil.EncodeJSON(status)
2624	if err != nil {
2625		return err
2626	}
2627
2628	entry := &physical.Entry{
2629		Key:   storageMigrationLock,
2630		Value: enc,
2631	}
2632
2633	return b.Put(context.Background(), entry)
2634}
2635
// grpclogFaker adapts an hclog logger to the logger interface gRPC
// expects, so gRPC's internal log output flows through Vault's logger.
type grpclogFaker struct {
	logger log.Logger // destination for forwarded gRPC log messages
	log    bool       // when false, non-fatal Print* output is suppressed
}
2640
2641func (g *grpclogFaker) Fatal(args ...interface{}) {
2642	g.logger.Error(fmt.Sprint(args...))
2643	os.Exit(1)
2644}
2645
2646func (g *grpclogFaker) Fatalf(format string, args ...interface{}) {
2647	g.logger.Error(fmt.Sprintf(format, args...))
2648	os.Exit(1)
2649}
2650
2651func (g *grpclogFaker) Fatalln(args ...interface{}) {
2652	g.logger.Error(fmt.Sprintln(args...))
2653	os.Exit(1)
2654}
2655
2656func (g *grpclogFaker) Print(args ...interface{}) {
2657	if g.log && g.logger.IsDebug() {
2658		g.logger.Debug(fmt.Sprint(args...))
2659	}
2660}
2661
2662func (g *grpclogFaker) Printf(format string, args ...interface{}) {
2663	if g.log && g.logger.IsDebug() {
2664		g.logger.Debug(fmt.Sprintf(format, args...))
2665	}
2666}
2667
2668func (g *grpclogFaker) Println(args ...interface{}) {
2669	if g.log && g.logger.IsDebug() {
2670		g.logger.Debug(fmt.Sprintln(args...))
2671	}
2672}
2673